blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4ef1e04e37c82995efac4db5e381e3d3431180b5 | Python | RAPIDS-NU/NBAstats | /nbastats.py | UTF-8 | 6,399 | 2.75 | 3 | [] | no_license | import json
import requests
import pandas as pd
import numpy as np
import seaborn as sns
import pprint as pprint
import NBAData as nba
import time
# Pandas/Seaborn display configuration for interactive inspection of large frames.
pd.set_option('display.max_rows', 10000)
pd.set_option('display.max_colwidth', 10000)
pd.set_option('display.width', None)
sns.set_color_codes()
sns.set_style("white")
#opening the json file (SportVU movement dump for one game, keyed by game id)
with open('0021500293.json') as data_file:
game_data = json.load(data_file)
#gets the movement data from a
def get_movement_data(data):
    """Flatten SportVU tracking moments from ``data`` into a pandas DataFrame.

    data: parsed game JSON with keys ``gameid``, ``gamedate`` and ``events``;
          each event carries ``eventId``, ``moments`` and (for event 0) the
          ``home``/``visitor`` rosters.

    Returns a DataFrame with one row per tracked entity per moment, plus
    ``player_name``/``player_jersey`` columns resolved from the rosters
    (player_id -1 is the ball).

    NOTE(review): only the second event's moments (``allMoments[1]``) are
    flattened — the commented-out loop in the original suggested iterating
    every event; kept as-is for parity.  The per-player lists inside ``data``
    are extended in place, as before.
    """
    game_id = data["gameid"]      # kept for parity with the original (unused)
    game_date = data["gamedate"]  # unused as well
    events = data["events"]
    event_ids = [event["eventId"] for event in events]  # unused, kept for parity
    visitor = events[0]["visitor"]
    home = events[0]["home"]
    all_moments = [event["moments"] for event in events]
    # Column labels of the flattened frame.
    headers = ["team_id", "player_id", "x_loc", "y_loc",
               "radius", "moment", "game_clock", "period", "shot_clock"]
    player_moments = []
    moments = all_moments[1]
    for moment in moments:
        # Each entry of moment[5] is [team_id, player_id, x, y, radius].
        for player in moment[5]:
            # Append the moment index, the MM:SS game clock (moment[2] is
            # seconds remaining), the period (moment[0]) and the shot clock
            # (moment[3]).  This mutates the source lists, as the original did.
            player.extend((moments.index(moment),
                           str(time.strftime("%M:%S", time.gmtime(moment[2]))),
                           moment[0], moment[3]))
            player_moments.append(player)
    movement_df = pd.DataFrame(player_moments, columns=headers)
    # Map player ids to [display name, jersey number]; -1 is the ball.
    players = home["players"]
    players.extend(visitor["players"])
    id_dict = {p["playerid"]: [p["firstname"] + " " + p["lastname"], p["jersey"]]
               for p in players}
    id_dict.update({-1: ['ball', np.nan]})
    movement_df["player_name"] = movement_df.player_id.map(lambda pid: id_dict[pid][0])
    movement_df["player_jersey"] = movement_df.player_id.map(lambda pid: id_dict[pid][1])
    return movement_df
# gets the play by play data from the given json file and returns it in a dataframe
def get_play_by_play(json):
    """Fetch the four quarters of play-by-play for the game described by ``json``.

    json: the raw game dict (same object passed to get_movement_data).  The
          parameter name is kept for backward compatibility with existing
          callers even though it shadows the ``json`` module.

    Returns a DataFrame with one row per play, the quarter in ``period`` and
    the numeric event codes replaced by readable labels.
    """
    data = json
    game_ID = data["gameid"]
    date = str(data["gamedate"])
    game_date = date.translate(str.maketrans('', '', '-'))  # "2015-12-05" -> "20151205"
    # One remote request per quarter, in game order.
    quarters = [nba.nba_data("play_by_play", game_date, str(game_ID), str(q))
                for q in (1, 2, 3, 4)]
    playheaders = ["clock", "description", "eventMsgType", "hTeamScore", "isScoreChange", "personId",
                   "teamId", "vTeamScore", "period"]
    # Readable labels for the numeric event-type codes; unknown codes pass through unchanged.
    msg_types = {"1": "Make", "2": "Miss", "3": "Free Throw", "4": "Rebound",
                 "5": "Turnover", "6": "Personal Foul", "7": "Violation",
                 "8": "Substitution", "9": "Timeout", "10": "Jumpball"}
    playbyplay = []
    for period, quarter in enumerate(quarters, start=1):
        for play in quarter["plays"]:
            play["period"] = period
            # Drop fields we never surface in the frame.
            play.pop("formatted", None)
            play.pop("isVideoAvailable", None)
            play["eventMsgType"] = msg_types.get(play["eventMsgType"], play["eventMsgType"])
            playbyplay.append(play)
    return pd.DataFrame(playbyplay, columns=playheaders)
#syncs up the play by play dataframe for every shot attempt with the
#positional data from the movement dataframe to give movement data for every shot attempt in the game
def get_shot_data(data):
    """Join Make/Miss plays from the play-by-play onto the tracking rows.

    Tracking rows whose game clock and period match a shot attempt get that
    play's metadata copied in; rows with no matching shot are dropped.
    """
    move_data = get_movement_data(data)
    pbp_data = get_play_by_play(data)
    shot_data = pbp_data.loc[pbp_data["eventMsgType"].isin(["Make", "Miss"])]
    # Play-by-play columns merged onto the movement frame.
    play_columns = ["description", "eventMsgType", "hTeamScore", "isScoreChange",
                    "personId", "teamId", "vTeamScore"]
    game_data = move_data.reindex(columns=["team_id", "player_id", "x_loc", "y_loc",
                                           "radius", "moment", "game_clock", "shot_clock"]
                                          + play_columns + ["period"])
    for moment in game_data.moment.unique():
        mask = game_data['moment'] == moment
        moment_rows = game_data.loc[mask]
        # Shots whose clock value and period both occur within this moment.
        play = shot_data.loc[shot_data["clock"].isin(moment_rows["game_clock"].tolist())
                             & shot_data["period"].isin(moment_rows["period"].tolist())].copy()
        if len(play) > 0:
            # Copy the first matching play's metadata onto every row of the moment.
            for column in play_columns:
                game_data.loc[mask, [column]] = play[column].iloc[0]
    return game_data[game_data.description.notnull()]
# Driver: build the per-shot movement frame for the loaded game and print it.
shots = (get_shot_data(game_data))
print(shots)
# Commented-out experiments kept from the original author:
#game_pbp = get_play_by_play(game_data)
#parsed = nba.nba_data("play_by_play", "20151205", "0021500293", "1")
#print(json.dumps(parsed, indent=4, sort_keys=True))
# print(nba.nba_data("play_by_play", "20151205", "0021500293", "1"))
| true |
1601990b73c524f477b7e0cdd4e2a99333c58bdd | Python | orram/Curiosity | /RUN.py | UTF-8 | 1,330 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Lets see it all run!!
"""
import gym
import gym_autoRobo
import numpy as np
import matplotlib.pyplot as plt
from LearningSteps import LearningStep
import utilities
# Drive one curiosity-learning session in the gym_autoRobo environment.
learn = LearningStep()
learn.flatten_image = False
learn.prunning_treshold = 0.5
print(vars(learn))
# Train only the two selected predictor nets instead of all of them.
learn.initialize_nets(all_nets = False, selected_nets = [[["camera t-1", "camera action t"], "camera t"],
[["camera angle t-1", "arm action t"], "camera angle t"]] )
env = gym.make('autoRobo-v0')
env.reset()
observation = env.reset()
epochs = 150
# Main interaction loop: act, observe, feed the learner, take an online step.
for t in range(epochs):
#print(t)
action = utilities.ChooseActionNoBrain(observation)
observation, reward, done, info = env.step(action) #observation includes
#camera angles, arm angles, rgb image, actions - all in time t
#the order is as follows:
#[arm angle, camera angle, arm action, camera action, rgb image]
learn.get_data(observation, t)
if t>1:
learn.onlineStep()
env.close()
# Plot each recorded loss curve in its own stacked subplot.
b = 1
plt.subplots_adjust(left=5, bottom=5, right=6.5, top=6.5, wspace=None, hspace=1)
for a in learn.loss_dict:
ax = plt.subplot(len(learn.loss_dict), 1, b )
b += 1
plt.plot(learn.loss_dict[a])
ax.set_title(a)
| true |
71255c30a6800033f610bd35d5940e9356695a0f | Python | jackjchen/map | /helios/pipeViewer/pipe_view/model/plain_file.py | UTF-8 | 748 | 2.9375 | 3 | [
"Apache-2.0"
] | permissive | import shlex
## reads a file created by neato -Tplain <file> > outfile
class Plain:
    """Parsed contents of a Graphviz ``neato -Tplain <file>`` layout file.

    Attributes:
        nodes:  dict mapping node id -> remaining fields as strings
                (position x, position y, width, height, label, ...).
        edges:  list of (tail_id, head_id) tuples.
        bounds: (width, height) of the whole graph as floats.
    """

    # indices into the per-node field lists stored in ``nodes``
    NODE_POSX = 0
    NODE_POSY = 1
    NODE_DATA = 4

    def __init__(self, filename):
        # objects keyed by identifiers
        self.nodes = {}
        self.edges = []
        self.bounds = (0, 0)
        # ``with`` guarantees the handle is closed even if parsing raises
        # (the original leaked the handle on error).
        with open(filename) as handle:
            for line in handle:
                fields = shlex.split(line)
                if not fields:
                    continue  # skip blank lines (original raised IndexError here)
                # field 0 is the record type of the line
                if fields[0] == 'node':
                    self.nodes[fields[1]] = fields[2:]
                elif fields[0] == 'edge':
                    self.edges.append((fields[1], fields[2]))
                elif fields[0] == 'graph':
                    self.bounds = (float(fields[2]), float(fields[3]))
| true |
f55552aa39513100e1711ef1393b909fe373650c | Python | nathantheinventor/solved-problems | /uva/11831 Sticker Collector Robots/sticker.py | UTF-8 | 1,270 | 2.65625 | 3 | [] | no_license | left = {"N": "W", "E": "N", "S": "E", "W": "S"}
# UVA 11831 "Sticker Collector Robots": simulate a robot on a bordered grid.
# Positions are complex numbers (real part = column, imaginary part = row);
# `left` (turn-left table) is defined on the preceding metadata-fused line.
# NOTE(review): `dir` shadows the builtin; command letters appear to be
# Portuguese (E = turn left, D = turn right, F = forward) — presumably.
right = {"N": "E", "E": "S", "S": "W", "W": "N"}
move = {"N": -1j, "E": 1, "W": -1, "S": 1j}
dir = {"O": "W", "N": "N", "S": "S", "L": "E"}
n, m, s = map(int, input().split())
while n > 0:
# get the grid bounded by #s
grid = [["#"] * (m + 2)]
for _ in range(n):
grid.append(["#"] + list(input()) + ["#"])
grid.append(["#"] * (m + 2))
curPos = 0
orientation = "N"
# Locate the robot's start cell and facing, then blank that cell.
for i in range(1, n + 1):
for j in range(1, m + 1):
if grid[i][j] in ("N", "S", "L", "O"):
orientation = dir[grid[i][j]]
grid[i][j] = '.'
curPos = i * 1j + j
ans = 0
# Execute the command string: turn, or step forward collecting '*' stickers.
for c in input():
# print(c, curPos, orientation)
if c == "E":
orientation = left[orientation]
elif c == "D":
orientation = right[orientation]
elif c == "F":
curPos += move[orientation]
new = grid[int(curPos.imag)][int(curPos.real)]
if new == '#':
curPos -= move[orientation]
elif new == "*":
grid[int(curPos.imag)][int(curPos.real)] = '.'
ans += 1
print(ans)
n, m, s = map(int, input().split())
706ad9fc67502211c7492e87c7e20f47d7e3e827 | Python | dkwired/coursework | /cs141/labs/lab1/fib2.py~ | UTF-8 | 472 | 3.375 | 3 | [] | no_license | #!/usr/bin/env python2.7
#
###################################
# CS141, 12 Spring
#
# fib2.py
###################################
import sys, timeit
sys.setrecursionlimit(1000)
# Python 2 code (print statement below).  Returns the pair (fib(n-1), fib(n));
# the print is leftover debug output on every recursive step.
def fib2_a(n):
if n==1:
return (0, 1)
else:
a, b = fib2_a(n-1)
print a,b
return (b, a+b)
def fib2(n):
    """Return the n-th Fibonacci number via fib2_a; values below 1 are returned unchanged."""
    return n if n < 1 else fib2_a(n)[1]
# CLI benchmark driver: time a single fib2(n) call (Python 2 print below).
n=10
if len(sys.argv)>1:
n=int(sys.argv[1])
t=timeit.Timer(lambda: fib2(n))
l=t.timeit(1)
print l
| true |
c0e7ad7e3b65b977f883e72a1ee182bf12faab50 | Python | BlesslinJerishR/PyCrash | /__5__IfStatements__/ordinal_numbers.py | UTF-8 | 353 | 3.21875 | 3 | [] | no_license | #ordinal_numbers.py
#5.11
#import
import sys
from _0_AddOns.defs import *
# Build a list of numbers with the project helpers, then print each with its
# English ordinal suffix.
ordinal_numbers = list_numbers(9)
zero_remover(ordinal_numbers)
print(ordinal_numbers)

# Irregular suffixes; everything else takes "th".
_SUFFIXES = {1: "st", 2: "nd", 3: "rd"}
for value in ordinal_numbers:
    print(f"{value}{_SUFFIXES.get(value, 'th')}")
| true |
06857ad676a70968a0cb24eb78cb699a6d371b02 | Python | ispastlibrary/Titan | /2015/AST1/vezbovni/anja/liste.py | UTF-8 | 789 | 3.625 | 4 | [] | no_license | lista = ['jan', 3, 'mart', 3.14]
# Teaching scratch file (comments translated from Serbian).  `lista` is
# defined on the preceding metadata-fused line.
print(lista[1])
print (len(lista[0])
# this returns the length of the list/element
# NOTE(review): the line above is missing a closing ')' — syntax error.
lista1=np.array[1,2,3,4]
lista2=np.array[7.8.9.4]
# NOTE(review): should be np.array([...]); `7.8.9.4` is also invalid, and
# `np` is not imported until further down — these lines cannot run.
lista3=lista1+lista2
for i in lista:
print(i)
# prints the elements one by one
for i in range(len(lista)):
print(lista[i])
for i in range(len(lista)):
print(i)
lista.pop()
print(lista)
# removes the last element
lista.insert(1, 'asd')
print (lista)
# inserts 'asd' at index 1 and shifts the rest to the right
import numpy as np
start = 0
stop = 10
korak= 0.1
x=np.arange(start, stop, korak)
# all values from 0 to 10 in steps of 0.1
print(x)
np.sin(np.pi/2)
np.cos(2)
np.exp(1)
np.log(32)# natural (base-e) log of 32
np.log10(32)# base-10 log of 32
def fun(x):
    """Return the first element of the indexable collection ``x``."""
    first = x[0]
    return first
| true |
fbc76c600aebe7364981d0c7408b4f0176f99cc9 | Python | Michael-Joe/origin_server | /pythonWorkspace/bmi.py | UTF-8 | 234 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# Body-mass-index demo: classify a hard-coded height/weight pair.
height = 1.75
weight = 80.5
bmi = weight/(height * height)
print ('bmi:',bmi)

# Upper bounds of each category, checked in ascending order; the for/else
# falls through to the final category when no bound matches.
for upper_bound, label in ((18.5, 'too thin'), (25, 'normal'), (32, 'too fat')):
    if bmi < upper_bound:
        print (label)
        break
else:
    print ('very fat')
| true |
f30d89cabdf10fcd20170e042e8dd2936b3422ce | Python | KevinLoudi/Python3 | /src/ui.py | UTF-8 | 3,779 | 2.65625 | 3 | [] | no_license | """
ui core of monpoly
&1 2018-2-4 Kevin
create monpoly place display
&2 2018-2-4 Kevin
create a list to display a group of places
&2 2018-2-4 Kevin
organize layout of display
Author: Kevin
Last edited: August 2017
"""
import sys
from PyQt5.QtWidgets import QLabel, QApplication,QVBoxLayout, QWidget, QPushButton, QHBoxLayout,QGroupBox, QGridLayout
from PyQt5.QtGui import QIcon
from enum import Enum
from numpy import ndarray
nUiLocationX = 300
nUiLocationY = 300
nUiSizeX = 1200
nUiSizeY = 800
szUiTiele = "Monpoly"
szUiIcoPath = 'web.png'
# Colour groups for board places (presumably R=red, Y=yellow, G=green — confirm).
class Color(Enum):
R = 1
Y = 2
G = 3
# Kinds of board squares; defined but not referenced in this file.
class PlaceType(Enum):
PLACE = 0
STREET = 1
STATION = 2
# Main Qt window for the Monpoly board.  NOTE(review): the module constant
# `szUiTiele` looks like a typo of `szUiTitle`.
class UI(QWidget):
def __init__(self):
super().__init__()
self.initUI()
# Configure window geometry/title/icon and build the layout.
def initUI(self):
self.setGeometry(nUiLocationX, nUiLocationY, nUiSizeX, nUiSizeY)
self.setWindowTitle(szUiTiele)
self.setWindowIcon(QIcon(szUiIcoPath))
self.place = []
self.place_info = []
self.createLayout2()
# windowLayout = QVBoxLayout()
# windowLayout.addWidget(self.horizontalGroupBox)
# self.setLayout(windowLayout)
print("size of place list is ", len(self.place))
self.show()
# First layout attempt: a 2x2 grid of place buttons inside a group box.
def createLayout(self):
self.horizontalGroupBox = QGroupBox("Grid")
layout = QGridLayout()
layout.setColumnStretch(1, 4)
layout.setColumnStretch(2, 4)
self.place_info = createDefPlaceList(9)
nIdx = 0
for i in range(0, 2):
for j in range(0, 2):
layout.addWidget(QPushButton(self.place_info[nIdx].getInfo()), i,j) #self.place_info[nIdx].getInfo()
nIdx = nIdx + 1
self.horizontalGroupBox.setLayout(layout)
# Second layout attempt: a 4x5 ring of places around the board edge
# (szLayout holds the place index for each grid cell, '' = interior).
def createLayout2(self):
grid = QGridLayout()
self.setLayout(grid)
nLayoutRow = 4
nLayoutCol = 5
szLayout = ['0', '1', '2', '3', '4',
'13', '', '', '', '5',
'12', '', '', '', '6',
'11', '10', '9', '8', '7']
lstPlaceInfo = []
for i in range(0, 14):
p_info = PlaceInfo('place', Color.R, '300', '120', 'banker')
lstPlaceInfo.append(p_info)
layoutMap = self.createLayoutMap(nLayoutRow,nLayoutCol, [nUiSizeX, nUiSizeY])
i = 0
j = 0
btn = QPushButton(lstPlaceInfo[10].getInfo(), self)
btn.resize(30,30)
# NOTE(review): createLayout3 has no body in this copy — likely a stub or
# lost content; as stored the next def cannot be its body at the same level.
def createLayout3(self):
# Build an nRow x nCol table of cell positions for the given window size.
# NOTE(review): cells are currently filled with the placeholder 1, not the
# commented-out [x, y] coordinates.
def createLayoutMap(self, nRow, nCol, nWindowSize):
[nTotSizeX,nTotSizeY] = nWindowSize
nStepX = nTotSizeX/nCol
nStepY = nTotSizeY/nRow
tdPosMap = ndarray((nRow, nCol))
for i in range(nRow):
for j in range(nCol):
tdPosMap[i][j] = 1#[i*nStepX, j*nStepY]
print(len(tdPosMap))
return tdPosMap
class PlaceInfo:
    """Value object holding the display data for one board place.

    Constructor argument names are kept so existing positional/keyword
    callers keep working.
    """

    def __init__(self, szType, enColGrp, szSaleVal, szMorVal, szOwner):
        self.type = szType      # place kind, e.g. 'place'
        self.col = enColGrp     # colour group (Color enum member)
        self.svalue = szSaleVal  # sale value, as a string
        self.mvalue = szMorVal   # mortgage value, as a string
        self.owner = szOwner     # current owner name

    def getInfo(self):
        """Return the multi-line label shown on the place button."""
        return "\n".join((self.type, self.svalue, self.owner))

    def getColor(self):
        """Return the colour group of this place."""
        return self.col
# Create a titled button at the given position/size inside parent widget `obj`.
def createPlace(obj, szTit, nLocx, nLocy, nSizex, nSizey):
btn = QPushButton(szTit, obj)
btn.setGeometry(nLocx, nLocy, nSizex, nSizey)
return btn
def createDefPlaceList(nNum):
    """Return a list of ``nNum`` identical default PlaceInfo records."""
    return [PlaceInfo('place', Color.R, '300', '120', 'banker')
            for _ in range(nNum)]
# Entry point: start the Qt event loop with one UI window.
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = UI()
sys.exit(app.exec_())
| true |
71f7d10fd66b15705eee55c45d47318ebdc00808 | Python | DongZhuoran/Artificial-Intelligence-CSCI561 | /hw1/hw1b/autotest/autoTestScript.py | UTF-8 | 1,422 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import random
import os
import sol4
def main():
num_cases = 10000
num_pass = 0.0
l = range(0, 10)
l = [x * num_cases * 0.1 for x in l]
for i in xrange(num_cases):
if i in l:
print float(i) / num_cases
testCasesCreator()
ret = sol4.main()
os.system("hongyuSolution.py")
fp = open("output.txt")
ret2 = int(fp.readline())
if ret == ret2:
num_pass += 1
else:
print ret, ret2
input("stop")
fp.close()
print "Similarity:", num_pass / num_cases * 100, "%"
def testCasesCreator():
try:
fp = open("input.txt", "w")
fp2 = open("input_cmp.txt", "w")
except IOError:
pass
else:
n = random.randint(0, 15) # length of grid
p = random.randint(0, n) # number of police
s = random.randint(0, n * n) # coord of scooters
fp.writelines([str(n) + "\n", str(p) + "\n", str(s) + "\n"])
fp2.writelines([str(n) + "\n", str(p) + "\n", str(s) + "\n"])
for i in xrange(s * 12):
x = random.randint(0, n - 1)
y = random.randint(0, n - 1)
fp.write(str(x) + "," + str(y) + "\n")
fp2.write(str(x) + "," + str(y) + "\n")
fp.close()
fp2.close()
if __name__ == '__main__':
main() | true |
c39152945a83eb631f1f725d24865e02935347f7 | Python | cold-pumpkin/Recommender-Project | /3.Modeling/1.XGBoost/MF_1.py | UTF-8 | 1,252 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 22 15:57:26 2018
@author: philip
"""
#################################################################
#################### Matrix Factorization #######################
#################################################################
## Load the data
import pandas as pd
import numpy as np
original_data = pd.read_csv("/Users/philip/Workspace/Final_Project/Data/02.구매상품TR.csv", sep=',')
original_data.info()
## Sum purchase amounts per customer id
original_data.head()
sum_data = original_data.groupby('고객번호').agg({'구매금액':'sum'})
sum_data = sum_data.reset_index()
sum_data.columns = ['고객번호', '총구매금액']
## Split customers at the 25% / 50% / 75% boundaries of total spend
# NOTE(review): the hard-coded thresholds below presumably come from
# describe()'s quartiles — confirm against the actual data.
sum_data.describe()
sum_data1 = sum_data[sum_data['총구매금액'] > 39349999]
sum_data2 = sum_data[np.logical_and(sum_data['총구매금액'] > 10929999, sum_data['총구매금액'] < 39350000)]
sum_data4 = sum_data[sum_data['총구매금액'] < 10929999]
len(sum_data1)
len(sum_data2)
len(sum_data4)
## Keep only the transactions of the middle-spend customer segment
data2_cust = sum_data2['고객번호']
data2_cust
final_data = original_data.loc[original_data['고객번호'].isin(data2_cust)]
bf755c1b08f0afd11154c35c471b50509b49b579 | Python | beingveera/whole-python | /python/projects/100`s of python/main.py | UTF-8 | 162 | 3.25 | 3 | [] | no_license | class ran:
def number(self,no):
for i in range(1,100):
l=no/i
print(" {} ".format(l))
user=ran()
x=int(input())
user.number(x)
| true |
a7d9e682fbdcd23ecbfaf1b172428c04aae471c4 | Python | huangshizhi/learngit | /icis_to_mysql.py | UTF-8 | 5,536 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 23 09:40:50 2021
@author: huangshizhi
测试从excel自动更新安迅思数据到数据库
ICIS Excel Plug-In
WDF.Addin
"""
from logger import Logger
import numpy as np
import time
import pandas as pd
from sqlalchemy import create_engine
from scrapy_util import *
from datetime import datetime,date
import time
import win32con
import win32gui
from pymouse import PyMouse
def trigger_update_excel_data(frameclass,frametitle, sleep_time=5):
'''
Refresh and then save the ICIS workbook by clicking fixed screen
coordinates inside the Excel window; after maximizing, the update and
save button positions do not change.  (Translated from Chinese.)
'''
stime = time.time()
hwnd = win32gui.FindWindow(frameclass, frametitle)
print(hwnd)
if hwnd != 0:
win32gui.ShowWindow(hwnd, win32con.SW_SHOWMAXIMIZED)# maximize the window
win32gui.SetForegroundWindow(hwnd) # bring the window to the foreground
m = PyMouse() # create the mouse controller
# screen coordinates of the ICIS "update" button
icis_location = (155, 13)
# screen coordinates of the Excel "save" button
save_location = (48, 6)
# click repeatedly to make sure the press registers
log.logger.info("开始更新数据......")
m.click(icis_location[0],icis_location[1])
m.click(icis_location[0],icis_location[1])
m.click(icis_location[0],icis_location[1])
log.logger.info("数据更新结束!......")
time.sleep(sleep_time)
# save the workbook
m.click(save_location[0],save_location[1])
m.click(save_location[0],save_location[1])
m.click(save_location[0],save_location[1])
log.logger.info("保存数据结束!")
log.logger.info("更新并保存安迅思数据共耗时%.2fs"%(time.time()-stime))
def get_quote_type(icis_column_name):
    """Map an ICIS column name to its quote type.

    Returns 'domestic', 'import' or 'CFR' depending on which marker appears
    in the name; '' when none matches.  Order matters: 'domestic' and
    'import' outrank 'CFR', and a bare 'China' marker also means domestic.
    """
    markers = (("domestic", "domestic"),
               ("import", "import"),
               ("CFR", "CFR"),
               ("China", "domestic"))
    for token, quote_type in markers:
        if token in icis_column_name:
            return quote_type
    return ''
def update_icis_data(icis_file_name,update_days,isis_column_mapping_data,mysql_engine,schema_name,tmp_schema_name,table_name):
'''
Read the ICIS data from the Excel workbook, enrich it and load it into
MySQL.  (Translated from Chinese.)
update_days: number of trailing days to refresh per run
isis_column_mapping_data: ICIS column-name mapping frame
mysql_engine: SQLAlchemy engine
schema_name / tmp_schema_name / table_name: target schema, staging schema and table
'''
log.logger.info("开始更新数据......")
start_time = time.time()
# read the excel data (11 header rows and an 11-row footer are skipped)
icis_data = pd.read_excel(icis_file_name,header = 11,index_col= 0,skipfooter = 11)
icis_data.index = range(len(icis_data))
icis_data['dt'] = icis_data['Date'].apply(lambda x : datetime.strftime(x, "%Y%m%d"))
print("最大更新日期为:"+str(max(icis_data['dt'])))
icis_data = icis_data.drop(['Date'],axis=1)
# keep only the most recent `update_days` rows
icis_data = icis_data[-1*(update_days):]
# melt the wide table into a long/narrow format
mydata1=icis_data.melt(id_vars=["dt"], # key column to keep
var_name="icis_column_name", # melted category variable
value_name="icis_price" # melted measure name
)
mydata2 = mydata1.dropna()
mydata3 = pd.merge(left = mydata2,right=isis_column_mapping_data,how='left',on=['icis_column_name'])
mydata3['prod_unit'] = mydata3.icis_column_name.apply(lambda x: x.split(':')[1]) # quote unit
# price type (low / mid / high), taken from the trailing parentheses
mydata3['price_type'] = mydata3.icis_column_name.apply(lambda x : x[x.rfind('(')+1:x.rfind(')')])
# quote type (domestic / import / CFR)
mydata3['quote_type'] = mydata3.icis_column_name.apply(get_quote_type)
# update frequency
mydata3['original_frequency'] = mydata3.icis_column_name.apply(lambda x : 'Daily' if 'Daily' in x else 'Weekly')
mydata3 = mydata3.fillna("")
mydata3['hashkey'] = mydata3.apply(generate_hashkey,args=(['icis_column_name','dt']),axis=1)
update_columns =['dt','icis_column_name','icis_price','mapping_eng_column_name',
'mapping_chn_column_name','prod_unit','price_type','quote_type','original_frequency']
save_data_to_mysql(mydata3,mysql_engine,schema_name,tmp_schema_name,table_name,update_columns)
log.logger.info("完成安迅思数据共更新耗时%.2fs"%(time.time()-start_time))
if __name__=='__main__':
logfilename = r"E:\project\data_center\code\Log\icis_to_mysql.log"
#logfilename = r"C:\Log\icis_to_mysql.log"
log = Logger(logfilename,level='info')
log.logger.info("-"*50)
# refresh and save the ICIS workbook via the Excel window
trigger_update_excel_data("XLMAIN", "icis_data.xlsx - Excel",sleep_time=1)
# NOTE(review): hard-coded database credentials in source — move to an
# environment variable or config file.
mysql_con = "mysql+pymysql://root:dZUbPu8eY$yBEoYK@27.150.182.135/"
mysql_engine = create_engine(mysql_con,encoding='utf-8', echo=False,pool_timeout=3600)
schema_name = "market_db"
tmp_schema_name = "tmp_market_db"
table_name = "icis_spot_data"
update_days = 30 # for a full refresh use 366; default refreshes about the last month
isis_column_mapping_sql = '''SELECT icis_column_name, mapping_eng_column_name,
mapping_chn_column_name FROM market_db.isis_column_mapping
'''
isis_column_mapping_data = pd.read_sql(isis_column_mapping_sql,con = mysql_engine)
icis_file_name = r"C:\data\icis_data.xlsx"
# update the ICIS data in MySQL
update_icis_data(icis_file_name,update_days,isis_column_mapping_data,mysql_engine,schema_name,tmp_schema_name,table_name)
f12db3a8e6133ccf1044cf4330d45683168d7043 | Python | mit-d/euler | /euler.py | UTF-8 | 1,008 | 4 | 4 | [] | no_license | def factors(n):
"""
Returns list of all factors of n
"""
ls = []
f = 1
m = n
while f <= n:
m = n
if m % f == 0:
ls.append(f)
m /= f
f = f + 1
return ls
def list_primality(n):
"""
Return a list of booleans; index i (for 0 <= i <= n) is True iff i is prime
"""
# Sieve of Eratosthenes
result = [True] * (n + 1) # List of length n + 1, one flag per number 0..n
result[0] = result[1] = False # 0 and 1 are not prime
for i in range(sqrt(n) + 1): # sqrt() is the integer square root defined below
if result[i]: # Only compute if not marked False yet
for j in range(i * i, len(result), i):
result[j] = False # mark all multiples
return result
def list_primes(n):
    """Return every prime p with p <= n, in increasing order."""
    flags = list_primality(n)
    return [value for value, is_prime in enumerate(flags) if is_prime]
def sqrt(x):
    """Integer square root: the largest y with y*y <= x.  Requires x >= 0."""
    assert x >= 0
    # Find the highest power of two whose square does not exceed x.
    bit = 1
    while bit * bit <= x:
        bit *= 2
    # Descend bit by bit, keeping the candidate whenever its square fits.
    root = 0
    while bit > 0:
        candidate = root + bit
        if candidate * candidate <= x:
            root = candidate
        bit //= 2
    return root
| true |
d3177b9f6414a6fac162d872256b121dbaee19fd | Python | CedricJ08/Stock_prediction | /Code Annex/RNN_AWS.py | UTF-8 | 2,789 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 2 13:33:30 2018
@author: Alu
"""
# LSTM stock-price forecaster: predict a closing price from the Day_before
# closes that follow it in the file (the CSV apparently stores the newest
# rows first — the first Size_test rows become the test set; confirm).
Day_before= 60
Size_test = 100
import numpy as np
import pandas as pd
dataset = pd.read_csv('data.csv')
dataset_train = dataset[Size_test:]
dataset_test = dataset[:Size_test]
################################################# Preprocess Train ######################################
# Column 4 holds the target price series (presumably 'close' — see below).
training_set = dataset_train.iloc[:, 4:5].values
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
X_train = []
y_train = []
# Each sample: the Day_before scaled prices following row i predict row i.
for i in range(len(training_set)-Day_before):
X_train.append(training_set_scaled[i+1:i+Day_before +1, 0])
y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
################################################### Fit Model #############################################
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Four stacked 50-unit LSTM layers with dropout, then a single linear output.
regressor = Sequential()
regressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))
regressor.add(Dense(units = 1))
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
regressor.fit(X_train, y_train, epochs = 100, batch_size = 32)
################################################# Preprocess Testset ########################################
real_stock_price = dataset_test.iloc[:, 4:5].values
dataset_total = pd.concat((dataset_test['close'] , dataset_train['close']), axis = 0)
inputs = dataset_total[:len(dataset_test) + Day_before].values
inputs = inputs.reshape(-1,1)
# Reuse the scaler fitted on the training data.
inputs = sc.transform(inputs)
X_test = []
for i in range(len(inputs)-Day_before):
X_test.append(inputs[i+1:i+Day_before+1, 0])
X_test = np.array(X_test)
################################################### Predict #################################################
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = regressor.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
predicted_stock_price=predicted_stock_price.reshape(1,-1)[0]
real_stock_price = real_stock_price.reshape(1,-1)[0]
# Persist predictions and ground truth for later evaluation.
np.save('pred',predicted_stock_price)
np.save('true',real_stock_price)
1e2bb2eae486452dd6adc049027a044b828de40a | Python | HeHisHim/restartTornado | /testTornado/testTorXSRF.py | UTF-8 | 2,502 | 2.640625 | 3 | [] | no_license | """
XSRF 跨站请求伪造
在Application构造函数中设置xsrf_cookies = True, 因为xsrf_cookies涉及到安全Cookie, 所以还需要同时配置cookie_secret开启密钥
当这个参数被设置时, Tornado将拒绝请求中不包含正确_xsrf值的POST, PUT和DELETE请求
并报错 403 Forbidden('_xsrf' argument missing from POST)
"""
"""
在模板中使用XSRF保护, 只需在模板中添加
{% module xsrf_form_html() %} -- xsrf_token.html
这样在会在模板代码中嵌入一句
<input type="hidden" name="_xsrf" value="2|746f1f8b|cf87bcd4923e5418549766b034d992cf|1554800413"/>
并在cookie中新增了一个_xsrf键值对
"""
"""
RequestHandler.xsrf_token
@property
def xsrf_token(self) -> bytes
该方法本质上是调用了self.set_cookie("_xsrf", self._xsrf_token, **cookie_kwargs)
在cookie中写上_xsrf的值
"""
import tornado.web
import tornado.ioloop
import tornado.httpserver
import tornado.options
import json
import os
import uuid, base64
from tornado.options import options, define
from tornado.web import RequestHandler, MissingArgumentError
define("port", default = 8000, type = int, help = "run server on the given port.")
# Tornado application wiring: routes plus the security settings (XSRF + secure cookies).
class Application(tornado.web.Application):
def __init__(self):
handles = [
(r"/", IndexHandler),
(r"^/(.*)$", StaticFileHandler, {"path": os.path.join(current_path, "static/html")}),
]
settings = dict(
debug = True,
static_path = os.path.join(current_path, "static"),
template_path = os.path.join(current_path, "template"),
cookie_secret = "p8ekSGn5STe7MpVopQjQgUoE1fjuMUtDjTLPWrgVKKg=", # signing key for secure cookies; NOTE(review): hard-coded secret should live outside the source
xsrf_cookies = True, # reject POST/PUT/DELETE requests lacking a valid _xsrf value
)
tornado.web.Application.__init__(self, handlers = handles, **settings)
# Subclass tornado.web.StaticFileHandler so the xsrf_token cookie is set as
# soon as any static page is served (translated from the original comment).
class StaticFileHandler(tornado.web.StaticFileHandler):
def __init__(self, *args, **kwargs):
tornado.web.StaticFileHandler.__init__(self, *args, **kwargs)
self.xsrf_token
# Renders the XSRF demo form on GET; any POST that passes the XSRF check
# simply answers "OK".
class IndexHandler(RequestHandler):
def get(self):
# self.xsrf_token # (commented out) would also set the token on GET
self.render("xsrf_token.html")
def post(self):
self.write("OK")
if __name__ == "__main__":
current_path = os.path.dirname(__file__)
tornado.options.parse_command_line()
app = Application()
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.current().start()
| true |
e02a476b3e16bf6b67a5607d8dbb83e714a1e95b | Python | krombo-kode/AdventOfCode2020 | /Day6/solution.py | UTF-8 | 997 | 3.140625 | 3 | [] | no_license | import copy
def answer_list_maker(input_file):
    """Parse blank-line-separated groups of answer lines from ``input_file``.

    Returns a list of groups, each group a list of the stripped lines it
    contains.  A trailing blank line yields a final empty group, matching
    the original behaviour.
    """
    groups = []
    current = []
    with open(input_file, "r") as handle:
        for raw in handle:
            line = raw.rstrip("\n")
            if line:
                current.append(line)
            else:
                # Blank line: close the current group and start a new one.
                groups.append(current)
                current = []
    groups.append(current)
    return groups
def any_yes_counter(group):
    """Number of distinct questions answered 'yes' by anyone in the group."""
    combined = "".join(group)
    return len(set(combined))
def all_yes_counter(group):
    """Number of questions answered 'yes' by every member of the group.

    A question counts when its letter appears exactly once per person, so
    total occurrences equal the group size (each person's answers are
    assumed to list each question at most once).
    """
    combined = "".join(group)
    return sum(1 for question in set(combined)
               if combined.count(question) == len(group))
def yes_sum_finder(groups, func):
    """Apply ``func`` to every group and return the grand total."""
    return sum(func(group) for group in groups)
# Part 1: questions anyone answered yes; Part 2: questions everyone answered yes.
print(yes_sum_finder(answer_list_maker("input.txt"),any_yes_counter))
print(yes_sum_finder(answer_list_maker("input.txt"),all_yes_counter))
| true |
498f2adaef136c66916e635abbe1e2d9eae1f3dd | Python | karnrage/PyStack | /dictionaries.py | UTF-8 | 333 | 3.390625 | 3 | [] | no_license |
# Dictionary demo (Python 2 — note the print statement below).
self_info = {"name": "kamalpreet", "age": "30", "language":"english"} #literal notation
# self_info = {} #create an empty dictionary then add values
# self_info["name"] = "Kamalpreet"
# self_info["age"] = "30"
# self_info["language"] = "english"
# data = ""
# val = ""
# Iterate key/value pairs and print each one.
for key, value in self_info.items():
print key, value
| true |
7606cd6e92458d436e19e31c51896080745b6e31 | Python | B314-N03/PythonProjects | /RandomPassGerman.py | UTF-8 | 532 | 3.640625 | 4 | [] | no_license | import random
import string
import pyperclip
def randPassw(length):
    """Generate a random password of ``length`` characters, print it and
    copy it to the clipboard via pyperclip.
    """
    digits = string.digits
    lower = string.ascii_lowercase
    upper = string.ascii_uppercase
    # BUG FIX: the original `str(['@' '!''#' '*''$' '§''&'])` produced the
    # text "['@!#*$§&']", so brackets, quotes and the literal list syntax
    # leaked into the password alphabet.  Use the plain character set.
    special = '@!#*$§&'
    # NOTE(review): `random` is not cryptographically secure — use the
    # `secrets` module for real passwords.
    passw = ''.join(random.choice(digits + upper + lower + special) for i in range(length))
    print("Random passwort mit der Länge ", length, " ist: ", passw)
    pyperclip.copy(passw)
    print("Es wurde in die Zwischenablage kopiert!")
randPassw(int(input("Welche Länge soll das Passwort haben ? : ")))
| true |
d18cf3c4415c7594826144b22fec91997e046c76 | Python | anima-unr/Distributed_Collaborative_Task_Tree_ubuntu-version-16.04 | /vision_manip_pipeline/scripts/jb_Yolo_obj_det.py | UTF-8 | 1,110 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
import rospy
from gpd.msg import GraspConfigList
from darknet_ros_msgs.msg import BoundingBoxes
from darknet_ros_msgs.msg import BoundingBox
# global variable to store object_locations
obj_loc = []
# Callback function to receive bounding boxes.
# ROS subscriber callback: cache the latest YOLO bounding boxes globally.
def callback(msg):
global obj_loc
obj_loc = msg.boundingBoxes
# Poll the cached detections once per second and print the bounding box and
# its centre for every detection whose class matches obj_name.
# NOTE(review): the `print item` / `print x,y` lines are Python 2 syntax.
def talker(obj_name):
# Create a ROS node.
rospy.init_node('get_object_loc')
# Subscribe to the ROS topic that contains the grasps.
sub = rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, callback)
# Wait for grasps to arrive.
rate = rospy.Rate(1)
while not rospy.is_shutdown():
# print obj_loc
for item in obj_loc:
if item.Class == obj_name:
print item
# calculate the center of the bounding box
x = (item.xmax - item.xmin)/2 + item.xmin
y = (item.ymax - item.ymin)/2 + item.ymin
print x,y
rate.sleep()
# ==================== MAIN ====================
# Entry point: watch for 'cup' detections.
if __name__ == '__main__':
talker('cup')
9920ee09eececcf9857c04661e559e5d06444701 | Python | JpradoH/Ciclo2Java | /Ciclo 1 Phyton/Unidad 2/Ejercicios/Imprimir cadenas str.py | UTF-8 | 319 | 3.75 | 4 | [] | no_license | #imprimr cadenas de str de varias formas
camellos = 42
ver ='Hevisto %d camellos' % camellos #% cumple funcion de .format. he imprimir str
ver1 ='Hevisto {} camellos'.format(camellos) # #una forma de imprimir str
ver2 ='Hevisto '+str(camellos)+ ' camellos' #una forma de imprimir str
print(ver)
print(ver1)
print(ver2) | true |
3f6218cb627e064b413ee970b4f4f9f41d1f8d5c | Python | islamuzkg/LPTHW | /ex31_MakingDecisions/ex31.py | UTF-8 | 4,548 | 3.734375 | 4 | [] | no_license | # Apologize for spelling mistake, please, just don't tell my wife
print """
You enter a dark room through below doors.
Each does will take you to different adventure.
#1 bear
#2 insanity
#3 media
#4 gym
#5 technology
#6 school
#7 food
#8 animals
"""
door = raw_input("> ")
if door == "1":
print "There's a giant bear eating a cheese cake. What do you do?"
print "1. Taka the cake."
print "2. Scream at the bear."
print "3. Lid a fire to scare the bear"
bear = raw_input("> ")
if bear == "1":
print "Bear eats your face off. Good job!"
elif bear == "2":
print "Bear eats your leg off. Good job!"
elif bear == "3":
print "That was not bad, now cheese cake is yours."
else:
print "Well doing %s is probably better, Bear runs away." % bear
elif door == "2":
print "You stare into endless abyss at the Cthulhu's retina."
print "1. Blueberries."
print "2. Yello jackets clothespins."
print "3. Understanding resolvers yelling melodies."
insanity = raw_input("> ")
if insanity == "1" or insanity == "2":
print "Your body services powered by a mind of jello. Good job!"
else:
print "The insanity rots your eyes into a pool of muck. Good job!"
elif door == "3":
print """
Where do you usually watch movies or shows?"
1. Youtube
2. Netflix
3. Hulu
4. AMC
"""
media = raw_input("> ")
if media == "1":
print "Youst know how to search what you watch"
elif media == "2":
print "Tired of watching of old stuffs, although they have some cool stuffs to offer"
elif media == "3":
print "Must be same as Netflix, not bad"
elif media == "4":
print "You must be single"
else:
print "You do not like watching movies or shows? You might be doing right thing."
elif door == "4":
print "Where do you go for exercises?"
print "1. Martial Art"
print "2. Lifting"
print "3. Gymnastics."
print "4. Swimming"
gym = raw_input("> ")
if gym == "1" or gym == "2":
print "Sport must be part of your lifestyle."
print "\nWhat kinda sport do you like?"
print "1. Self defense."
print "2. Bodybuilding"
sport = raw_input("> ")
if sport == "1":
print "Be water my friend."
elif sport == "2":
print "Make sure you don't skip the leg day."
else: # it is printing else block as well when if 1st if block is true
print "Any sport is better than nothing!"
elif gym == "3" or gym == "4":
print "Healthy body will have healthy mind!"
print "\n Please tell us which one you like? \n1. Gymnastics \n2. Swimming"
sport = raw_input("> ")
if sport == "1":
print "You are flexible person."
elif sport == "2":
print "You must swim like a wish."
else:
print "You are busy with something else"
else:
print "You are missing a lot"
elif door == "5":
print "\nWhich of these phones do you have"
print "\n1. Iphone \n2. Android \n3 Google phone"
tech = raw_input("> ")
if tech == "1":
print "You are fan of Steve Job."
elif tech == "2":
print "You lke your freedom."
elif tech == "3":
print "like to try new stuffs?"
else:
print "You dont chase the brand, smart one"
elif door == "6":
print "\n What is your highest level of education"
print """
1. High school.
2. Bachelor's degree.
3. Master degree.
"""
school = raw_input("> ")
if school == "1":
print "You have some way to go for success."
elif school == "2":
print "You might be thinking, if you need start working or continue to master degree."
elif school == "3":
print "You are pretty settled, and looking for a job."
else:
print "Education is important, hope you have a plan"
elif door == "7":
print "How do you get you protain?"
print """
1. Beef
2. Chicken
3. Fish
4. veggie
"""
food = raw_input("> ")
if food == "1":
print "You're a meat lover"
elif food == "2":
print "Healthy option, that sounds good."
elif food == "3":
print "It is realy good stuff, I bet you love sushi."
elif food == "4":
print "It is the best way to stay lean and fit."
else:
print "Protain is important for your body."
elif door == "8":
print "What is you fave animal?"
print """
1. cat
2. dog
"""
animal = raw_input("> ")
if animal == "1" or animal == "2":
print "You have very kind heard!"
else:
print "please enter animal name you like, else just say no. No judging."
animal_name = raw_input("> ")
if (animal_name == "NO" or animal_name == "no") or animal_name == "No":
print "It is ok, no judging"
else:
print "%s is cool one" % animal_name
else:
print "You stumble around and fall on a knife and die. Good job!"
| true |
8c0e9372c43bac7910694b9cd47a10e9615ef7da | Python | ApplauseOSS/keycloak-config-tool | /keycloak_config/keycloak_client.py | UTF-8 | 7,278 | 2.90625 | 3 | [
"MIT"
] | permissive | """
Keycloak Client.
~~~~~~~~~~~~~~~~
"""
import re
import requests
import time
class NoSessionException(Exception):
    """Raised when a token operation is attempted before a successful login."""
    pass
class KeycloakClient(object):
    """Thin client for the Keycloak admin REST API.

    Handles waiting for the service to come up, logging in as an admin via
    the ``admin-cli`` client, transparently refreshing the access token on a
    401, and issuing authenticated HTTP requests.
    """

    ADMIN_LOGIN_CLIENT_ID = 'admin-cli'
    RELATIVE_HEALTH_CHECK_ENDPOINT = '/realms/master'
    RELATIVE_TOKEN_ENDPOINT = '/realms/master/protocol/openid-connect/token'
    HEALTH_CHECK_INTERVAL = 5
    # Keys of the token-endpoint JSON response.  These constants existed
    # before but the methods below used string literals; they now use the
    # constants so the key names live in one place.
    ACCESS_TOKEN_KEY = 'access_token'
    REFRESH_TOKEN_KEY = 'refresh_token'

    def __init__(self, base_url):
        """
        Constructor.
        :param base_url: The base URL of the Keycloak service
                         (trailing slashes are stripped).
        :return: The Keycloak client.
        """
        self.base_url = re.sub(r'/+$', '', base_url)
        self.health_check_endpoint = self.base_url + self.RELATIVE_HEALTH_CHECK_ENDPOINT
        self.token_endpoint = self.base_url + self.RELATIVE_TOKEN_ENDPOINT
        # Token response dict from the last successful login/refresh;
        # None until initialize_session() succeeds.
        self.session_data = None

    def wait_for_availability(self, timeout):
        """
        Wait for Keycloak to become available, polling every
        HEALTH_CHECK_INTERVAL seconds.
        :param timeout: The maximum number of seconds to wait.
        :return: True if the Keycloak service became available, False otherwise.
        """
        end_time = time.time() + timeout
        while time.time() < end_time:
            if self.check_availability():
                print('==== Keycloak is available.')
                return True
            print('==== Keycloak is not yet available.')
            # Never sleep past the deadline.
            sleep_duration = min(end_time - time.time(), self.HEALTH_CHECK_INTERVAL)
            time.sleep(sleep_duration)
        print('==== Keycloak never became available.')
        return False

    def check_availability(self):
        """
        Check Keycloak availability by requesting the master realm data.
        Network errors are treated as "not available" rather than raised.
        :return: True if the Keycloak service is available, False otherwise.
        """
        available = False
        try:
            response = requests.get(self.health_check_endpoint)
            available = response.status_code == requests.codes.ok
        except Exception:
            # Deliberate best-effort probe: any failure means "not up yet".
            pass
        return available

    def initialize_session(self, username, password):
        """
        Initialize the admin session by logging in with the provided
        username and password (resource-owner password grant).
        :param username: The username to use when logging in.
        :param password: The password to use when logging in.
        :return: True if the login succeeds, False otherwise.
        """
        login_data = {
            'grant_type': 'password',
            'client_id': self.ADMIN_LOGIN_CLIENT_ID,
            'username': username,
            'password': password
        }
        try:
            response = requests.post(self.token_endpoint, data=login_data)
            if response.status_code == requests.codes.ok:
                self.session_data = response.json()
                print('==== Login succeeded.')
                return True
            print('==== Login failed ({0}): {1}'.format(response.status_code, response.text))
            return False
        except Exception as err:
            print('==== Login failed: {0}'.format(err))
            return False

    def refresh_session(self):
        """
        Refresh the admin session by using the refresh token.
        :raises NoSessionException: if no session has been initialized.
        :return: True if the session refresh succeeds, False otherwise.
        """
        if not self.session_data:
            raise NoSessionException()
        login_data = {
            'grant_type': 'refresh_token',
            'client_id': self.ADMIN_LOGIN_CLIENT_ID,
            'refresh_token': self.session_data[self.REFRESH_TOKEN_KEY]
        }
        try:
            response = requests.post(self.token_endpoint, data=login_data)
            if response.status_code == requests.codes.ok:
                self.session_data = response.json()
                print('==== Session refresh succeeded.')
                return True
            print('==== Session refresh failed ({0}): {1}'.format(response.status_code, response.text))
            return False
        except Exception as err:
            print('==== Session refresh failed: {0}'.format(err))
            return False

    def get(self, path, params=None, **kwargs):
        """
        Performs a GET request.
        :param path: The request path, relative to the base URL.
        :param params: The query parameters.
        :param kwargs: Additional parameters.
        :return: The GET response.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.execute_request('get', path, params=params, **kwargs)

    def post(self, path, data=None, json=None, **kwargs):
        """
        Performs a POST request.
        :param path: The request path, relative to the base URL.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the request.
        :param json: (optional) JSON data to send in the body of the request.
        :param kwargs: Additional parameters.
        :return: The POST response.
        """
        return self.execute_request('post', path, data=data, json=json, **kwargs)

    def put(self, path, data=None, **kwargs):
        """
        Performs a PUT request.
        :param path: The request path, relative to the base URL.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the request.
        :param kwargs: Additional parameters.
        :return: The PUT response.
        """
        return self.execute_request('put', path, data=data, **kwargs)

    def delete(self, path, **kwargs):
        """
        Performs a DELETE request.
        :param path: The request path, relative to the base URL.
        :param kwargs: Additional parameters.
        :return: The DELETE response.
        """
        return self.execute_request('delete', path, **kwargs)

    def add_bearer_token(self, **kwargs):
        """
        Adds the "Bearer" token to the request parameters.
        :param kwargs: The request parameters.
        :raises NoSessionException: if no session has been initialized
                (previously this failed with an opaque TypeError).
        :return: The updated request parameters.
        """
        if not self.session_data:
            raise NoSessionException()
        bearer = 'Bearer {0}'.format(self.session_data[self.ACCESS_TOKEN_KEY])
        if 'headers' in kwargs:
            kwargs['headers']['Authorization'] = bearer
        else:
            kwargs['headers'] = {'Authorization': bearer}
        return kwargs

    def execute_request(self, method, path, **kwargs):
        """
        Generic method for performing requests; retries once with a
        refreshed token when the server answers 401.
        :param method: The request method.
        :param path: The request path, relative to the base URL.
        :param kwargs: The request parameters.
        :return: The resulting response.
        """
        new_kwargs = self.add_bearer_token(**kwargs)
        url = self.base_url + '/' + re.sub(r'^/+', '', path)
        response = requests.request(method, url, **new_kwargs)
        # We may need to perform a token refresh.
        if response.status_code == requests.codes.unauthorized and self.refresh_session():
            new_kwargs = self.add_bearer_token(**kwargs)
            response = requests.request(method, url, **new_kwargs)
        return response
| true |
c8f713858e2133a22c5e680a0e1217a24d828a35 | Python | JingkaiTang/github-play | /want_public_group/big_day_and_day/child/big_eye/fact/work_or_day.py | UTF-8 | 241 | 2.671875 | 3 | [] | no_license |
#! /usr/bin/env python
def company_and_young_week(str_arg):
    """Print *str_arg* (via new_man_and_child) and then the word 'problem'."""
    new_man_and_child(str_arg)
    print('problem')
def new_man_and_child(str_arg):
    """Print the given argument on its own line."""
    print(str_arg)
if __name__ == '__main__':
    # Demo invocation: prints 'point_and_number' followed by 'problem'.
    company_and_young_week('point_and_number')
| true |
22fb6e3adfb538d448bf91029032a829c5a8bd56 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2667/60898/317201.py | UTF-8 | 136 | 2.796875 | 3 | [] | no_license | t=eval(input())
# NOTE(review): 't' above comes from eval(input()); prefer int(input()) so
# arbitrary expressions are never evaluated.
# Each of the t test cases is a line "i l"; the answer is 2**l - i.
for _ in range(t):
    parts = input().split()
    i = int(parts[0])
    l = int(parts[1])
    print(pow(2, l) - i)
d27d2a1ff323458663ade13e83c066edc82f8946 | Python | cwczarnik/machine_learning_analysis_packages | /logistic_regression_classifier_analysis.py | UTF-8 | 1,890 | 2.90625 | 3 | [] | no_license | from sklearn import metrics
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_curve, precision_recall_curve
def logistic_regression_analysis(model,X_train,y_train,X_test,y_test):
    """Fit a logistic-regression model, plot PR/ROC/threshold curves and an
    odds-ratio bar chart, print a classification report, and return the
    fitted model.

    NOTE(review): this function uses ``plt`` (matplotlib) and ``np`` (numpy),
    which are not imported in this file -- confirm the missing imports.
    ``X_test`` is indexed via ``.columns`` / ``.shape``, so it is presumably a
    pandas DataFrame -- verify against callers.
    """
    model.fit(X_train,y_train)
    # Probability of the positive class (column 1 of predict_proba).
    y_pred = model.predict_proba(X_test)[:,1]
    prec, rec, thresh_ = precision_recall_curve(y_test,y_pred)
    fpr,tpr, thresh = roc_curve(y_test,y_pred)
    # Side-by-side precision/recall and ROC curves.
    plt.figure(figsize=(10,5))
    plt.subplot(1, 2, 1)
    plt.plot(rec,prec)
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.subplot(1,2,2)
    plt.plot(fpr,tpr)
    plt.plot([1,0], [1,0], 'k--', lw = 2)
    plt.xlabel('fpr')
    plt.ylabel('tpr')
    plt.show()
    # F1 = 2 * (prec * rec) / (prec + rec)
    thresh = list(thresh)
    # thresh.append(1)
    plt.plot(thresh,tpr)
    plt.title('TPR Versus Threshold')
    plt.ylabel('tpr')
    plt.xlabel('Threshold')
    plt.show()
    # NOTE(review): duplicate plt.show() below -- likely a leftover.
    plt.show()
    # F1 = 2 * (prec * rec) / (prec + rec)
    thresh = list(thresh)
    # thresh.append(1)
    plt.plot(thresh,fpr)
    plt.title('FPR Versus Threshold')
    plt.ylabel('fpr')
    plt.xlabel('Threshold')
    plt.show()
    # Signed odds ratios per feature, sorted for the horizontal bar chart.
    odds = np.exp(model.coef_[0])*np.sign(model.coef_[0])
    sorted_index = odds.argsort()
    fig, ax = plt.subplots(figsize=(6, 11))
    width = 0.75 # the width of the bars
    ind = np.arange(X_test.shape[1]) # the x locations for the groups
    ax.set_yticks(ind+width/2)
    ax.set_yticklabels(X_test.columns[sorted_index])
    ax.barh(ind, odds[sorted_index])
    plt.title('Odds Ratio w/ sign for each feature')
    plt.show()
    print("At threshold = 0.5")
    # It is worse to class a customer as good when they are bad,
    # than it is to class a customer as bad when they are good.
    print(metrics.classification_report(y_test,y_pred > 0.5))
    print('accuracy: ',metrics.accuracy_score(y_test,y_pred > 0.5))
    return(model)
680030d19b77e1e4d315cca6d6216bf5be908b8b | Python | Yey007/HOI4ImageGenerator | /generator.py | UTF-8 | 863 | 2.8125 | 3 | [] | no_license | import os
import sys
import errno
from PIL import Image
import glob
def main():
    """Create the three output folders, then emit resized variants of every
    .png/.jpg/.jpeg image in the current directory."""
    for folder in ("Large", "Medium", "Small"):
        trymakedir(folder)
    images = glob.glob("*.png") + glob.glob("*.jpg") + glob.glob("*.jpeg")
    for infile in images:
        name, extension = os.path.splitext(infile)
        generatevariants(name, extension)
def trymakedir(name):
    """Create directory *name* unless it already exists (no-op otherwise)."""
    if os.path.exists(name):
        return
    os.mkdir(name)
def generatevariants(filename, ext):
    """Save large (82x52), medium (41x26) and small (10x7) copies of the
    image ``filename + ext`` into the Large/, Medium/ and Small/ folders.

    The output names now include the source file name; previously the
    format strings ignored ``filename``, so every input image overwrote
    the same three files.
    """
    img = Image.open(filename + ext)
    imgLarge = img.resize((82, 52))
    imgMedium = img.resize((41, 26))
    imgSmall = img.resize((10, 7))
    # NOTE(review): output is forced to PNG format even when ext is
    # .jpg/.jpeg -- confirm whether the extension should be .png instead.
    imgLarge.save(f"Large/{filename}_large{ext}", "PNG")
    imgMedium.save(f"Medium/{filename}_medium{ext}", "PNG")
    imgSmall.save(f"Small/{filename}_small{ext}", "PNG")
# Guarded entry point: previously main() ran unconditionally, so merely
# importing this module resized every image in the working directory.
if __name__ == "__main__":
    main()
| true |
ad7f3ea8fd63d1b418cc03e1344582063c54edcc | Python | alex-romanovskii/summareyez | /eyetrackergui.py | UTF-8 | 12,472 | 2.765625 | 3 | [] | no_license | from tkinter import *
from tkinter import messagebox
import os
from tkinter.ttk import Combobox
from PIL import Image, ImageTk
import random
import time
import pandas as pd
import numpy as np
class First_screen(Tk):
    """Fullscreen Tk start screen: collects the participant's name, age, sex,
    text choice and gaze-point preference, then closes itself.  The collected
    values are left on the instance (user_name, user_age, user_sex,
    user_text_name, user_text, points) for the caller to read."""
    def __init__(self):
        super().__init__()
        self.config(cursor='circle red')
        self.width = self.winfo_screenwidth()
        self.height = self.winfo_screenheight()
        self.background_photo=ImageTk.PhotoImage(Image.open("background.jpg"))
        self.background = Label(self, image=self.background_photo)
        self.background.place(x=0, y=0)
        self.logo = ImageTk.PhotoImage(Image.open("logo.png"))
        self.draw_logo = Label(self, image=self.logo)
        self.draw_logo.place(relx=0.7, rely=0.1)
        self.font=("helvetica Bold", 20)
        # Available text names: every *.txt in texts/, extension stripped.
        self.texts=[text.split('.txt')[0] for text in os.listdir('texts')]
        self.title("SummerEyes")
        self.attributes('-fullscreen', True)
        self.lbl_name = Label(self, text="Entry your name",font=self.font)
        self.lbl_name.place(relx=0.1, rely=0.1)
        self.txt_name = Entry(self, width=8,font=self.font)
        self.txt_name.place(relx=0.3, rely=0.1)
        self.lbl_age = Label(self, text="Entry your age",font=self.font)
        self.lbl_age.place(relx=0.1, rely=0.2)
        self.spin_age = Spinbox(self, from_=18, to=30, width=3,font=self.font)
        self.spin_age.place(relx=0.3, rely=0.2)
        self.lbl_sex = Label(self, text="Choose your sex",font=self.font)
        self.lbl_sex.place(relx=0.1, rely=0.3)
        self.combo_sex = Combobox(self,font=self.font,width=8)
        self.combo_sex['values'] = ('Male','Female')
        self.combo_sex.place(relx=0.3, rely=0.3)
        self.lbl_choose_text = Label(self, text="Choose text",font=self.font)
        self.lbl_choose_text.place(relx=0.1, rely=0.4)
        self.combo = Combobox(self,font=self.font,width=8)
        self.combo['values'] = self.texts
        self.combo.place(relx=0.3, rely=0.4)
        self.chk_state = BooleanVar()
        self.chk_state.set(False)
        self.chk = Checkbutton(self, text='I want to see gaze point', var=self.chk_state,font=self.font)
        self.chk.place(relx=0.1, rely=0.5)
        self.btn = Button(self, text="START", command=self.clicked,font=self.font)
        self.btn.place(relx=0.1, rely=0.7)
        self.btn_exit = Button(self, text="EXIT", command=self.destroy,font=self.font)
        self.btn_exit.place(relx=0.2, rely=0.7)
    def clicked(self):
        # START handler: validate the form, stash the answers on self,
        # load the chosen text from disk and close the window.
        user_name = self.txt_name.get()
        user_age=self.spin_age.get()
        user_sex=self.combo_sex.get()
        user_text=self.combo.get()
        points=self.chk_state.get()
        if len(user_name)==0 or len(user_sex)==0 or len(user_text)==0:
            messagebox.showinfo('Error', 'Try again')
        else:
            self.user_name = user_name
            self.user_age=user_age
            self.user_sex=user_sex
            self.user_text_name=user_text
            self.user_text=(open('texts/{}.txt'.format(self.user_text_name),'r')).read()
            self.points=points
            self.destroy()
class Create_text(Tk):
    """Fullscreen reading window: lays the chosen text out word by word on a
    canvas, records per-sentence gaze fixations fed in through get_bbox(),
    and exports the counts as a pandas DataFrame (CSV under results/).

    NOTE(review): several identifiers below ('sentenсe', including the
    DataFrame column name) contain a Cyrillic 'с' -- keep them byte-identical
    or downstream consumers of the CSV will break.
    """
    def __init__(self,user_name,text,text_name,user_gender,user_age,eye_tracker=False,see_rectangle=True,points=True,verbose=True):
        super().__init__()
        self.start_time=time.time()
        self.config(cursor='circle red')
        self.user_name=user_name
        self.text_name=text_name
        self.user_gender=user_gender
        self.user_age=user_age
        self.text=text
        self.see_rectangle=see_rectangle
        self.points=points
        self.verbose=verbose
        self.title("SummerEyes")
        self.width = self.winfo_screenwidth() #get display width
        self.height = self.winfo_screenheight() #get display height
        self.attributes('-fullscreen', True)
        self.font=("helvetica", 20)
        self.canvas_background="white" #backgroun color
        self.canvas = Canvas(self,bg=self.canvas_background,width=self.width, height=self.height)
        self.canvas.pack() #necessarily
        self.eye_tracker=eye_tracker # if x,y coordinates from 0 to 1 set eye_tracker=True to convert them to px
        self.start_position_x=40 #start text position (x)
        self.start_position_y=40 #start text position (y)
        # Running count of distinct sentence visits; last sentence looked at.
        self.fixation_number=0
        self.previous_fixation=None
        # Maps sentence index -> [sentence, word boxes, fixation count, order].
        self.bbox_info=None
        self.print_text()
    def print_text(self):
        # Render the text word by word, wrapping lines, and record each
        # word's bounding box so gaze coordinates can be mapped to sentences.
        self.button_save = Button(self, text = "Save and Exit", command = self.quit, font=self.font,anchor=W)
        self.button_save.place(relx=0.03, rely=0.9)
        self.button_questions = Button(self, text = "Questions", command = self.questions, font=self.font,anchor=W)
        self.button_questions.place(relx=0.8, rely=0.9)
        bbox_info={}
        for index,sentenсe in enumerate(self.text.split(".")):
            if len(sentenсe)==0:
                continue
            sentenсe=sentenсe.lstrip()
            positions=[]
            for number,word in enumerate(sentenсe.split(" ")):
                if len(word)==0:
                    continue
                if number==len(sentenсe.split(" "))-1:
                    suffix='. '
                else:
                    suffix=' '
                sent_id = self.canvas.create_text(self.start_position_x, self.start_position_y,
                                                  text=word+suffix,font=self.font,
                                                  justify=LEFT, fill="black",anchor=NW)
                bbox = self.canvas.bbox(sent_id)
                if self.see_rectangle==True:
                    self.canvas.create_rectangle(bbox, outline="black") #draw word rectangles
                width=self.start_position_x + bbox[2] - bbox[0] + 5
                x_left=self.start_position_x
                x_right=self.start_position_x+ bbox[2] - bbox[0]
                y_up=self.start_position_y
                y_down=self.start_position_y+ bbox[3] - bbox[1]
                # Wrap to the next line when the word would overflow the screen.
                if width+120<self.width:
                    self.start_position_x += bbox[2] - bbox[0]
                else:
                    self.start_position_x=40
                    self.start_position_y+=40
                positions.append([x_left,x_right,y_up,y_down])
            bbox_info[index]=[(sentenсe),(positions),(0),[]]
        self.bbox_info=bbox_info
        self.update()
    def draw_point(self,x,y):
        # Draw the gaze marker, deleting the previous one if it exists.
        try:
            self.canvas.delete(self.point)
        except:
            pass
        self.point=self.canvas.create_oval(x-10, y-10, x, y, outline="#2541f4",width=5)
        self.update()
    def quit(self):
        # Save-and-exit handler: compute reading time, write the CSV, close.
        self.finish_time=time.time()
        self.read_time=round(self.finish_time-self.start_time)
        self.get_output(save=True)
        # NOTE(review): '/n' in the message below is likely meant to be '\n'.
        messagebox.showinfo('File was saved', 'File was saved/n You read {} sec'.format(self.read_time))
        self.destroy()
    def questions(self):
        # Questions handler: close this window and open the question screen.
        self.finish_time=time.time()
        self.read_time=round(self.finish_time-self.start_time)
        self.get_output(save=False)
        messagebox.showinfo('Questions', 'Start questions')
        self.destroy()
        self=QuestionScreen(self.text_name,self.output,self.user_name)
        self.mainloop()
    def get_bbox(self,x,y):
        # Map a gaze coordinate to the word box it falls in and update the
        # enclosing sentence's fixation count and visit order.
        if self.eye_tracker==True:
            # Tracker coordinates are normalized [0, 1]; scale to pixels.
            x = (x*self.width)
            y = (y*self.height)
        if self.points==True:
            self.draw_point(x,y)
        for key,value in self.bbox_info.items():
            positions=value[1]
            for position in positions:
                x_left=position[0]
                x_right=position[1]
                y_up=position[2]
                y_down=position[3]
                if x_left<=x<=x_right and y_up<=y<=y_down:
                    index=key
                    sentenсe=value[0]
                    positions=value[1]
                    fixations=value[2]+1
                    # A new fixation event only when the sentence changed.
                    if self.previous_fixation!=index:
                        self.fixation_number+=1
                        self.previous_fixation=index
                    order=value[3]
                    order.append(self.fixation_number)
                    self.bbox_info[index]=[(sentenсe),(positions),(fixations),order]
                    if self.verbose==True:
                        print('Number sentence:{}, Sentence:{}, Order sentence:{}'.format(index,sentenсe,self.fixation_number))
                    self.update()
                    break
    def get_output(self,save): # create dataframe from bbox_info
        # Builds self.output (one row per sentence) and optionally writes it
        # to results/fixations_<user>_<text>.csv.
        # NOTE(review): display() is not defined in this file -- presumably
        # IPython.display; confirm the runtime environment.
        self.output=pd.DataFrame([(a,b[0],b[2],b[3]) for a,b in self.bbox_info.items()],columns=['index','sentenсe','count_fixation','fixation_order'])
        self.output['count_words']=self.output['sentenсe'].apply(lambda x:len(x.split(' ')))
        self.output['count_fixation_normalized']=self.output['count_fixation']/self.output['count_words']
        self.output['user_name']=self.user_name
        self.output['text']=self.text_name
        self.output['fixation_order']=self.output['fixation_order'].apply(lambda x:list(set(x)))
        self.output['Age']=self.user_age
        self.output['Gender']=self.user_gender
        self.output['Time']=self.read_time
        if save==True:
            self.output.to_csv('results/fixations_{}_{}.csv'.format(self.user_name,self.text_name),index=False)
        display(self.output)
class QuestionScreen(Tk):
    """Fullscreen multiple-choice quiz for the text just read.  Questions are
    loaded from questions/questions.npy (a dict keyed by text name, each
    entry an iterable of [question, a1, a2, a3, a4, correct]).  Answers are
    scored and appended to the user's results DataFrame/CSV."""
    def __init__(self,text_name,user_df,user_name):
        super().__init__()
        self.configure(background='white')
        self.logo = ImageTk.PhotoImage(Image.open("logo.png"))
        self.draw_logo = Label(self, image=self.logo)
        self.draw_logo.place(relx=0.7, rely=0.1)
        self.attributes('-fullscreen', True)
        self.text_name=text_name
        questions=np.load("questions/questions.npy",allow_pickle=True).item()
        # Generator over this text's questions, consumed one per start() call.
        self.questions=(i for i in questions[self.text_name])
        self.first_question=True
        self.correct_answers=[]
        self.user_df=user_df
        self.user_name=user_name
        self.start()
    def start(self):
        # Score the previous answer (skipped on the first call), then show
        # the next question, or save results and close when none remain.
        if self.first_question==False:
            self.user_answer=self.r_var.get()
            if self.user_answer==self.right_question:
                self.correct_answers.append(1)
            else:
                self.correct_answers.append(0)
        self.first_question=False
        try:
            all_question=next(self.questions)
        except:
            # Out of questions: append per-question accuracy columns and save.
            for number_question,accuracy in enumerate(self.correct_answers,1):
                self.user_df['Question {}'.format(number_question)]=accuracy
            self.user_df.to_csv('results/fixations_{}_{}.csv'.format(self.user_name,self.text_name),index=False)
            # NOTE(review): display() is undefined in this file (IPython?);
            # also, execution falls through past destroy() to the lines below,
            # where all_question is unbound -- confirm intended behavior.
            display(self.user_df)
            messagebox.showinfo('Finished', 'You finished')
            self.destroy()
        self.question=all_question[0]
        self.answ_1=all_question[1]
        self.answ_2=all_question[2]
        self.answ_3=all_question[3]
        self.answ_4=all_question[4]
        self.right_question=all_question[5]
        self.lbl_name = Label(self, text="{}".format(self.question),font=("helvetica Bold", 20),bg='white')
        self.lbl_name.place(relx=0.1, rely=0.25)
        self.r_var = StringVar()
        self.r_var.set(0)
        self.rad1 = Radiobutton(self, text=self.answ_1, value=self.answ_1,variable=self.r_var,font=("helvetica Bold", 20),bg='white')
        self.rad2 = Radiobutton(self, text=self.answ_2, value=self.answ_2,variable=self.r_var,font=("helvetica Bold", 20),bg='white')
        self.rad3 = Radiobutton(self, text=self.answ_3, value=self.answ_3,variable=self.r_var,font=("helvetica Bold", 20),bg='white')
        self.rad4 = Radiobutton(self, text=self.answ_4, value=self.answ_4,variable=self.r_var,font=("helvetica Bold", 20),bg='white')
        self.rad1.place(relx=0.1, rely=0.4)
        self.rad2.place(relx=0.1, rely=0.5)
        self.rad3.place(relx=0.1, rely=0.6)
        self.rad4.place(relx=0.1, rely=0.7)
        self.btn_exit = Button(self, text="SUBMIT", command=self.start,font=("helvetica Bold", 20))
        self.btn_exit.place(relx=0.1, rely=0.8)
        self.btn_exit = Button(self, text="EXIT", command=self.destroy,font=("helvetica Bold", 20))
        self.btn_exit.place(relx=0.8, rely=0.8)
d8dd2fbe9f71e651b2681f55f91731252b62acb4 | Python | persesvilhena/python_studies | /outros/codigos2/Codigos/14 - servidor.py | UTF-8 | 263 | 2.578125 | 3 | [] | no_license | from socket import socket, AF_INET, SOCK_STREAM
HOST = ''
PORT = 2223
s = socket(AF_INET, SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1) # Numero de Conexoes
conn, addr = s.accept()
data = conn.recv(1024)
print data
conn.send('Mensagem do Servidor!')
conn.close()
| true |
4d4d1de990925ddd2419c6f73842e905ece3ceb2 | Python | paper-NLP/en-cy-bilingual-embeddings | /src/main_test.py | UTF-8 | 717 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | import data_manager
from argparse import ArgumentParser
from gensim.models import FastText,Word2Vec
import logging
if __name__ == '__main__':
    # CLI driver: iterate a corpus file, print its lines and the 100 most
    # frequent words, then train a FastText model and show neighbours of a
    # sample Welsh word.
    parser = ArgumentParser()
    parser.add_argument('-c','--corpus', help='Corpus file', required=True)
    args = parser.parse_args()
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    corpus = data_manager.ExampleCorpus(args.corpus)
    for line in corpus:
        print(line)
    # Gets most frequent words
    topk = corpus.get_topk_words(topk=100)
    print('Most frequent words:')
    for k in topk:
        print(k)
    # NOTE(review): size=/iter= are gensim <4.0 parameter names -- confirm
    # the pinned gensim version.
    ft = FastText(size=100, window=5, min_count=3, sentences=corpus, iter=10)
    for a,b in ft.most_similar('felltithio'):
        print(a,b)
| true |
116f766b8c8d98557c6fea40199f0e508002ff27 | Python | bmilenki/Connect-3-AI---Minimax | /main.py | UTF-8 | 842 | 2.796875 | 3 | [] | no_license | import util
import connect3 as c3
import human
import game
import agent
def _make_player(kind, symbol):
    """Map a player-kind string ('human'/'random'/'minimax') to an agent
    playing the given symbol; return None for an unknown kind."""
    if kind == "human":
        return human.HumanPlayer(symbol)
    if kind == "random":
        return agent.RandomPlayer(symbol)
    if kind == "minimax":
        return agent.MinimaxPlayer(symbol)
    return None

def main():
    """Read the two player kinds from the command line, build both agents and
    play a game of Connect 3.

    Previously an unknown player kind printed a message but still fell
    through to Game construction, crashing with a NameError; now the
    function reports the problem and returns cleanly.
    """
    p1 = util.get_arg(1)
    p2 = util.get_arg(2)
    currState = c3.State()
    player1 = _make_player(p1, "X")
    player2 = _make_player(p2, "O")
    if player1 is None:
        print("Player 1 has no agent of that type")
        return
    if player2 is None:
        print("Player 2 has no agent of that type")
        return
    currGame = game.Game(currState, player1, player2)
    currGame.play()

if __name__ == "__main__":
    main()
f47884106136c76477a7b850dd2e4ff83b15b0b7 | Python | jennyshane/nn_demos | /perceptron.py | UTF-8 | 1,663 | 2.703125 | 3 | [] | no_license | import time
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import tensorflow
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras import optimizers
# Perceptron demo: two 2-D Gaussian point clouds, a single sigmoid unit
# trained with SGD, and a live matplotlib animation of the learned decision
# boundary after each training epoch.
class1center=[3, 3]
class2center=[0, 0]
numsamples=2000
# Class 1: unit-variance cloud around (3,3); class 2: tighter cloud at origin.
points1=class1center+np.random.randn(numsamples, 2)
points2=class2center+.5*np.random.randn(numsamples, 2)
# Interactive mode so the plot refreshes inside the training loop.
plt.ion()
fig=plt.figure()
ax=fig.add_subplot(111)#, projection='3d')
#ax.scatter(points1[:, 0], points1[:, 1], points1[:, 2], c='r')
#ax.scatter(points2[:, 0], points2[:, 1], points2[:, 2], c='b')
ax.scatter(points1[:, 0], points1[:, 1], c='r')
ax.scatter(points2[:, 0], points2[:, 1], c='b')
plt.show()
plt.pause(0.01)
# Stacked training set: class 1 labelled 1, class 2 labelled 0.
data=np.vstack((points1, points2))
labels=np.vstack((np.ones((numsamples, 1)), np.zeros((numsamples, 1))))
# Single-neuron logistic model: Dense(1) + sigmoid.
model=Sequential()
model.add(Dense(1, input_dim=2))
model.add(Activation('sigmoid'))
print(model.summary())
model.compile(optimizer='SGD', loss='binary_crossentropy', metrics=['accuracy'])
for i in range(0, 50):
    # Current weights: a,b are the two input weights, c the bias.
    w=model.get_weights()
    a=w[0][0][0]
    b=w[0][1][0]
    c=w[1][0]
    #fig=plt.figure()
    #ax=fig.add_subplot(111)#, projection='3d')
    #ax.scatter(points1[:, 0], points1[:, 1], c='r')
    #ax.scatter(points2[:, 0], points2[:, 1], c='b')
    #ax+by+c=0 ---> y=-(a/b)x-c/b
    x=np.array([-1, 4])
    y=-(a/b)*x-c/b
    ax.clear()
    ax.scatter(points1[:, 0], points1[:, 1], c='r')
    ax.scatter(points2[:, 0], points2[:, 1], c='b')
    ax.plot(x, y, 'g-')
    plt.pause(0.01)
    #plt.show()
    # One epoch per animation frame.
    model.fit(data, labels, epochs=1, batch_size=32)
| true |
5422a1d2e53d2bd052ff794da24c5fe02f44eafb | Python | emmernme/MENA-Compfys | /Project3/diff_plot.py | UTF-8 | 1,459 | 3.109375 | 3 | [] | no_license | """
Program to plot the results from the methods.
"""
import matplotlib.pyplot as plt
import numpy as np
# Compare Gauss-Laguerre and Gauss-Legendre quadrature results against the
# exact integral value, as a function of the number of integration points N.
N = np.linspace(5, 35, 13)
exact = 0.192765  # analytic reference value

# Calculated integral values for each N in the grid above.
c_lag = [0.170492, 0.154422, 0.177081, 0.187305, 0.193285, 0.194396,
0.194786, 0.194813, 0.194804, 0.194795, 0.194779, 0.194764, 0.194734]
c_leg = [0.264249, 0.329525, 0.071980, 0.099032, 0.239088, 0.222933,
0.156139, 0.162727, 0.196817, 0.193524, 0.177283, 0.179292, 0.189923]

# Signed errors (exact - calculated).  Comprehensions replace the previous
# 13 hand-indexed terms, which were error-prone and broke if the lists
# changed length.
diff_lag = [exact - c for c in c_lag]
diff_leg = [exact - c for c in c_leg]

# Calculated values vs N, with the exact value as a horizontal reference.
plt.scatter(N, c_lag, label = 'Calculated Laguerre')
plt.scatter(N, c_leg, label = 'Calculated Legendre')
plt.axhline(y = exact, label = 'Exact value')
plt.legend()
plt.title('Calculated values vs number of iterations')
plt.xlabel('N[#]')
plt.ylabel('Calculated Integralvalue')
plt.show()

# Errors vs N, with the zero line as reference.
plt.scatter(N, diff_lag, label = 'Diff Laguerre')
plt.scatter(N, diff_leg, label = 'Diff Legendre')
plt.axhline(y = 0)
plt.legend()
plt.title('Diff between exact and calculated value vs number of iterations')
plt.xlabel('N[#]')
plt.ylabel('Diff (exact - calculated)')
plt.show()
| true |
04a883b0f84e725d40b3f90320c8acc96d89fb96 | Python | feiyuerenhai/python-basics | /02-列表.py | UTF-8 | 969 | 4.5625 | 5 | [] | no_license | #!/usr/bin/python
#coding=utf-8
#列表,基本上就是JavaScript中的数组
#列表可包含多种类型的数据
arr = ['test', 42, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']]
#使用in进行存在性检查
print 'test' in arr
#取数
print arr[1]
#分片操作
sub_arr = arr[2]
#从2取到8,每隔3个取一个
print sub_arr[2:8:3]
#list方法可以将字符串还原为列表
arr3 = list('love')
arr4 = list('hate')
#列表可以相加,即concat,也可以进行乘法运算,重复多遍
print arr3 + arr4 * 4
arr5 = list('hello')
#删除列表元素
del arr5[1]
print arr5
#统计元素出现次数
print arr5.count('l')
#查找第一次出现位置
print arr5.index('o')
#元组
#元组,可以理解为参数排列,使用逗号分隔一些数值,即创建元组,带不带 () 都可以
arr6 = 1, 'world', 3
#以类似于列表的方式取数
print arr6[1]
#tuple函数可以将列表转换为元组
print tuple([1,4,7]) | true |
201a44d6a31f8698c2d63e79a52b1b69be0eb79c | Python | Hitoki/ieee | /profiler.py | UTF-8 | 2,016 | 3.25 | 3 | [] | no_license | from logging import debug as log
import time
from util import odict
class Profiler:
    """
    Used to manually profile functions and see where time is being spent.
    Usage:
    p = Profiler('page number 1')
    # ...
    p.tick('Before action X')
    # ...
    p.tick('Before action Y')
    # ...
    for i in range(10):
        p.start_loop()
        # ...
        p.tick('Before action Z')
        # ...
        p.end_loop()
    Prints all output to the log.
    """
    def __init__(self, name=''):
        # name: label prepended to every log line for this profiler.
        self.name = name
        # is_loop: True between start_loop() and end_loop(); ticks are then
        # aggregated per label instead of logged immediately.
        self.is_loop = False
        log('profile %s: -- start ------------' % name)
        self.start_time = time.time()
        self.loop_start_time = None
        self.last_loop_time = None
    def __del__(self):
        # Logs total elapsed time when the profiler is garbage-collected.
        # NOTE(review): at interpreter shutdown, module globals (time/log)
        # may already be torn down -- confirm this is acceptable here.
        log('profile %s: %0.2fs -- end ----------' %
            (self.name, time.time() - self.start_time))
    def tick(self, name):
        """Record a checkpoint.  Outside a loop the elapsed time is logged
        immediately; inside a loop the time since the previous tick is
        accumulated under *name* and reported by end_loop()."""
        if self.is_loop:
            if name not in self.ticks:
                self.ticks[name] = 0
            now = time.time()
            seconds = now - self.last_loop_time
            self.last_loop_time = now
            self.ticks[name] += seconds
        else:
            log('profile %s: %0.2fs - %s' %
                (self.name, time.time() - self.start_time, name))
    def start_loop(self):
        """Enter loop mode: subsequent ticks accumulate per-label totals."""
        self.is_loop = True
        self.loop_start_time = time.time()
        self.last_loop_time = self.loop_start_time
        # odict (from util) preserves tick insertion order in the report.
        self.ticks = odict()
        log('profile %s: %0.2fs - START LOOP' %
            (self.name, time.time() - self.start_time))
    def end_loop(self):
        """Leave loop mode and log the accumulated total for each tick label."""
        self.is_loop = False
        self.loop_start_time = None
        for name, value in self.ticks.items():
            log('profile %s: %0.2fs - total for loop tick "%s"' %
                (self.name, value, name))
        log('profile %s: %0.2fs - END LOOP' %
            (self.name, time.time() - self.start_time))
8cd01ef0f166ba714d4c840e4a329de7d0413e19 | Python | somchaisomph/NN | /nn/activators.py | UTF-8 | 1,228 | 3.21875 | 3 | [] | no_license | import numpy as np
class ReLU:
    """Elementwise rectified-linear activation."""
    def forward(self, X):
        """Return max(X, 0) elementwise."""
        z = np.zeros(X.shape)
        return np.maximum(X, z)
    def backward(self, X):
        """Return dReLU/dX: 1 where X > 0, 0 elsewhere.

        Fixes the previous formula min(relu(X), 1), which returned X itself
        (not 1) for inputs in the open interval (0, 1) and therefore scaled
        gradients incorrectly for small positive activations.
        """
        return (X > 0).astype(float)
class Sigmoid:
    """Elementwise logistic activation."""
    def forward(self, X):
        """Return sigmoid(X) = 1 / (1 + e^-X)."""
        return 1.0 / (1.0 + np.exp(-X))
    def backward(self, X, top_diff):
        """Chain rule through the sigmoid: s(X) * (1 - s(X)) * upstream grad."""
        s = self.forward(X)
        return top_diff * s * (1.0 - s)
class Tanh:
    """Elementwise hyperbolic-tangent activation."""
    def forward(self, X):
        """Return tanh(X)."""
        return np.tanh(X)
    def backward(self, X, top_diff):
        """Chain rule through tanh: (1 - tanh(X)^2) * upstream gradient."""
        t = np.tanh(X)
        return top_diff * (1.0 - t * t)
class Softmax:
    """Softmax output layer with cross-entropy loss and gradient helpers."""

    def predict(self, X):
        """Exponentiate X and normalize by the grand total.

        NOTE(review): np.sum(..., keepdims=True) sums over ALL axes, so for a
        multi-row X this is not a per-row softmax; the axis=1 variant is left
        commented out below exactly as in the original — confirm which is
        intended for batched input.
        """
        exp_scores = np.exp(X)
        #return exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
        return exp_scores / np.sum(exp_scores, keepdims=True)

    def loss(self, X, y):
        """Average negative log-probability of the true classes `y` under predict(X)."""
        num_examples = X.shape[0]
        probs = self.predict(X)
        true_class_logprobs = -np.log(probs[range(num_examples), y])
        return 1./num_examples * np.sum(true_class_logprobs)

    def diff(self, X, y):
        """Gradient of the loss w.r.t. X: softmax(X) minus one-hot(y).

        reference : https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/
        """
        num_examples = X.shape[0]  # number of data records in train set
        probs = self.predict(X)
        probs[range(num_examples), y] -= 1
        return probs
| true |
8fb0e073091dc4baca56590c7cf56a05d1ed187a | Python | Washington-University/HCPpipelinesXnatPbsJobs | /lib/utils/delete_all_resources_by_name.py | UTF-8 | 3,199 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python3
"""
utils/delete_all_resources_by_name.py: Program to delete all DB resources
of a given name for all sessions in a given ConnectomeDB project."
"""
# import of built-in modules
import glob
import os
import sys
# import of third party modules
# import of local modules
import utils.delete_resource as delete_resource
import utils.my_argparse as my_argparse
import utils.os_utils as os_utils
import xnat.xnat_archive as xnat_archive
# authorship information
__author__ = "Timothy B. Brown"
__copyright__ = "Copyright 2016, The Human Connectome Project"
__maintainer__ = "Timothy B. Brown"
def _inform(msg):
    """Print `msg` prefixed with this script's file name."""
    print("{}: {}".format(os.path.basename(__file__), msg))
def main():
    """Delete every instance of a named resource across all sessions of a project.

    Parses credentials/project/resource from the command line, then walks the
    project's XNAT archive directory and deletes the resource wherever found.
    """
    parser = my_argparse.MyArgumentParser(
        description="Program to delete all DB resources of a given name for all sessions in a given ConnectomeDB project.")
    # mandatory arguments
    parser.add_argument('-u', '--user', dest='user', required=True, type=str)
    parser.add_argument('-pw', '--password', dest='password', required=True, type=str)
    parser.add_argument('-pr', '--project', dest='project', required=True, type=str)
    parser.add_argument('-r', '--resource', dest='resource', required=True, type=str)
    # optional arguments
    parser.add_argument('-ser', '--server', dest='server', required=False,
                        default='http://' + os_utils.getenv_required('XNAT_PBS_JOBS_XNAT_SERVER'),
                        type=str)
    parser.add_argument('-f', '--force', dest='force', action='store_true', required=False, default=False)

    args = parser.parse_args()

    # Echo what we parsed (password masked).
    _inform("Parsed arguments:")
    _inform("  Username: " + args.user)
    _inform("  Password: " + "*** password mask ***")
    _inform("  Server: " + args.server)
    _inform("  Project: " + args.project)
    _inform("  Resource: " + args.resource)
    _inform("  Force: " + str(args.force))

    # Scan every session directory in the project archive for the resource.
    archive = xnat_archive.XNAT_Archive()
    archive_root = archive.project_archive_root(args.project)
    for directory in sorted(glob.glob(archive_root + os.sep + '*')):
        resource_dir = directory + os.sep + 'RESOURCES' + os.sep + args.resource
        if not os.path.isdir(resource_dir):
            continue
        # Session is the first path component under the archive root;
        # subject is the part of the session name before the first underscore.
        relative = resource_dir.replace(archive_root + os.sep, "")
        session = relative[:relative.find(os.sep)]
        subject = session[:session.find('_')]
        _inform("Deleting resource: " + args.resource + " for session: " + session)
        delete_resource.delete_resource(args.user, args.password, args.server,
                                        args.project, subject, session,
                                        args.resource, args.force)
# Script entry point.
if __name__ == '__main__':
    main()
| true |
081d5c9a1420b37803abdde23ccc167badd79d13 | Python | TDA/spc-leetcodeOJ | /src/powerof4.py | UTF-8 | 246 | 2.875 | 3 | [] | no_license | import re
__author__ = 'saipc'

# Matches bit strings containing exactly one '1' (optionally surrounded by
# zeros), i.e. the binary representations of powers of two.
regex = re.compile(r"^0*10*$")

item = "00011000"   # two set bits -> no match
item2 = "00001000"  # single set bit -> match

# Fixed: Python-2-only `print x` statements (a syntax error under Python 3);
# print(...) with a single argument behaves the same on both versions.
# Also search each string once instead of twice.
match = regex.search(item)
if match:
    print(match.group(0))

match2 = regex.search(item2)
if match2:
    print(match2.group(0))
0b5c976590b2fd39e48b367f1435529ca939f66d | Python | RoboISM/Roboism | /mainsite/forms.py | UTF-8 | 4,210 | 2.515625 | 3 | [] | no_license | import re
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from .models import *
class RegistrationForm(forms.Form):
    """Sign-up form: unique username, email address and a double-entry password."""

    username = forms.RegexField(regex=r'^\w+$', required=True, max_length=30, widget=forms.TextInput(attrs={'class':'inputfield w3-input w3-border'}), label=_("Username"), error_messages={ 'invalid': _("This value must contain only letters, numbers and underscores.") })
    email = forms.EmailField(required=True, max_length=30, widget=forms.TextInput(attrs={'class':'inputfield w3-input w3-border'}), label=_("Email address"))
    password1 = forms.CharField(required=True, max_length=30, widget=forms.PasswordInput(attrs={'class':'inputfield w3-input w3-border'}), label=_("Password"))
    password2 = forms.CharField(required=True, max_length=30, widget=forms.PasswordInput(attrs={'class':'inputfield w3-input w3-border'}), label=_("Password (again)"))

    def clean_username(self):
        """Reject usernames that already exist (case-insensitive lookup)."""
        username = self.cleaned_data['username']
        try:
            User.objects.get(username__iexact=username)
        except User.DoesNotExist:
            # No clash: the requested username is available.
            return username
        raise forms.ValidationError(_("The username already exists. Please try another one."))

    def clean(self):
        """Ensure both password entries match when both were provided."""
        data = self.cleaned_data
        if 'password1' in data and 'password2' in data:
            if data['password1'] != data['password2']:
                raise forms.ValidationError(_("The two password fields did not match."))
        return data
class MemberForm(forms.ModelForm):
    """ModelForm for creating/editing a club member's profile."""
    class Meta:
        model = Member
        fields = ('pic','name','branch','work','DOB','year','bio','linkedin','resume', 'active')
        labels = {
            'work':_('Place of Work'), 'DOB':_('Date of Birth'), 'bio':_('A little about Yourself'), 'linkedin':_('Your Linkedin Profile URL'),
        }
        help_texts = {
            'branch': _('Your current branch'), 'work':_('Keep empty if you are not employed somewhere.'), 'year':_('Your current Year, write like "First Year", or "Third Year", avoid all lowercase'),
            'bio':_('What you write will show a glimpse about you'), 'resume':_('Attach your Resume, you can upload it later also'), 'active':_('Tick if you are not Alumni'),
        }
        # Fixed: Django's ModelForm reads Meta.widgets (plural); the original
        # 'widget' key was silently ignored, so none of these ever applied.
        # NOTE(review): TextInput for 'active' (boolean) and 'resume' (file)
        # looks questionable — confirm the intended widgets for those fields.
        widgets = {
            'name': forms.TextInput(attrs={'class':'inputfield w3-input w3-border'}),
            'branch': forms.TextInput(attrs={'class':'inputfield w3-input w3-border'}),
            'work': forms.TextInput(attrs={'class':'inputfield w3-input w3-border'}),
            'DOB': forms.TextInput(attrs={'class':'inputfield w3-input w3-border'}),
            'year': forms.TextInput(attrs={'class':'inputfield w3-input w3-border'}),
            'bio': forms.TextInput(attrs={'class':'inputfield w3-input w3-border'}),
            'linkedin': forms.TextInput(attrs={'class':'inputfield w3-input w3-border'}),
            'resume': forms.TextInput(attrs={'class':'inputfield w3-input w3-border'}),
            'active': forms.TextInput(attrs={'class':'inputfield w3-input w3-border'}),
        }
class ProjectForm(forms.ModelForm):
    """ModelForm exposing every field of Project."""
    class Meta:
        model = Project
        fields = '__all__'
        labels = {
            'pic':_('Project Picture'), 'name':_('Project Name'), 'github':_('Github Link'),
        }
        help_texts = {
            'pic':_('If present'), 'github':_('If present'), 'completed':_('Tick if project is complete'), 'contributers':_('Write the names of Members who were involved in the project correctly.')
        }
        # Fixed: ModelForm reads Meta.widgets (plural); the original 'widget'
        # key was silently ignored by Django.
        # NOTE(review): TextInput for 'pic'/'completed' looks questionable for
        # image/boolean fields — confirm the intended widgets.
        widgets = {
            'pic': forms.TextInput(attrs={'class':'w3-input w3-border'}),
            'name': forms.TextInput(attrs={'class':'w3-input w3-border'}),
            'description': forms.TextInput(attrs={'class':'w3-input w3-border'}),
            'github': forms.TextInput(attrs={'class':'w3-input w3-border'}),
            'completed': forms.TextInput(attrs={'class':'w3-input w3-border'}),
            'contributers': forms.TextInput(attrs={'class':'w3-input w3-border'}),
        }
class ExpoProjectForm(forms.ModelForm):
    """ModelForm exposing every field of ExpoProject with Django's default widgets."""
    class Meta:
        model = ExpoProject
        fields = '__all__'
| true |
e47bf934f7219f6f914932985aa853bf88fec546 | Python | stephendsm/general | /python/pytorial/classesNobjects.py | UTF-8 | 2,068 | 4.84375 | 5 | [] | no_license | # Make a group of similar variables and functions together
class Enemy:
    """Tutorial enemy: a class groups related data (life) and behavior together."""

    life = 3  # class attribute: every new enemy starts with 3 life

    def attack(self):
        """Take one hit: cry out and lose one life point."""
        print("ouch!")
        # Creates/updates this instance's own `life` counter, shadowing the
        # class attribute, so each enemy tracks damage independently.
        self.life -= 1

    def checklife(self):
        """Report whether this enemy is dead or how much life remains."""
        if self.life <= 0:  # <= rather than == in case damage overshoots zero
            print("I am dead")
        else:
            # Fixed: the original printed '2life left' (missing space).
            print(str(self.life) + ' life left')
# To use anything inside a class we create an object: an instance built from
# the class template, through which we reach its attributes and methods.
enemy1 = Enemy()
enemy1.attack()
enemy1.checklife()

# Each object is independent: a second enemy keeps its own life total,
# untouched by what happened to the first one.
enemy2 = Enemy()
enemy2.attack()
enemy2.checklife()

# A class is a template for behavior — you can make as many objects from it
# as you want, one per enemy.
e3d59178d499d753be064f18b2813fd85e712391 | Python | shaunakbhanarkar/Analysis-of-Robotic-Behaviour-using-TurtleBot | /Turtlebot.py | UTF-8 | 6,531 | 2.8125 | 3 | [] | no_license | import rospy
from geometry_msgs.msg import Twist
import copy
from math import pi
#Tiles are 2*2 feet
def _run_segment(pub, linear_x, angular_z, duration_sec):
    """Publish one constant Twist at 100 Hz for duration_sec seconds, then stop.

    Matches the original per-segment pattern: a publish loop followed by a
    single zero-velocity command so the robot halts between segments.
    """
    cmd = Twist()
    cmd.linear.x = linear_x
    cmd.angular.z = angular_z
    end_time = rospy.Time.now() + rospy.Duration.from_sec(duration_sec)
    rate = rospy.Rate(100)
    while rospy.Time.now() < end_time:
        pub.publish(cmd)
        rate.sleep()
    # Stop the robot before the next segment starts.
    cmd.linear.x = 0.0
    cmd.angular.z = 0.0
    pub.publish(cmd)


def move_circle():
    """Drive the full pattern: circle, rotate, diagonal, rotate, circle,
    rotate, diagonal, rotate.

    Refactored: the original repeated the same publish-loop-then-stop body
    eight times; the segments are now data driven via _run_segment.
    Velocities follow v = r*w with r = 0.5 m (0.1 m/s at 0.2 rad/s).
    """
    rospy.init_node('Node1', anonymous=True)
    # Publisher which talks to the robot's velocity topic.
    pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)

    # (linear x m/s, angular z rad/s, duration s) — same values/order as before.
    segments = [
        (0.1, 0.2, 10 * pi),         # first circle
        (0.0, 0.2, 1.25 * pi * 2),   # first rotation (in place)
        (0.1, 0.0, 5 * 2),           # first diagonal cross
        (0.0, -0.2, 1.25 * pi * 2),  # second rotation (reverse direction)
        (0.1, -0.2, 10 * pi),        # second circle (clockwise)
        (0.0, -0.2, 1.25 * pi * 2),  # third rotation
        (0.1, 0.0, 5 * 2),           # second diagonal cross
        (0.0, 0.2, 1.25 * pi * 2),   # fourth rotation
    ]
    for linear_x, angular_z, duration_sec in segments:
        _run_segment(pub, linear_x, angular_z, duration_sec)
# Entry point: run the drive routine. ROS raises ROSInterruptException when the
# node is shut down (e.g. Ctrl-C); it is deliberately swallowed for a clean exit.
if __name__ == '__main__':
    try:
        move_circle()
    except rospy.ROSInterruptException:
        pass
6ae8f0f3a59bb40741b787cce9d1c727d970bd25 | Python | PatrickKutch/FUDD | /Fudd.py | UTF-8 | 4,440 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | ##############################################################################
# Copyright (c) 2017 Patrick Kutch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# File Abstract:
# Application that merges, modifies BIFF safe files from Oscar.
#
##############################################################################
import argparse
import os
import sys
import logging
import pickle
import xml.dom.minidom
from xml.parsers.expat import ExpatError
from Helpers import Log
from Helpers import FileHandler
from Helpers import VersionMgr
def existFile(filename):
    """Return True if `filename` exists; otherwise log an error and return False."""
    if os.path.exists(filename):
        return True
    Log.getLogger().error("Specified file: " + str(filename) + " does not exist.")
    return False
def ReadConfigFile(fileName,outfile):
    """Merge the BIFF sources listed in the XML config `fileName` and pickle
    the combined event list to `outfile`.

    Returns True on success, False on any validation/parse/write failure.
    Sources whose insertTime is "Append" are concatenated after all merged
    sources, with arrival times continuing from the last merged entry.
    """
    if not existFile(fileName):
        return False
    #open the xml file for reading:
    file = open(fileName,'r')
    #convert to string:
    data = file.read()
    #close file because we dont need it anymore:
    file.close()
    inputList=[]  # NOTE(review): never used below — looks vestigial
    try:
        domDoc = xml.dom.minidom.parseString(data)
        sourceList = domDoc.getElementsByTagName('Source')
        # run through quickly and verify input files are specified and exist
        for source in sourceList:
            # NOTE(review): membership test on a minidom NamedNodeMap —
            # hasAttribute("File") may be the safer/intended check; confirm.
            if "File" in source.attributes:
                sourceFile = source.attributes["File"].nodeValue
                if not existFile(sourceFile):
                    return False
            else:
                # A source without a File is only logged; processing continues.
                Log.getLogger().error("No File specified for source")
        resultList=[]   # time-merged entries from non-"Append" sources
        appendList=[]   # FileHandlers deferred until after the merge
        lastTime = 0
        for source in sourceList:
            fHandler = FileHandler.FileHandler(source)
            if fHandler.insertTime == "Append":
                appendList.append(fHandler)
            else:
                resultList = FileHandler.mergeLists(resultList,fHandler.createMergedList())
        # Appended sources start one tick after the last merged arrival time.
        if len(resultList) > 0:
            lastTime = resultList[-1].ArrivalTime
        for fHandler in appendList:
            resultList = FileHandler.mergeLists(resultList,fHandler.createMergedList(lastTime + 1))
            lastTime = resultList[-1].ArrivalTime
        try:
            with open(outfile,'w+b') as fp:
                pickle.dump(resultList, fp, pickle.DEFAULT_PROTOCOL)
                print("New file [" + outfile + "] created with " + str(len(resultList)) + " entries.")
        except Exception as ex:
            print(str(ex))
            return False
    # NOTE(review): UnpicklingError is a load-time error — it is unlikely to be
    # raised by pickle.dump above; this handler looks unreachable. Confirm.
    except pickle.UnpicklingError:
        return False
    except Exception as ex:
        Log.getLogger().error("Bad Content - XML error: " + str(ex))
        return False
    return True
def main():
    """Program entry point: all work happens in HandleCommandlineArguments."""
    # Its return value was only used to end main early, which is what falling
    # off the end of the function does anyway — a plain call is equivalent.
    HandleCommandlineArguments()
def HandleCommandlineArguments():
    """Parse the command line, configure logging, and run the merge.

    Returns True on success, False on bad arguments, an unsupported Python
    version, or a failed merge.
    """
    print("FUDD - BIFF Save File Editor Version " + VersionMgr.ReadVer())
    if sys.version_info < (3, 3):
        print("---- Error: Required Python 3.3 or greater ---")
        return False
    parser = argparse.ArgumentParser(description='FUDD the fearful')
    parser.add_argument("-i","--input",help='specifies application configuration file file',type=str,required=True)
    parser.add_argument("-o","--output",help='specifies file to generate',type=str,required=True)
    parser.add_argument("-l","--logfile",help='specifies log file name',type=str)
    parser.add_argument("-v","--verbose",help="prints debug information",action="store_true")
    try:
        args = parser.parse_args()
    except SystemExit:
        # Fixed: the bare `except:` also swallowed KeyboardInterrupt etc.;
        # argparse signals bad arguments (and --help) via SystemExit.
        return False
    if args.logfile is not None:
        Log.setLogfile(args.logfile)
    if not args.verbose:
        Log.setLevel(logging.ERROR)
    else:
        Log.setLevel(logging.INFO)
    Log.getLogger().info("")
    # Fixed: propagate the merge result instead of always returning None.
    return ReadConfigFile(args.input, args.output)
# Run the editor when invoked as a script.
if __name__ == '__main__':
    main()
| true |
bcee12ca6844d52c608a4cbc669ef901c34b5059 | Python | DagoPeralta94/CursoPythonPlatzi | /decomposicion.py | UTF-8 | 1,051 | 3.5625 | 4 | [] | no_license | class Automovil:
def __init__(self, modelo, marca, color):
self.modelo = modelo
self.marca = marca
self.color = color
self._estado = "en_reposo"
self._motor = Motor(cilindros=4)
print(f'Modelo: {self.modelo} - Marca: {self.marca} - Color: {self.color}')
def acelerar(self, tipo='despacio'):
if tipo == 'rapida':
self._motor.inyecta_gasolina(10)
else:
self._motor.inyecta_gasolina(3)
self.estado = 'en_movimiento'
print(f'Estado: {self.estado} - Tipo de aceleración: {tipo}')
class Motor:
    """Tutorial engine: tracks cylinder count, fuel type and temperature."""

    def __init__(self, cilindros, tipo='gasolina'):
        self._temperatura = 0  # engine is cold until fuel is injected
        self.cilindros = cilindros
        self.tipo = tipo

    def inyecta_gasolina(self, cantidad):
        """Inject `cantidad` units of fuel; warms the engine to 30 degrees."""
        self._temperatura = 30
        print(f'Cantidad de inyección: {cantidad} - Cilindros: {self.cilindros} - Tipo de combustiión: {self.tipo} - Temperatura: {self._temperatura} grados')
# Demo: building the car prints its data; accelerating hard prints the engine
# injection details followed by the car's state.
auto = Automovil('2019', 'MAZDA', 'AZUL')
auto.acelerar('rapida')
| true |
3b2030b8c3e55c3475257b206bb264b7bcfc1981 | Python | Chaitra-21/PYTHON-BASIC-CODES | /cinema.py | UTF-8 | 717 | 3.828125 | 4 | [] | no_license | films={ "Finding Doru":[3,5],
"Bourne":[12,5],
"Tarzan":[15,4],
"Ghost Buster":[12,6]
}
# Ticket kiosk loop: keep asking for films until the process is interrupted.
# films maps title -> [minimum age, seats remaining].
while True:
    choice = input("Which film you want to watch?: ").strip().title()
    if choice not in films:
        print("We don't have that film :(")
        continue
    age = int(input("How old are you?: ").strip())
    # Age gate first, then seat availability.
    if age < films[choice][0]:
        print("You are too young to watch {}".format(choice))
    elif films[choice][1] > 0:
        print("Enjoy!!")
        films[choice][1] -= 1
    else:
        print("No more seats available")
| true |
608807740ca1f5c093e5bbc7f91ff4ce1a24a7e3 | Python | gregorylburgess/makahiki | /makahiki/scripts/verify.py | UTF-8 | 1,446 | 2.546875 | 3 | [] | no_license | #!/usr/bin/python
"""Invocation: scripts/verify.py
Runs pep8, pylint, and tests.
If all are successful, there is no output and program terminates normally.
If any errors, prints output from unsuccessful programs and exits with non-zero error code.
"""
import sys
import os
import getopt
def main(argv):
    """Verify main function. Usage: verify.py [-v | --verbose]

    Runs pep8, pylint, the test suite and the doc build in order, exiting
    with a non-zero status as soon as any step fails.
    Fixed: Python-2-only `print ...` statements (print(...) behaves the same
    on both versions for a single argument) and five copy-pasted step bodies.
    """
    try:
        opts, _ = getopt.getopt(argv, "v", ["verbose"])
    except getopt.GetoptError:
        print("Usage verify.py [-v | --verbose]")
        sys.exit(2)
    verbose = any(opt in ("-v", "--verbose") for opt, _ in opts)

    def run_step(label, command, check=True):
        # Announce the step when verbose, run it, and bail out on failure.
        if verbose:
            print(label)
        status = os.system(command)
        if check and status:
            sys.exit(1)

    run_step("running pep8", os.path.join("scripts", "run_pep8.sh"))
    run_step("running pylint", os.path.join("scripts", "run_pylint.sh"))
    # The clean step never aborted the run in the original; keep that.
    run_step("cleaning", "python manage.py clean_pyc", check=False)
    run_step("running tests", "python manage.py test")
    run_step("building docs", "pushd .; cd ../doc; make clean html; popd;")
# Forward everything after the program name to main().
if __name__ == '__main__':
    main(sys.argv[1:])
| true |
6bdab68868e0885ddc2358132056c4fb9e1b2e32 | Python | summercake/Python_Jose | /11.If.py | UTF-8 | 422 | 3.765625 | 4 | [] | no_license | # if case1:
#   if <case-1>:   perform action 1
#   elif <case-2>: perform action 2
#   else:          perform action 3

# A literal True condition always executes its branch.
if True:
    print('It was Ture')

# x is falsy, so the else branch runs.
x = False
if not x:
    print('I will print x is anything not True')
else:
    print('x was false')

# Multi-way branch on a location string, here via a lookup table with a
# default instead of an if/elif chain.
loc = 'Bank'
known_spots = {
    'Auto Shop': 'loc is Auto Shop',
    'Bank': 'loc is Bank',
    'Mall': 'loc is Mall',
}
print(known_spots.get(loc, 'where are u?'))
| true |
f647062c093159a42a7a87f9be25ba636ddb5d4c | Python | minseunghwang/YouthAcademy-Python-Mysql | /작업폴더/09_Set/main.py | UTF-8 | 1,755 | 3.828125 | 4 | [] | no_license | # Set
# The element Python provides for set (collection) handling.
# Sets allow no duplicates, and their storage is not managed by order or by name.
# creating sets
# NOTE: {} is an empty dict, not a set — the type printouts below show this.
set1 = {}
set2 = set()
print(f'set1 type : {type(set1)}')
print(f'set2 type : {type(set2)}')
print(f'set2 : {set2}')
set3 = {10, 20, 30, 40, 50}
print(f'set3 : {set3}')
print(f'set3 type : {type(set3)}')
# duplicates are impossible (sets are used for de-duplication)
print('중복 No---------------')
set4 = {10, 10, 10, 20, 20, 20, 30, 30, 30}
print(f'set4 : {set4}')
# adding elements
print('추가 -----------------')
set5 = set()
set5.add(10)
set5.add(20)
set5.add(30)
print(f'set5 : {set5}')
# duplicated values simply don't go in
set5.add(10)
set5.add(10)
set5.add(20)
print(f'set5 : {set5}')
print('---------------------')
# converting set / list -> tuple
# because tuples are faster at fetching values
# when it's wrapped in parentheses (), it's a tuple!
list10 = [10, 20, 30, 40, 50]
set10 = {10, 20, 30, 40, 50}
tuple10 = tuple(list10)
tuple11 = tuple(set10)
print(f'tuple10 : {tuple10}')
print(f'tuple11 : {tuple11}')
# tuple -> list
print('--------tuple -> list--------')
# so the managed data can be appended, inserted, deleted or modified
list20 = list(tuple10)
print(f'list20 : {list20}')
# set -> list, tuple
print('--------set -> list, tuple--------')
# purpose: manage the data by index number
list21 = list(set10)
print(f'list21 : {list21}')
# list, tuple -> set
print('---------list, tuple -> set---------')
# purpose: removing duplicates
# CAUTION: the element order may get shuffled
tuple100 = (10, 10, 10, 20, 20, 30, 30, 30)
list100 = [10, 10, 10, 20, 20, 30, 30, 30]
set30 = set(tuple100)
set31 = set(list100)
print(f'set30 : {set30}')
print(f'set31 : {set31}')
95748e61bd8fbdf971a71aadb90a597003a4e1c1 | Python | karslio/PYCODERS | /Assignments-02/rotated_list.py | UTF-8 | 399 | 3.875 | 4 | [] | no_license | listElements = []
# Read the left-rotation amount, collect elements until 'q', then print the
# rotated list.
slip = int(input("how many index you will slip left"))
print("to stop the program please enter 'q'")
while True:
    entry = input("Enter list element: ").lower()
    if entry == 'q':
        break
    listElements.append(entry)
print(listElements)
# Left rotation by `slip`: tail first, then the head.
newList = listElements[slip:] + listElements[:slip]
print("The new order of list is: " + " ".join(map(str, newList)))
| true |
c0f66396a906b90575ee38d99c46fcf3cec8fbed | Python | BryannaSav/PythonOOPExercises | /MathDojo.py | UTF-8 | 853 | 3.671875 | 4 | [] | no_license | class MathDojo(object):
def __init__(self):
pass
self.tot=0
def add(self, *num):
self.num=num
for i in range (0,len(num)):
if isinstance(num[i], (list,tuple)):
for j in range(0,len(num[i])):
self.tot = self.tot + num[i][j]
else:
self.tot = self.tot + num[i]
return self
def subtract(self, *num):
self.num=num
for i in range (0,len(num)):
if isinstance(num[i], (list,tuple)):
for j in range(0,len(num[i])):
self.tot = self.tot - num[i][j]
else:
self.tot = self.tot - num[i]
return self
def result(self):
print self.tot
return self
# Demo of chained arithmetic on a single instance:
# add 2+4+6+2+2 = 16, subtract 2+4+6 = 12, then result() prints the total (4).
example=MathDojo()
example.add([2,4],6,(2,2)).subtract(2,[4,6]).result()
| true |
81a8d9abf73a544c2f6a72c54e94c47dbfb48245 | Python | HeyMikeMarshall/python-challenge | /PyPoll/main.py | UTF-8 | 2,028 | 3.40625 | 3 | [] | no_license | import os
import csv
# Input ballots and output report locations (both relative to the CWD).
election_data = os.path.join(".", "election_data.csv")
output_dir = os.path.join(".", "results.txt")

# Initialize results.txt: truncate and leave a single blank line, as before.
with open(output_dir, "w+") as text_file:
    text_file.write("\n")

# Running tallies used by the counting code below.
ttl_vote = 0
canid = -1
winner = 0
candidates = []
tallys = []
compline = []
## Helper: echo a message to the console and append it to results.txt.
def tolog(text):
    print(text)
    with open(output_dir, "a+") as log_file:
        print(text, file=log_file)
# O's! Say does that star spangled etc.
flag = (f"""
* * * * * * ---------------
* * * * * ---------------
* * * * * * ---------------
* * * * * ---------------
---------------------------
---------------------------
---------------------------
-= Election Results =-
%%%%%%%%%%%%%%%%%%%%%%%%%%%
""")
#print the flag
tolog(flag)
# Single pass over the ballot file: count total votes, remember candidates in
# order of first appearance, and tally votes per candidate.
# Fixed: the original re-read the entire CSV once per candidate
# (O(candidates x rows)); one pass with a Counter gives identical output.
from collections import Counter

vote_counts = Counter()
with open(election_data, newline='') as f:
    reader = csv.reader(f, delimiter=',')
    header = next(reader)
    for row in reader:
        ttl_vote += 1
        if row[2] not in candidates:
            candidates.append(row[2])
        vote_counts[row[2]] += 1

#print total vote counts
tolog(f"Total Votes: {ttl_vote}")
tolog(f"---------------------------")

# Report each candidate (first-seen order) and track the leader; ties keep
# the earlier candidate, as before.
for candidate in candidates:
    canid += 1
    tallys.append(vote_counts[candidate])
    pct = ((tallys[canid] / ttl_vote) * 100)
    if tallys[canid] > winner:
        winner = tallys[canid]
        winname = candidate
    tolog(f"{candidate}: %{round(pct, 3)} ({tallys[canid]})")

#WE GOT A WINNER!
winlog = (f"""---------------------------
Winner: {winname}
---------------------------""")

#print the winlog to screen and text file, call it a day.
tolog(winlog)
d2906c40d8aef2b1be36f51374bdac2e1a26893c | Python | DSJacq/Miscellaneous | /HackerRank/Python/collections_namedtuple.py | UTF-8 | 960 | 3.4375 | 3 | [] | no_license | from collections import namedtuple
# exemple 1: a 2-D point and the dot product of two of them
Point = namedtuple('Point','x,y')
pt1 = Point(1,2)
pt2 = Point(3,4)
# namedtuples iterate like plain tuples, so zip pairs the coordinates.
dot_product = sum(a * b for a, b in zip(pt1, pt2))
print(dot_product)
# exemple 2: a record-style namedtuple with named fields
Car = namedtuple('Car','Price Mileage Colour Class')
xyz = Car(Price = 100000, Mileage = 30, Colour = 'Cyan', Class = 'Y')
print(xyz)
# Fixed: removed a stray bare `Car(...)` expression pasted from a REPL session;
# it built a second Car and immediately discarded it (no effect in a script).
print(xyz.Class)
# Case: HackerRank "Collections.namedtuple" — read a table of students from
# stdin and print the average of the MARKS column, formatted to 2 decimals.
# Input order (unchanged): row count, column names, then one row per student.
import collections, statistics
num_students = int(input())
Student = collections.namedtuple('Student', input())
marks = (int(Student(*input().split()).MARKS) for _ in range(num_students))
print('%.2f' % statistics.mean(marks))
# sample input
# 5
# ID MARKS NAME CLASS
# 1 97 Raymond 7
# 2 50 Steven 4
# 3 91 Adrian 9
# 4 72 Stewart 5
# 5 80 Peter 6
| true |
971059b90c201c8aa68eee8907da7e1c3cb1f647 | Python | jjhenkel/averloc | /models/pytorch-seq2seq/seq2seq/evaluator/metrics.py | UTF-8 | 3,356 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | import sys, os
import numpy as np
import tqdm
# Support both invocation styles: with the evaluator directory on sys.path,
# or as part of the seq2seq package.
try:
    from bleu import moses_multi_bleu
except ImportError:
    # Fixed: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # only a failed import should trigger the package-qualified fallback.
    from seq2seq.evaluator.bleu import moses_multi_bleu
def calculate_metrics_from_files(pred_file, labels_file, verbose=False):
    """Read predictions and references (one per line) and print each metric.

    Fixed: files are now opened with context managers, so they are closed even
    if reading raises (the original leaked handles on error).
    """
    with open(pred_file, 'r') as f_pred:
        hypotheses = f_pred.readlines()
    with open(labels_file, 'r') as f_true:
        references = f_true.readlines()
    metrics = calculate_metrics(hypotheses, references, verbose)
    for name in metrics:
        print('%s: %.3f' % (name, metrics[name]))
    print()
def get_freqs(pred, true):
    """Return parallel {word: count} dicts over the union vocabulary of both lists.

    Every word from either list is a key in BOTH dicts (count 0 when absent);
    callers rely on that when iterating d_pred.
    Fixed: the original called list.count per word, an O(n^2) scan; Counter
    builds the same counts in one O(n) pass.
    """
    from collections import Counter  # local import keeps the module surface unchanged
    pred_counts = Counter(pred)
    true_counts = Counter(true)
    vocab = set(pred) | set(true)
    d_pred = {w: pred_counts[w] for w in vocab}
    d_true = {w: true_counts[w] for w in vocab}
    return d_pred, d_true
def calculate_metrics(y_pred, y_true, verbose=False, bleu=False):
    '''
    Calculate exact match accuracy, precision, recall, F1 score, word-level accuracy
    y_pred and y_true are lists of strings
    function returns dict with the calculated metrics

    Fixed: removed the dead frequency-based counting branch (calc_type was
    hard-coded to 2) and leftover commented-out debug prints; the surviving
    code2seq-style type-level counting is unchanged.
    NOTE(review): assumes at least one pair (N > 0) and a non-empty reference
    side, otherwise the divisions below raise — same as the original.
    '''
    N = min(len(y_pred), len(y_true))
    if len(y_pred) != len(y_true):
        print('Warning: The number of predictions and ground truths are not equal, calculating metrics over %d points'%N)
    tp = 0  # word types present in both prediction and reference
    fp = 0  # word types predicted but absent from the reference
    fn = 0  # word types in the reference but missing from the prediction
    exact_match = 0
    correct_words = 0
    total_words = 0
    indices = tqdm.tqdm(range(N)) if verbose else range(N)
    for i in indices:
        pred = y_pred[i].split()
        true = y_true[i].split()
        total_words += len(true)
        # Position-wise word accuracy over the overlapping prefix.
        for p_word, t_word in zip(pred, true):
            if p_word == t_word:
                correct_words += 1
        d_pred, d_true = get_freqs(pred, true)
        if pred == true:
            exact_match += 1
        # Type-level (code2seq-style) counting over the union vocabulary.
        for word in d_pred:
            if d_pred[word] > 0:
                if d_true[word] > 0:
                    tp += 1
                else:
                    fp += 1
            if d_true[word] > 0 and d_pred[word] == 0:
                fn += 1
    # The tiny epsilon guards against division by zero when a sum is 0.
    precision = tp / (tp+fp+0.0000000001)
    recall = tp / (tp+fn+0.0000000001)
    f1 = 2*precision*recall / (precision+recall+0.0000000001)
    exact_match /= N
    word_level_accuracy = correct_words / total_words
    d = {
        'precision': precision*100,
        'recall': recall*100,
        'f1': f1*100,
        'exact_match':exact_match*100,
        'word-level accuracy': word_level_accuracy*100,
    }
    if bleu:
        bleu_score = moses_multi_bleu(np.array(y_pred), np.array(y_true))
        d['BLEU'] = bleu_score
    return d
def parse_args():
    """Parse and validate the command-line arguments for the metrics script."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--f_true', help='File with ground truth labels', required=True)
    parser.add_argument('--f_pred', help='File with predicted labels', required=True)
    parser.add_argument('--verbose', action='store_true', help='verbosity')
    args = parser.parse_args()
    # Fixed: the original used `assert` for input validation, which is
    # silently stripped under `python -O`; parser.error always runs and
    # prints a proper usage message.
    if not os.path.exists(args.f_true):
        parser.error('Invalid file for ground truth labels')
    if not os.path.exists(args.f_pred):
        parser.error('Invalid file for predicted labels')
    return args
# Script entry point: compare the two label files given on the command line.
if __name__=="__main__":
    args = parse_args()
    calculate_metrics_from_files(args.f_pred, args.f_true, args.verbose)
| true |
4188af5928fd2bf07c7d943a4aae3d88f48cf44b | Python | freestylofil/PKSS_heat_installation | /energy_provider/ActualTime.py | UTF-8 | 923 | 2.734375 | 3 | [] | no_license | from datetime import datetime, timedelta
import ntplib
time_client = ntplib.NTPClient()
class ActualTime:
    """Tracks a mutable 'current' datetime and detects 5-minute period rollovers."""

    def __init__(self, date=None):
        # BUG FIX: the default used to be `date=datetime.now()`, which is
        # evaluated once at class-definition time, so every default-constructed
        # instance shared the same stale import-time timestamp.  A None
        # sentinel keeps the call signature backward-compatible.
        if date is None:
            date = datetime.now()
        self._date = date    # the current time
        self._date0 = date   # start of the current period
        self._period = timedelta(minutes=5)

    @property
    def date(self) -> datetime:
        return self._date

    @date.setter
    def date(self, date: datetime) -> None:
        self._date = date

    def get_timestamp(self):
        """Return the current time as an integer POSIX timestamp."""
        return int(datetime.timestamp(self._date))

    def check_period(self) -> bool:
        """Return True and restart the period when at least 5 minutes have
        elapsed since the current period started; otherwise return False."""
        if (self._date - self._date0) >= self._period:
            self._date0 = self.date
            return True
        return False
def get_time(host, port) -> datetime:
    """Query the given NTP server and return its transmit time as a datetime.

    NOTE(review): the literal ``3`` is passed as the second positional
    argument of ``NTPClient.request`` (presumably the NTP protocol version,
    with ``port`` following) — confirm against the ntplib API.
    """
    now = time_client.request(host, 3,port)
    return datetime.fromtimestamp(int(now.tx_time))
| true |
e5ca586a2bbaebf65d532127f4e3b2eba0f0bef6 | Python | apjanco/LostVoicesCadenceViewer | /LV_Streamlit_Viewer_App.py | UTF-8 | 5,488 | 2.890625 | 3 | [
"CC0-1.0"
] | permissive | import streamlit as st
import pandas as pd
import altair as alt
import plotly.graph_objects as go
import networkx as nx
import numpy as np
import requests
# Page title of the Streamlit app.
st.header("Du Chemin Lost Voices Cadence Data")
# st.cache speeds things up by holding data in cache
#@st.cache
def get_data():
    """Download the cadence CSV and attach the per-phrase similarity lists.

    Returns a DataFrame whose ``similarity`` column holds, for each row, the
    JSON-decoded list of indices of similar phrases (fetched from the
    DuChemin similarities file).
    """
    url = "https://raw.githubusercontent.com/RichardFreedman/LostVoicesCadenceViewer/main/LV_CadenceData.csv"
    df = pd.read_csv(url)
    cadence_json = requests.get("https://raw.githubusercontent.com/bmill42/DuChemin/master/phase1/data/duchemin.similarities.json").json()
    df['similarity'] = cadence_json
    return df
df = get_data()
# Dialogue to Show Raw Data as Table
if st.sidebar.checkbox('Show Complete Data Frame'):
    st.subheader('Raw data')
    st.write(df)
#tones = df['cadence_final_tone'].drop_duplicates()
# Working subset: only the columns used by the charts below.
tones = df[["cadence_final_tone", "cadence_kind", "final_cadence", "composition_number"]]
# Unfiltered scatter of every cadence in every piece.
all_tone_diagram = alt.Chart(tones).mark_circle().encode(
    x='final_cadence',
    y='composition_number',
    color='cadence_final_tone',
    shape='cadence_kind'
)
if st.sidebar.checkbox('Show All Pieces with Their Cadences'):
    st.subheader('All Pieces with Cadences')
    st.altair_chart(all_tone_diagram, use_container_width=True)
# Dialogue to Select Cadence by Final Tone
st.subheader('Selected Cadences by Final Tone')
# Create a list of possible values and multiselect menu with them in it.
#cadence_list = tones['cadence_final_tone']
cadence_list = tones['cadence_final_tone'].unique()
cadences_selected = st.sidebar.multiselect('Select Tone(s)', cadence_list)
# Mask to filter dataframe
mask_cadences = tones['cadence_final_tone'].isin(cadences_selected)
tone_data = tones[mask_cadences]
# Scatter filtered to the selected final tones only.
tone_diagram = alt.Chart(tone_data).mark_circle().encode(
    x='cadence_kind',
    y='composition_number',
    color='final_cadence',
    #shape='final_cadence',
    tooltip=['cadence_kind', 'composition_number', 'final_cadence']
)
st.altair_chart(tone_diagram, use_container_width=True)
# This displays choice of piece
st.subheader('Selected Pieces')
piece_list = tones['composition_number'].unique()
pieces_selected = st.sidebar.multiselect('Select Piece(s)', piece_list)
# Mask to filter dataframe
mask_pieces = tones['composition_number'].isin(pieces_selected)
piece_data = tones[mask_pieces]
piece_diagram = alt.Chart(piece_data).mark_circle().encode(
    x='cadence_final_tone',
    y='cadence_kind',
    color='final_cadence',
    #shape='final_cadence'
)
st.altair_chart(piece_diagram, use_container_width=True)
###
# Graph Visualization: one node per phrase, one edge per similarity link.
###
cadence_graph = nx.Graph()
# Add a node for each cadence
for index, row in df.iterrows():
    cadence_graph.add_node(row.phrase_number, size=1.5)
# Add all the edges (each phrase connects to the phrases listed in its
# `similarity` column; indices are looked up back in the dataframe).
for index, row in df.iterrows():
    for i in row.similarity:
        cadence_graph.add_edge(row.phrase_number, df['phrase_number'][i], weight=2)
# Get positions for the nodes in G
pos_ = nx.spring_layout(cadence_graph)
def make_edge(x, y, text, width):
    '''Creates a scatter trace for the edge between x's and y's with given width

    Parameters
    ----------
    x     : a tuple of the endpoints' x-coordinates in the form, tuple([x0, x1, None])
    y     : a tuple of the endpoints' y-coordinates in the form, tuple([y0, y1, None])
    width : the width of the line

    Returns
    -------
    An edge trace that goes between x0 and x1 with specified width.
    '''
    edge_line = dict(width=width, color='cornflowerblue')
    return go.Scattergl(x=x,
                        y=y,
                        line=edge_line,
                        hoverinfo='text',
                        text=([text]),
                        mode='lines')
# For each edge, make an edge_trace, append to list
edge_trace = []
for edge in cadence_graph.edges():
    char_1 = edge[0]
    char_2 = edge[1]
    x0, y0 = pos_[char_1]
    x1, y1 = pos_[char_2]
    text = char_1 + '--' + char_2
    # Edge width scales with the (constant) graph weight.
    trace = make_edge([x0, x1, None], [y0, y1, None], text,
                      0.3*cadence_graph.edges()[edge]['weight']**1.75)
    edge_trace.append(trace)
# Make a node trace (coordinates/sizes are appended below).
node_trace = go.Scattergl(x = [],
                          y = [],
                          text = [],
                          textposition = "top center",
                          textfont_size = 10,
                          mode = 'markers+text',
                          hoverinfo = 'none',
                          marker = dict(color = [],
                                        size = [],
                                        line = None))
# For each node in cadence_graph, get the position and size and add to the node_trace
for node in cadence_graph.nodes():
    x, y = pos_[node]
    node_trace['x'] += tuple([x])
    node_trace['y'] += tuple([y])
    node_trace['marker']['color'] += tuple(['cornflowerblue'])
    node_trace['marker']['size'] += tuple([5*cadence_graph.nodes()[node]['size']])
    # node_trace['phrase_number'] += tuple(['<b>' + node + '</b>'])
# Transparent background so the graph blends into the Streamlit page.
layout = go.Layout(
    paper_bgcolor='rgba(0,0,0,0)',
    plot_bgcolor='rgba(0,0,0,0)'
)
fig = go.Figure(layout = layout)
for trace in edge_trace:
    fig.add_trace(trace)
fig.add_trace(node_trace)
fig.update_layout(showlegend = False)
fig.update_xaxes(showticklabels = False)
fig.update_yaxes(showticklabels = False)
st.plotly_chart(fig)
98666a65094838c6d2be7cac7e13a2bd64302432 | Python | Tr0ub1e/Izbushka | /printer_m.py | UTF-8 | 5,906 | 2.875 | 3 | [] | no_license | from bs4 import BeautifulSoup
class Make_html():
    """Fill an HTML invoice template ('testt2.html') with car, service,
    parts and money data, and save the result as 'res.html'.

    The template is navigated by matching the literal text of <span>
    placeholders (Russian labels such as 'работа_сум'), so the class is
    tightly coupled to the template's structure.
    """
    def __init__(self, start_date, end_date, car_data, usl_data, zap_data, money_data):
        # car_data: (car, engine, year, gov_num, milliage, vin)
        # usl_data / zap_data: rows of service / spare-part values
        # money_data: (work_sum, other_sum, zap_sum)
        self.car_data = car_data
        self.zap_data = zap_data
        self.money_data = money_data
        self.usl_data = usl_data
        self.start_date, self.end_date = start_date, end_date
        with open('testt2.html','r', encoding='utf-8') as file:
            page = file.read()
        self.soup = BeautifulSoup(page, 'html.parser')
    def save_file(self):
        """Fill all template sections and write the result to 'res.html'."""
        self.insert_money_data(self.money_data)
        self.insert_car_data(self.start_date, self.end_date, self.car_data)
        self.insert_data(self.usl_data, self.zap_data)
        with open('res.html', 'w', encoding='utf-8') as file:
            file.write(str(self.soup))
    def insert_car_data(self, start_date, end_date, data):
        """Replace the car-description placeholders with the actual values.

        NOTE(review): the '00.00.00г' branch assumes the end-date span is
        exactly three <span> tags after the start-date span — template
        dependent, confirm against 'testt2.html'.
        """
        car, engine, year, gov_num, milliage, vin = data
        tags = self.soup.find_all('span')
        for i, d in enumerate(tags):
            if d.string == '00.00.00г':
                tags[i].string = ' '+start_date
                tags[i+3].string = ' '+end_date
            if d.string == 'Марка,модель ':
                d.string += ' '+car
            if d.string == 'Двигатель №':
                d.string += ' '+engine
            if d.string == 'Год выпуска':
                d.string += ' '+str(year)
            if d.string == 'Пробег':
                d.string = 'Пробег '+str(milliage)+' km'
            if d.string == 'Государственный рег.номер':
                d.string += ' '+gov_num
            if d.string == 'VIN':
                d.string += ' '+vin
    def insert_money_data(self, data):
        """Fill the totals table: sums, per-row tax and sums including tax.

        NOTE(review): 0.17 looks like a hard-coded VAT rate — confirm.
        """
        # Tax amounts derived from the three sums, rounded to 3 decimals.
        nds_data = list(
            map(lambda i:round(i, 3),
                list(
                    map(lambda x: x*0.17, data)
                )
            )
        )
        nds_data = list(map(str, nds_data))
        nds_work, nds_other, nds_zap = nds_data
        data = list(map(str, data))
        work_sum, other_sum, zap_sum = data
        tags = self.soup.find_all('span')
        for i, d in enumerate(tags):
            if d.string == 'работа_сум': d.string = work_sum
            if d.string == 'другое_сум': d.string = other_sum
            if d.string == 'запч_сум': d.string = zap_sum
            if d.string == 'всего_сум':
                d.string = str(sum(map(int, (work_sum, other_sum, zap_sum))))
            if d.string == 'работа_ндс': d.string = nds_work
            if d.string == 'другое_ндс': d.string = nds_other
            if d.string == 'запч_ндс': d.string = nds_zap
            if d.string == 'всего_ндс':
                d.string = str(sum(map(float, (nds_work, nds_other, nds_zap))))
            if d.string == 'сумма_ндс':
                d.string = str(sum(map(float, (work_sum, nds_work))))
            if d.string == 'другое_сум_ндс':
                d.string = str(sum(map(float, (other_sum, nds_other))))
            if d.string == 'запч_сум_ндс':
                d.string = str(sum(map(float, (zap_sum, nds_zap))))
            if d.string == 'всего_сум_ндс':
                d.string = str(sum(map(float, (
                    str(sum(map(int, (work_sum, other_sum, zap_sum)))),
                    str(sum(map(float, (nds_work, nds_other, nds_zap))))
                ))))
    def __gen_usluga_data(self, data):
        """Append one empty 6-column row per service entry to the table that
        contains the 'Код работы' header, then stop at the first match."""
        tags = self.soup.find_all('table')
        for i, d in enumerate(tags):
            for j, dd in enumerate(d.find_all('span')):
                if dd.string == 'Код работы':
                    for m in range(len(data)):
                        new_tr = self.soup.new_tag('tr')
                        self.__gen_table(new_tr, 6)
                        d.append(new_tr)
                    return
    def insert_data(self, usluga_data, parts_data):
        """Generate the placeholder rows, then fill every 'NEW ITEM' cell
        with the flattened service values (table index 2) and spare-part
        values (table index 3), consuming them in order.

        NOTE(review): relies on tables 2 and 3 being the services/parts
        tables — template dependent.
        """
        self.__gen_usluga_data(usluga_data)
        self.__gen_parts_data(parts_data)
        u = [x for i in usluga_data for x in i]
        p = [x for i in parts_data for x in i]
        tags = self.soup.find_all('table')
        for i, d in enumerate(tags):
            for j, dd in enumerate(d.find_all('span')):
                if i == 2 and dd.string == 'NEW ITEM':
                    d.find_all('span')[j].string = ' '+str(u[0])
                    u.remove(u[0])
                if i == 3 and dd.string == 'NEW ITEM':
                    d.find_all('span')[j].string = ' '+str(p[0])
                    p.remove(p[0])
    def __gen_parts_data(self, data):
        """Append one empty 5-column row per spare-part entry to the table
        containing the 'Код запчасти' header, then stop at the first match."""
        tags = self.soup.find_all('table')
        for i, d in enumerate(tags):
            for j, dd in enumerate(d.find_all('span')):
                if dd.string == 'Код запчасти':
                    for m in range(len(data)):
                        new_tr = self.soup.new_tag('tr')
                        self.__gen_table(new_tr, 5)
                        d.append(new_tr)
                    return
    def __gen_table(self, tr, columns):
        """Fill *tr* with *columns* styled <td><p><span>NEW ITEM</span></p></td>
        placeholder cells and return it."""
        for k in range(columns):
            td = self.soup.new_tag('td')
            td['style'] = "vertical-align:top; padding-left:0; padding-right:0; padding-top:0; padding-bottom:0;"
            p = self.soup.new_tag('p')
            p['style'] = " margin-top:12px; margin-bottom:12px; margin-left:0px; \
            margin-right:0px; -qt-block-indent:0; text-indent:0px;"
            span = self.soup.new_tag('span')
            span['style'] = " font-size:8pt;"
            span.string = 'NEW ITEM'
            p.append(span)
            td.append(p)
            tr.append(td)
        return tr
if __name__ == '__main__':
    # NOTE(review): Make_html.__init__ requires six positional arguments, so
    # this no-argument call raises TypeError when the module is run directly
    # — confirm the intended smoke-test usage / supply real arguments.
    Make_html().save_file()
| true |
29759020b585332831001c851ec55c7fc8bef016 | Python | beCharlatan/gu_ai | /pyalgs/lesson3/task02.py | UTF-8 | 648 | 4.15625 | 4 | [] | no_license | # 2. Во втором массиве сохранить индексы четных элементов первого массива. Например, если дан массив со значениями 8, 3, 15, 6, 4, 2, второй массив надо заполнить значениями 0, 3, 4, 5 (помните, что индексация начинается с нуля), т. к. именно в этих позициях первого массива стоят четные числа.
# Collect the indices of the even elements of `first`
# (e.g. [8, 3, 15, 6, 4, 2] -> [0, 3, 4, 5]).
first = [8, 3, 15, 6, 4, 2]
second = [index for index, value in enumerate(first) if value % 2 == 0]
print(second)
3752b2c94976fdaa9a4d1b0e636b7c723c0a6f3a | Python | mit-ccrg/ml4c3-mirror | /tensorize/bedmaster/bedmaster_stats.py | UTF-8 | 7,310 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | # Imports: standard library
import os
from typing import Dict
# Imports: third party
import numpy as np
import pandas as pd
# Imports: first party
from tensorize.bedmaster.data_objects import BedmasterSignal
class BedmasterStats:
    """
    Class that gets together the summary data from all the writers.

    It accumulates per-signal counters (value range, running mean, sample
    frequencies, data-quality issues) and per-file counters, and serializes
    both to CSV.
    """

    def __init__(self):
        # Per-signal counters, keyed "<name>_vs" (vitals) / "<name>_wv" (waveform).
        self.signal_stats: Dict[str, Dict[str, int]] = {}
        # File-level issue counters.
        self.file_stats: Dict[str, int] = self.init_files_dict()

    @staticmethod
    def init_signal_dict():
        """Return a fresh statistics dict for a single signal."""
        return {
            "channel": [],
            "files": 0,
            "source": "",
            "points": 0,
            "min": None,
            "mean": None,
            "max": None,
            "dataevents": 0,
            "sample_freq": {},
            "multiple_freq": 0,
            "units": [],
            "scale_factor": [],
            "nan_on_time": 0,
            "nan_on_values": 0,
            "overlapped_points": 0,
            "total_overlap_bundles": 0,
            "string_value_bundles": 0,
            "defective_signal": 0,
        }

    @staticmethod
    def init_files_dict():
        """Return a fresh statistics dict for file-level issues."""
        return {
            "total_files": 0,
            "missing_vs": 0,
            "missing_wv": 0,
            "no_label_signal": 0,
            "multiple_label_signal": 0,
        }

    @staticmethod
    def add_percentages(dataframe, column, denominator):
        """Insert a ``<column>_%`` column right after ``column``.

        ``denominator`` may be a scalar int or a Series; object-typed inputs
        are coerced to numeric first, and divisions by zero yield 0.
        """
        col_idx = dataframe.columns.to_list().index(column)
        if dataframe[column].dtype.name == "object":
            dataframe[column] = pd.to_numeric(dataframe[column])
        if not isinstance(denominator, int) and denominator.dtype.name == "object":
            denominator = pd.to_numeric(denominator)
        new_column = (dataframe[column] / denominator * 100).fillna(0)
        dataframe.insert(col_idx + 1, f"{column}_%", new_column)

    def add_signal_stats(self, signal, key, value=1, overwrite=False, source=None):
        """Update one counter of one signal.

        Dict-valued keys count occurrences of ``value``; list-valued keys
        collect distinct values; scalar keys are incremented by ``value``
        (or replaced when ``overwrite=True``).  When ``source`` is given,
        the signal name is suffixed with "_vs"/"_wv" accordingly.
        """
        if source:
            signal = f"{signal}_vs" if source == "vitals" else f"{signal}_wv"
        if signal not in self.signal_stats:
            self.signal_stats[signal] = self.init_signal_dict()
        if key not in self.signal_stats[signal]:
            raise ValueError(f"Wrong key for summary stats: {key}")
        if isinstance(self.signal_stats[signal][key], dict):
            self._increment_dict(signal, key, value)
        elif isinstance(self.signal_stats[signal][key], list):
            self._increment_list(signal, key, value)
        else:
            if overwrite:
                self.signal_stats[signal][key] = value
            else:
                self.signal_stats[signal][key] += value

    def _increment_dict(self, signal, key, value):
        # Occurrence counter: {value: count}.
        if value not in self.signal_stats[signal][key]:
            self.signal_stats[signal][key][value] = 1
        else:
            self.signal_stats[signal][key][value] += 1

    def _increment_list(self, signal, key, value):
        # Collect distinct values only.
        current_values = self.signal_stats[signal][key]
        if value not in current_values:
            current_values.append(value)

    def add_file_stats(self, key):
        """Increment one file-level counter."""
        if key not in self.file_stats:
            raise ValueError(f"Wrong key for summary stats: {key}")
        self.file_stats[key] += 1

    def _update_mean(self, signal_index: str, add_mean: float, add_points: int):
        """Return the running mean updated with ``add_points`` new samples.

        Must be called *before* the signal's "points" counter is incremented,
        since the stored count is used as the old sample size.
        """
        old_mean = self.signal_stats[signal_index]["mean"]
        # BUG FIX: explicit None check so a legitimate running mean of 0.0
        # is not discarded as "unset".
        if old_mean is not None:
            old_points = self.signal_stats[signal_index]["points"]
            all_points = old_points + add_points
            new_mean = (
                old_mean * old_points / all_points + add_mean * add_points / all_points
            )
        else:
            new_mean = add_mean
        return new_mean

    def add_from_signal(self, signal: "BedmasterSignal"):
        """Fold one signal's metadata, value range and NaN counts into the
        per-signal statistics."""
        signal_name = (
            f"{signal.name}_vs" if signal.source == "vitals" else f"{signal.name}_wv"
        )
        if signal_name not in self.signal_stats:
            self.signal_stats[signal_name] = self.init_signal_dict()
        self.add_signal_stats(signal_name, "files")
        for field in ["channel", "units", "scale_factor"]:
            self.add_signal_stats(signal_name, field, getattr(signal, field))
        self.add_signal_stats(signal_name, "source", signal.source, overwrite=True)
        for sample_freq, _ in signal.sample_freq:
            self.add_signal_stats(signal_name, "sample_freq", sample_freq)
        if len(signal.sample_freq) > 1:
            self.add_signal_stats(signal_name, "multiple_freq")
        # Running min/max/mean over all files seen so far.
        # BUG FIX: use `is not None` so a stored extremum of 0 is kept.
        old_min = self.signal_stats[signal_name]["min"]
        add_min = signal.value.min()
        new_min = min(old_min, add_min) if old_min is not None else add_min
        self.add_signal_stats(signal_name, "min", new_min, overwrite=True)
        new_mean = self._update_mean(
            signal_name,
            signal.value.mean(),
            signal.value.size,
        )
        self.add_signal_stats(signal_name, "mean", new_mean, overwrite=True)
        old_max = self.signal_stats[signal_name]["max"]
        add_max = signal.value.max()
        # BUG FIX: the original computed `max(old_min, add_min)` here
        # (copy-paste from the min branch), corrupting the stored maximum.
        new_max = max(old_max, add_max) if old_max is not None else add_max
        self.add_signal_stats(signal_name, "max", new_max, overwrite=True)
        self.add_signal_stats(signal_name, "points", signal.value.size)
        # Data events are flagged bits in the packed time-correction array.
        de_num = np.where(np.unpackbits(signal.time_corr_arr))[0].size
        self.add_signal_stats(signal_name, "dataevents", de_num)
        time_nans = np.where(np.isnan(signal.time))[0].size
        self.add_signal_stats(signal_name, "nan_on_time", time_nans)
        value_nans = np.where(np.isnan(signal.value))[0].size
        self.add_signal_stats(signal_name, "nan_on_values", value_nans)

    def to_csv(self, output_dir, files_base_name):
        """Write the accumulated statistics as two CSVs in ``output_dir``:
        ``<base>_bedmaster_signal_stats.csv`` and
        ``<base>_bedmaster_files_stats.csv``."""
        # Create signals dataframe
        signal_stats_df = pd.DataFrame(self.signal_stats).T
        for column in ["nan_on_time", "nan_on_values", "overlapped_points"]:
            self.add_percentages(signal_stats_df, column, signal_stats_df["points"])
        self.add_percentages(signal_stats_df, "files", self.file_stats["total_files"])
        self.add_percentages(
            signal_stats_df,
            "total_overlap_bundles",
            signal_stats_df["files"],
        )
        signal_stats_df = signal_stats_df.round(2)
        signal_stats_df = signal_stats_df.rename_axis("signal").reset_index()
        signal_stats_df = signal_stats_df.sort_values(
            by=["source", "files"],
            ascending=[False, False],
        )
        # Strip the "_vs"/"_wv" suffix used internally as the dict key.
        signal_stats_df["signal"] = signal_stats_df["signal"].apply(lambda x: x[:-3])
        # Save DF to csv
        signal_stats_df.to_csv(
            os.path.join(output_dir, f"{files_base_name}_bedmaster_signal_stats.csv"),
            index=False,
        )
        # Create files dataframe
        file_stats_df = pd.DataFrame(
            self.file_stats.items(),
            columns=["issue", "count"],
        )
        self.add_percentages(file_stats_df, "count", self.file_stats["total_files"])
        file_stats_df = file_stats_df.round(2)
        # Save df to csv
        file_stats_df.to_csv(
            os.path.join(output_dir, f"{files_base_name}_bedmaster_files_stats.csv"),
            index=False,
        )
| true |
18758edb44b3d5cf3840ceed19909e3174a9e335 | Python | Nicolas-Fernandez/ChineseRemainder | /PiratesV1.py | UTF-8 | 2,964 | 3.53125 | 4 | [] | no_license | import random
print ("")
print ("You are a poor chinese slave cook on a bloodthirsty pirates ship.")
NBPIRATES1 = int (input ("How many pirates on this ship? (7)--> "))
print("")
print ("After their last ritual,"), (NBPIRATES1), ("the forbans finally decided to share their magot ...")
print ("The chest contains an integer x of gold coins.")
print("")
print ("They share these coins fairly, but there is a little left.")
print ("In their great magnanimity, they decide to offer you his remaining pieces!")
REMAIN1 = int (input ("How many coins did you receive? (2)--> "))
print("")
print ("But a mutiny broke out then, making many victims ...")
NBPIRATES2 = NBPIRATES1 - int (input ("How many pirates died in this tragic event? (2)--> "))
print("")
print ("The"), (NBPIRATES2), ("survivors again share fairly ALL the coins, but they still have a few more.")
print ("In their great mansuetude, they decide to offer you again this remaining pieces!")
REMAIN2 = int (input ("How many coins did you receive this time? (3)--> "))
print("")
print ("But now a terrible storm breaks out and the ship crashes on rocks ...")
NBPIRATES3 = int (input ("You survived! Stranded on the beach, how many pirates find you by your side? (3)--> "))
print("")
print ("The"), (NBPIRATES3), ("remaining pirates, again share fairly ALL the pieces, but they still have a little bit of them.")
print ("In their great kindness, they decide to offer you again this remaining pieces!")
REMAIN3 = int (input ("How many coins did you receive after this last disbursement? (2)--> "))
print("")
print ("And while you prepare a delicious coconut turtle ragout ...")
print (" you wonder how many gold coins you can get at least ... ")
print (" if you poison this unfortunate survivors!")
print ("")
ANSWER = int (input ("Is that true ... How many pieces does this mysterious chest contain? (23)--> "))
LASTMODULO = NBPIRATES1 * NBPIRATES2 * NBPIRATES3
print ("")
print ("Congruance ="), (LASTMODULO)
UNKNOW1 = NBPIRATES2 * NBPIRATES3
while (UNKNOW1 % NBPIRATES1) != 1:
UNKNOW1 = UNKNOW1 * 2
UNKNOW2 = NBPIRATES1 * NBPIRATES3
while (UNKNOW2 % NBPIRATES2) != 1:
UNKNOW2 = UNKNOW2 * 2
UNKNOW3 = NBPIRATES1 * NBPIRATES2
while (UNKNOW3 % NBPIRATES3) != 1:
UNKNOW3 = UNKNOW3 * 2
TREASURE = (UNKNOW1 * REMAIN1 + UNKNOW2 * REMAIN2 + UNKNOW3 * REMAIN3) % LASTMODULO
if ANSWER == TREASURE:
print("")
print ("Congratulations, there was at least"), (TREASURE), ("gold coins in this chest!")
print ("Have you put into action your machiavelic plan? Only you have the answer ...")
else:
print("")
print ("And no, you were wrong ... There was at least"), (TREASURE), ("gold coins in this chest ...")
print ("But, do you have to realize your sinister project? Only you have the answer ...")
print ("")
print ("Press enter key to close this game.")
print ("Bye!")
print ("")
| true |
c24d65732413c0abbe3c46313eb6fd9cdb97c646 | Python | dhrvdwvd/practice | /python_programs/95_requests_module.py | UTF-8 | 510 | 3.28125 | 3 | [] | no_license | import requests
# Now let's try to get a webpage. For this e.g., let's try
# to get Github's public timeline:
r = requests.get("https://api.github.com/events")
# Now we have a Response object called r. We can get all the
# information from this object.
# Requests' simple API means that all HTTP requests are obvious.
# For e.g., this is how you make a HTTP POST request.
r = requests.post("http://httpbin.org/post", data = {'key':'value'})
# Keep visiting this file and understand this module completely. | true |
660b50432d3014854a38b8a3ffee12599ef519e6 | Python | chaoshoo/python | /machineL/com/chaos/machineL/LogisticRegression.py | UTF-8 | 4,189 | 2.921875 | 3 | [] | no_license | '''
Created on 2016年7月19日
@author: Hu Chao
'''
import random;
import matplotlib.pyplot as plt;
import numpy as np;
import copy
import com.chaos.machineL.Helper as Helper
from com.chaos.machineL import GradientDescent
def initTheta(exampleXs):
    """Build a random column vector theta sized to the widest example row.

    Each component is drawn uniformly from [10, 100]; the result is an
    (n, 1) numpy matrix where n is the longest row length in exampleXs.
    """
    width = max((len(row) for row in exampleXs), default=0)
    theta = [random.uniform(10, 100) for _ in range(width)]
    return np.mat(theta).T
class LogisticRegression(Helper.Helper):
    """Logistic regression fitted by stochastic/batch gradient ascent or
    Newton's method, over synthetic noisy sigmoid data generated in
    __init__ (x in [start/1000, end/1000), true parameter `param`)."""
    def __init__(self, start, end, param):
        # Generate (x, noisy sigmoid(param*x)) examples and delegate storage
        # to the Helper base class; initTheta supplies the starting theta.
        Helper.Helper.__init__(self, [[x / 1000, self.__hypothesis([x / 1000, 1], [param, 0])]for x in range(start , end)], initTheta)
        self.__gradient = GradientDescent.GradientDescent(self.getExampleXs(), self.getExampleYs(), self.getTheta())
    def __hypothesis(self, exampleX, theta):
        # Sigmoid of theta.x with +/-80% multiplicative noise on the logit;
        # used only for generating the training labels.
        htheta = 0;
        for j, value in enumerate(exampleX):
            htheta = htheta + (theta[j] * value);
        return 1 / (1 + np.exp(-1 * htheta + random.uniform(-0.8 * htheta, 0.8* htheta)));
    def hypothesis(self, exampleX, theta):
        """Noise-free sigmoid hypothesis h(x) = 1 / (1 + exp(-theta.T x))."""
        htheta = theta.T.dot(exampleX);
        return 1 / (1 + np.exp(-1 * htheta));
    def __stochasticGradient(self, exampleY, exampleX, theta):
        # Gradient contribution of a single example: x * (y - h(x)).
        return (exampleX).dot(exampleY - self.hypothesis(exampleX, theta))
    def __batchGradient(self, theta):
        # Sum of the per-example gradients over the whole training set.
        row, column = self.getExampleXs().shape
        result = np.mat([0 for x in range(0, row)]).T
        index = 0
        while index < column :
            delta = self.__stochasticGradient(self.getExampleYs()[index], self.getExampleXs().T[index].T, theta)
            result = result + delta
            index = index + 1
        return result
    def __hessian(self, theta):
        # Hessian of the log-likelihood, built element-by-element.
        row, column = theta.shape
        hessionA = np.zeros((row,row))
        for i in range(0, row):
            for j in range(0, row):
                hessionA[i][j] = self.__hessionElement(i, j, theta)
        return np.mat(hessionA)
    def __hessionElement(self, i, j, theta):
        # H[i][j] = -sum_k x_k[i] * x_k[j] * h_k * (1 - h_k).
        result = 0
        h = self.hypothesis(self.getExampleXs(), theta)
        h = h - np.power(h, 2)
        row, column = h.shape
        index = 0
        while index < column :
            # print(self.getExampleXs().T[index].getA1()[i])
            # print(self.getExampleXs().T[index].getA1()[j])
            # print(h)
            result = result - self.getExampleXs().T[index].getA1()[i] * self.getExampleXs().T[index].getA1()[j] * h.getA1()[index]
            index = index + 1
        return result;
    def newton(self):
        """Run 10 Newton-Raphson iterations and return the resulting theta.

        The remaining iteration count is printed in the finally block even
        if the Hessian inversion fails mid-way.
        """
        theta = copy.deepcopy(self.getTheta())
        count = 10
        try:
            while count > 0:
                hession = self.__hessian(theta)
                hessionI = hession.I
                gradient = self.__batchGradient(theta)
                theta = theta - hessionI.dot(gradient)
                count = count - 1
        finally:
            print(count)
        return theta
    def stochasticGradient(self, step):
        """Fit via stochastic gradient with the given step size."""
        return self.__gradient.stochasticGradient(self.__stochasticGradient, step)
    def batchGradient(self, step, divisor):
        """Fit via batch gradient with the given step size and convergence divisor."""
        return self.__gradient.batchGradient(self.__batchGradient, step, divisor)
if __name__ == '__main__':
    # Demo: generate noisy sigmoid data (true parameter 3), fit with three
    # methods, and overlay the fitted curves on the raw points.
    logisticRe = LogisticRegression(-50000, 50000, 3)
    originPointX = [value[0] for value in logisticRe.getExamples()];
    originPointY = [value[1] for value in logisticRe.getExamples()];
    x = np.linspace(-50, 50, 100000);
    plt.plot(originPointX, originPointY, 'ro');
    # Stochastic gradient fit (green curve).
    stochastic = logisticRe.stochasticGradient(0.0001);
    print(stochastic);
    stochasticY = [logisticRe.hypothesis(np.mat([value,1]).T, stochastic).A[0] for value in x]
    plt.plot(x, stochasticY, 'g');
    # Batch gradient fit (blue curve).
    batch = logisticRe.batchGradient(0.0001, 0.00001);
    print(batch);
    batchY = [logisticRe.hypothesis(np.mat([value,1]).T, batch).A[0] for value in x];
    plt.plot(x, batchY, 'b');
    # Newton's method fit (red curve).
    newton = logisticRe.newton().T.getA1()
    print(newton)
    newtonY = [logisticRe.hypothesis(np.mat([value,1]).T, newton).A[0] for value in x];
    plt.plot(x, newtonY, 'r');
    plt.show();
| true |
43e65ffad501c3be360f5a70110e0925f55cc4d7 | Python | GlenEder/AdventOfCode2017 | /Day6/partA.py | UTF-8 | 1,099 | 3.1875 | 3 | [] | no_license | import copy
def hasHappened(listA, fullList):
    """Return True when listA equals any entry already stored in fullList."""
    return any(candidate == listA for candidate in fullList)
with open("input.txt") as f:
data = f.read()
numberWords = data.split('\t')
numbers = []
for i in range(len(numberWords)):
numbers.append(int(numberWords[i]))
steps = 0
previousPatterens = [numbers]
while True:
steps = steps + 1
newNumbers = copy.copy(previousPatterens[len(previousPatterens) - 1])
max = newNumbers[0]
posOfMax = 0
pos = 0
#find highest block
for i in newNumbers:
pos = pos + 1
if i > max:
max = i
posOfMax = pos
#distrubite block
posOfMax = posOfMax - 1 #acount for starting at index 0
if(posOfMax < 0):
posOfMax = 0
newNumbers[posOfMax] = 0
while max > 0:
posOfMax = posOfMax + 1
if(posOfMax >= len(newNumbers)):
posOfMax = posOfMax - len(newNumbers)
newNumbers[posOfMax] = newNumbers[posOfMax] + 1
max = max - 1
if hasHappened(newNumbers, previousPatterens):
print(steps)
exit(0)
else:
print(newNumbers)
previousPatterens.append(newNumbers)
| true |
a2d98dc60df3619e1418ca22ba470b374ae6f41d | Python | ChalamiuS/desubot | /plugins/ap-marathon.py | UTF-8 | 989 | 2.75 | 3 | [] | no_license | from motobot import command
from requests import get
from bs4 import BeautifulSoup
from time import time
from re import sub
@command('marathonlist')
def marathonlist_command(bot, nick, channel, message, args):
    """IRC command: reply with the location of the marathon list."""
    # BUG FIX: `url` was referenced here but never defined in this scope
    # (it only existed locally inside get_current_marathon), so invoking
    # the command raised NameError.
    # TODO(review): confirm the human-facing list URL; this is the calendar
    # endpoint used by get_current_marathon.
    url = 'https://marathon.chalamius.se/calendar.json'
    return "The marathon list can be found at {}.".format(url)
@command('marathon')
def marathon_command(bot, nick, channel, message, args):
    """IRC command: announce today's marathon entry (title, date, link, note)."""
    title, date, link, note = get_current_marathon()
    return "Today's marathon ({}) is {} ({}) {}".format(
        date, title, link, note
    )
def get_current_marathon():
    """Fetch the marathon calendar JSON and return the last entry's
    (name, date, url, note) tuple — the most recent marathon."""
    url = 'https://marathon.chalamius.se/calendar.json'
    entries = get(url).json()['items']
    entry = entries[-1]
    return entry['name'], entry['date'], entry['url'], entry['note']
@command('pantsu')
@command('pants')
@command('panties')
def pants_command(bot, nick, channel, message, args):
    """IRC command (aliases: pantsu/pants/panties): reply with the ED video link."""
    video_url = 'https://www.youtube.com/watch?v=T_tAoo787q4'
    video_title = 'Sora no Otoshimono #2 Creditless ED'
    return 'Panties! {} - {}'.format(video_title, video_url)
| true |
a0672f1df40ffc642f741250905b84b2b5bd93d4 | Python | xuan-w/wp-blog | /_posts/convert_pandoc.py | UTF-8 | 6,973 | 2.703125 | 3 | [] | no_license | #!/usr/bin/python3
# ---coding=utf-8 -----
import re, os, glob, sys, shutil
def is_empty(s):
    """Return True when *s* contains nothing but whitespace (or is empty)."""
    return not s.strip()
# Inclusive Unicode code-point ranges treated as CJK by is_cjk() below
# (CJK Unified Ideograph blocks and their extensions).
cjk_ranges = [
    (0x4E00, 0x62FF),
    (0x6300, 0x77FF),
    (0x7800, 0x8CFF),
    (0x8D00, 0x9FCC),
    (0x3400, 0x4DB5),
    (0x20000, 0x215FF),
    (0x21600, 0x230FF),
    (0x23100, 0x245FF),
    (0x24600, 0x260FF),
    (0x26100, 0x275FF),
    (0x27600, 0x290FF),
    (0x29100, 0x2A6DF),
    (0x2A700, 0x2B734),
    (0x2B740, 0x2B81D),
    (0x2B820, 0x2CEAF),
    (0x2CEB0, 0x2EBEF),
    (0x2F800, 0x2FA1F)
]
def is_cjk(char):
    """Return True when *char*'s code point falls inside any CJK range."""
    code_point = ord(char)
    return any(low <= code_point <= high for low, high in cjk_ranges)
def join_lines(line1, line2):
    """Concatenate two lines, inserting a space unless line2 starts with a
    CJK character (CJK text joins without spaces)."""
    separator = '' if is_cjk(line2[0]) else ' '
    return line1 + separator + line2
def pangu(lines):
    """Insert spaces between CJK characters and adjacent Latin/digit runs
    (pangu-style spacing) and return the adjusted lines.

    CJK characters and non-alphanumerics are copied through unchanged; an
    alphanumeric character gets a space added on any side that touches a
    CJK character.
    """
    new_lines = []
    for line in lines:
        tlist = []
        n = len(line)
        for i, char in enumerate(line):
            # (removed the dead debugging stub `if char == '2': 1+1`)
            if is_cjk(char) or not char.isalnum():
                tlist.append(char)
            else:
                if i > 0 and is_cjk(line[i - 1]):
                    tlist.append(' ')
                tlist.append(char)
                # BUG FIX: the bound was `i < n`, which is always true inside
                # the loop and made `line[i + 1]` raise IndexError whenever a
                # line ended with an alphanumeric character.
                if i < n - 1 and is_cjk(line[i + 1]):
                    tlist.append(' ')
        new_lines.append(''.join(tlist))
    return new_lines
def replace_quotation_mark(lines):
    """Convert curly double quotes to CJK corner brackets when the quoted
    span contains CJK text.

    The input lines are joined into one string; whenever a balanced run of
    “…” quotes closes (count back to 0) and a CJK character was seen while
    inside it, every quote recorded on the stack is rewritten to 「/」.
    Note: unlike the other passes, this returns a single string, not a list.
    """
    line = ''.join(lines)
    n = len(line)
    tlist = []
    count = 0          # current quote nesting depth
    stack = []         # tlist indices of the quote characters seen so far
    flag = False       # True once a CJK char appears inside an open quote
    for i, char in enumerate(line):
        tlist.append(char)
        if is_cjk(char) and count > 0:
            flag = True
        if char == '“':
            stack.append(len(tlist) - 1)
            count += 1
        if char == '”':
            stack.append(len(tlist) - 1)
            count -= 1
            if count < 0:
                count = 0
        if count == 0 and flag:
            # Rewrite every recorded quote; already-converted entries no
            # longer match and are skipped.
            # NOTE(review): the stack is never cleared after conversion, so
            # old indices are re-scanned on later closes — harmless but
            # wasteful; confirm intended.
            for j in stack:
                if tlist[j] == '“':
                    tlist[j] = '「'
                if tlist[j] == '”':
                    tlist[j] = '」'
            flag = False
    return ''.join(tlist)
def remove_endings(lines):
    """Merge hard-wrapped lines into one line per paragraph.

    Consecutive non-blank lines are joined (with CJK-aware spacing via
    join_lines); a blank line terminates the current paragraph and is kept
    as a separator. Each emitted paragraph ends with a newline. Assumes
    every input line ends with '\\n' (the trailing character is stripped
    before joining).
    """
    new_lines = []
    current = None  # text of the paragraph being accumulated, or None
    # (cleanup: removed the unused `i` from enumerate and the unused
    # `n = len(lines)` recomputed on every iteration)
    for line in lines:
        if current is None:
            if not is_empty(line):
                # begin a paragraph
                current = line[:-1]
        else:
            if is_empty(line):
                # end a paragraph
                new_lines.append(current + '\n')
                new_lines.append('\n')
                current = None
            else:
                # continue a paragraph
                current = join_lines(current, line[:-1])
    if current is not None:
        new_lines.append(current + '\n')
    return new_lines
def correct_img_name(path, new_name):
    """Resolve the real file extension for *path* and return the pair
    (new_name + ext, original_stem + ext).

    When the file was saved with a ``.tmp`` extension, search ``media/``
    for a sibling with the same stem and adopt its real extension instead.
    """
    old_name = os.path.split(path)[-1]
    stem, ext = os.path.splitext(old_name)
    if ext == '.tmp':
        for name in glob.glob('media/%s*' % stem):
            new_ext = os.path.splitext(name)[1]
            if new_ext != '.tmp':
                ext = new_ext
                break
    return new_name + ext, stem + ext
def process_image(lines):
    """Interactively rename and relocate every Markdown image reference.

    For each line containing ``![alt](path)``: strip any trailing pandoc
    attribute block ``{...}``, prompt the user for a new image name and alt
    text, rewrite the link to ``../../images/<prefix>-<name>``, and copy the
    file from ``media/`` into ``../images/``. The image prefix is asked for
    once, on the first image encountered. Mutates and returns *lines*.
    """
    prefix = None
    for i, line in enumerate(lines):
        # Drop pandoc-style attributes attached to the image link.
        line = re.sub(r'(!\[[^\[\]]*\]\([^()]*\))\{[^{}]*\}', r'\1', line)
        if re.search(r'!\[[^\[\]]*\]\([^()]*\)', line) is not None:
            if prefix is None:
                prefix = input('Please enter image prefix \n')
            org_path = re.search(r'!\[[^\[\]]*\]\(([^()]*)\)', line).group(1)
            img_name = input('Please input new name for %s \n' % org_path)
            img_name = img_name.replace(' ', '-')
            img_name, old_img_name = correct_img_name(org_path, img_name)
            new_path = '../../images/%s-%s' % (prefix, img_name)
            org_alt = re.search(r'!\[([^\[\]]*)\]\([^()]*\)', line).group(1)
            new_alt = input('Please input new alt for %s, original alt was %s \n' % (org_path, org_alt))
            # NOTE(review): only the first image link per line is rewritten.
            line = re.sub(r'(!\[)[^\[\]]*(\]\()[^()]*(\))', r'\1 ' + new_alt + r' \2' + new_path + r'\3', line)
            shutil.copy('media/' + old_img_name, '../images/' + '%s-%s' % (prefix, img_name))
        lines[i] = line
    return lines
def get_indent(line):
    """Return the length of the leading indent/list-marker prefix of *line*
    (whitespace, an optional '1.'-style ordinal, an optional bullet, then
    at least one space); 0 when the line has no such prefix."""
    prefix = re.match(r'\s*(?:[0-9a-zA-Z#]\.)?[-+*]?\s+', line)
    return len(prefix.group(0)) if prefix else 0
def left_strip_quotation(line):
    """Drop an optional leading '>' plus the following whitespace from
    *line*; return the line unchanged when no such prefix matches."""
    quoted = re.match(r'>?\s+', line)
    return line[quoted.end():] if quoted else line
def remove_quoted_block(lines):
    """Replace '>'-quoted lines with plain lines indented like the text
    that preceded the quote.

    The most recent non-quoted, non-blank line's indent (via get_indent) is
    remembered; quoted lines get that indent instead of their '>' prefix.
    Two consecutive blank lines reset the remembered indent to 0.
    """
    new_lines = []
    indentation = 0
    one_end = False
    for line in lines:
        if line[0] == '>':
            new_lines.append(' ' * indentation + left_strip_quotation(line))
        else:
            if line == '\n':
                # if there are two \n, restart indentation count
                if one_end:
                    one_end = False
                    indentation = 0
                else:
                    one_end = True
            else:
                indentation = get_indent(line)
                one_end = False
            new_lines.append(line)
    return new_lines
def get_list_input(input_name):
    """Interactively collect a list of strings named *input_name*.

    An empty input finishes; entering 'd' removes the previous entry (so the
    literal value 'd' can never be stored once the list is non-empty).
    NOTE(review): 'd' entered while the list is empty falls through to the
    else branch and is appended as a value — confirm intended.
    """
    tags = []
    while True:
        tag_input = input('Please input %s, d to delete previous input\n' % input_name)
        if tag_input == '':
            break
        if tag_input == 'd' and len(tags) > 0:
            poped = tags.pop()
            print('%s was deleted' % poped)
        else:
            tags.append(tag_input)
    return tags
def generate_head():
    """Interactively build a Jekyll/WordPress YAML front-matter block.

    Prompts for title, slug, date parts, tags and categories, then returns
    the formatted '---' delimited header string. Empty tag/category lists
    render as '[ ]' / 'Uncategorized' respectively.
    """
    title = input('Please input title\n')
    slug = input('Please input slug for this post\n')
    year = input('Please input year\n')
    month = input('Please input month\n')
    day = input('Please input day\n')
    time = input('Please input time\n')
    tags = get_list_input('tags')
    cats = get_list_input('categories')
    if len(tags) == 0:
        s_tags = '[ ]'
    else:
        s_tags = '\n - ' + '\n - '.join(tags)
    if len(cats) == 0:
        s_cats = ' - Uncategorized'
    else:
        s_cats = ' - ' + '\n - '.join(cats)
    return "---\npost_title: '%s'\npost_name: '%s'\npost_date: '%s-%s-%s %s'\nlayout: post\npublished: true\ntags: %s\ncategories:\n%s\n---\n" % (
        title, slug, year, month, day, time, s_tags, s_cats)
if __name__ == '__main__':
    # Pipeline: read tmp.md, de-quote, merge wrapped lines, pangu-space,
    # rewrite images, convert quotes, then write tout.md.
    # in_md = sys.argv[1]
    # out_md = sys.argv[2]
    in_md = 'tmp.md'
    out_md = 'tout.md'
    head = ''
    with open(in_md, encoding='utf-8') as fp, open(out_md, 'w', encoding='utf-8') as outfp:
        lines = fp.readlines()
        # head = generate_head()
        lines = remove_quoted_block(lines)
        lines = remove_endings(lines)
        lines = pangu(lines)
        lines = process_image(lines)
        # NOTE(review): replace_quotation_mark returns a single string, so
        # `head + lines` is string concatenation and writelines() then
        # iterates it character-by-character — works, but confirm intended.
        lines = replace_quotation_mark(lines)
        lines = head + lines
        outfp.writelines(lines)
| true |
f20381ed8aca0d2f86228542a30b4afcbb9fc349 | Python | offbynull/offbynull.github.io | /docs/data/learn/Bioinformatics/output/ch9_code/src/Router.py | UTF-8 | 409 | 2.984375 | 3 | [] | no_license | if __name__ == '__main__':
import importlib
val = input()
val = val.split()
if len(val) == 1:
module_name = val[0]
function_name = 'main'
elif len(val) == 2:
module_name = val[0]
function_name = val[1]
else:
raise ValueError(f'Too many parameters: {val}')
module = importlib.import_module(module_name)
getattr(module, function_name)()
| true |
2192f442ed983603565f3626e35b9676d22fb9af | Python | kjnh10/pcw | /work/atcoder/abc/abc051/D/answers/056036_hs484.py | UTF-8 | 521 | 2.796875 | 3 | [] | no_license | N,M = map(int,input().split())
INF = 100000000
# Direct-edge weight matrix (INF = no direct edge). N and M come from the
# first input line parsed above; edges are given 1-based in the input.
g = [[INF] * N for _ in range(N)]
for _ in range(M):
    a, b, c = map(int, input().split())
    a -= 1
    b -= 1
    g[a][b] = c
    g[b][a] = c
# All-pairs shortest distances (Floyd-Warshall), seeded from a copy of g.
# Row slices replace the original element-by-element copy loop.
t = [row[:] for row in g]
for k in range(N):
    for i in range(N):
        for j in range(N):
            t[i][j] = min(t[i][j], t[i][k] + t[k][j])
# An edge belongs to no shortest path iff a strictly shorter route exists
# between its endpoints; count those edges once per unordered pair.
ans = 0
for i in range(N):
    for j in range(i):
        if g[i][j] != INF and t[i][j] != g[i][j]:
            ans += 1
print(ans)
e4bfc023bcc10eae1b4b5bc0c17bc6f6d3471367 | Python | WEgeophysics/watex | /examples/applications/plot_data_exploratory_quick_view.py | UTF-8 | 8,710 | 3.296875 | 3 | [
"BSD-3-Clause"
] | permissive | """
=====================================================
Data exploratory: Quick view
=====================================================
Real-world examples of exploratory data analysis and visualization.
"""
# Author: L.Kouadio
# Licence: BSD-3-clause
#%%
# Import required modules
import matplotlib.pyplot as plt
from watex.view import ExPlot, QuickPlot, TPlot
from watex.datasets import fetch_data , load_bagoue , load_edis
from watex.transformers import StratifiedWithCategoryAdder
#%%
# Data Exploratory with :class:`~watex.view.ExPlot`
# ---------------------------------------------------
# Explore data for analysis purpose
# `ExPlot` is a shadow class. Exploring data is needed before creating a model
# since it gives a feel for the data and is also a great excuse to meet and
# discuss issues with the business units that control the data. `ExPlot`
# methods return an instanced object that inherits from
# :class:`~watex.property.Baseplots`, the ABC (Abstract Base Class) for
# visualization. It provides some data exploration tricks. Here are a few
# examples for analysis and visualization.
#%%
# * Use parallel coordinates in multivariates for clustering visualization
#   (yellowbrick must be installed if the 'pkg' argument is set to 'yb').
data = fetch_data('original data').get('data=dfy1')
p = ExPlot(tname='flow').fit(data)
p.plotparallelcoords(pkg='pd')
#%%
# * Plot each sample on a circle or square, with features on the
#   circumference, to visualize separation between targets.
data2 = fetch_data('bagoue original').get('data=dfy2')
p = ExPlot(tname='flow').fit(data2)
p.plotradviz(classes=None, pkg='pd')
#%%
# * Create pairwise comparisons between features.
#   The plot shows a ['pearson'|'spearman'|'covariance'] correlation.
data = fetch_data('bagoue original').get('data=dfy1')
p = ExPlot(tname='flow').fit(data)
p.plotpairwisecomparison(
    fmt='.2f',
    corr='spearman',
    annot=True,
    cmap='RdBu_r',
    vmin=-1,
    vmax=1,
)
#%%
# Create a pair grid: a matrix of columns and kernel density estimations.
# To colorize by columns from a data frame, use the 'hue' parameter.
data = fetch_data('bagoue original').get('data=dfy1')
p = ExPlot(tname='flow').fit(data)
p.plotpairgrid(vars=['magnitude', 'power', 'ohmS'])
#%%
# Features analysis with :class:`~watex.view.QuickPlot`
# ---------------------------------------------------------
# Special class dealing with analysis modules for quick diagrams,
# histograms, and bar visualization.
# Originally, it was designed for the flow rate prediction, however, it still
# works with any other dataset by following the details of the parameters. Here are
# some quick features analysis examples.
#%%
# * Create a plot of naive visualization
df = load_bagoue().frame
stratifiedNumObj = StratifiedWithCategoryAdder('flow')
strat_train_set, *_ = stratifiedNumObj.fit_transform(X=df)
# Scatter-plot options forwarded to pandas' plotting backend.
pd_kws = {
    'alpha': 0.4,
    'label': 'flow m3/h',
    'c': 'flow',
    'cmap': plt.get_cmap('jet'),
    'colorbar': True,
}
qkObj = QuickPlot(fs=25.)
qkObj.fit(strat_train_set)
qkObj.naiveviz(x='east', y='north', **pd_kws)
#%%
# * Provide the names of at least 04 features and discuss their distribution.
#   This method maps a dataset onto multiple axes arrayed in a grid of rows
#   and columns that correspond to levels of features in the dataset. The
#   plots it produces are often called "lattice", "trellis", or
#   'small multiple' graphics.
data = load_bagoue().frame
qkObj = QuickPlot(
    leg_kws={'loc': 'upper right'},
    fig_title='`sfi` vs`ohmS|`geol`',
)
qkObj.tname = 'flow'  # target the DC-flow rate prediction dataset
qkObj.mapflow = True  # to hold category FR0, FR1 etc..
qkObj.fit(data)
sns_pkws = {'aspect': 2, "height": 2}
map_kws = {'edgecolor': "w"}
qkObj.discussingfeatures(
    features=['ohmS', 'sfi', 'geol', 'flow'],
    map_kws=map_kws,
    **sns_pkws,
)
#%%
# * The joint method visualizes the correlation of two features.
#   Draw a plot of two features with bivariate and univariate graphs.
data = load_bagoue().frame
qkObj = QuickPlot(
    lc='b',
    sns_style='darkgrid',
    fig_title='Quantitative features correlation',
).fit(data)
sns_pkws = {'kind': 'reg'}  # alternatives: 'kde', 'hex'; optionally "hue": 'flow'
joinpl_kws = {"color": "r", 'zorder': 0, 'levels': 6}
plmarg_kws = {'color': "r", 'height': -.15, 'clip_on': False}
qkObj.joint2features(
    features=['ohmS', 'lwi'],
    join_kws=joinpl_kws,
    marginals_kws=plmarg_kws,
    **sns_pkws,
)
#%%
# Tensors recovery with :class:`~watex.view.TPlot`
# ---------------------------------------------------------
# Tensor plot from EM processing data
# `TPlot` is a Tensor (Impedances, resistivity, and phases ) plot class.
# Explore SEG ( Society of Exploration Geophysicist ) class data. Plot recovery
# tensors. `TPlot` method returns an instanced object that inherits
# from :class:`watex.property.Baseplots` ABC (Abstract Base Class) for
# visualization. Here are a few demonstration examples.
#%%
# * Plot multiple sites/stations with signal recovery,
#   using 3 sample EDIs.
edi_data = load_edis(return_data=True, samples=3)
TPlot(fig_size=(5, 3), font_size=7., sns_style='ticks').fit(edi_data).plot_multi_recovery(
    sites=['S00'], colors=['o', 'ok--'])
#%%
# * Plot two-dimensional recovery tensor
# get some 12 samples of EDI for the demo
edi_data = load_edis(return_data=True, samples=12)
# Customize the plot. FIX: raw strings keep the LaTeX '\Omega' from being
# parsed as an (invalid, warning-producing) string escape sequence.
plot_kws = dict(
    ylabel=r'$Log_{10}Frequency [Hz]$',
    xlabel=r'$Distance(m)$',
    cb_label=r'$Log_{10}Rhoa[\Omega.m$]',
    fig_size=(7, 4),
    font_size=7.,
)
t = TPlot(**plot_kws).fit(edi_data)
# plot recovery2d using the log10 resistivity
t.plot_tensor2d(to_log10=True)
#%%
# * Plot two-dimensional filtered tensors using the default trimming moving-average (AMA) filter
# take the 12 samples of EDI and plot the corrected tensors
edi_data = load_edis(return_data=True, samples=12)
# Customize the plot. FIX: raw strings keep '\Omega' from being treated as an
# invalid string escape (SyntaxWarning on recent Pythons).
plot_kws = dict(
    ylabel=r'$Log_{10}Frequency [Hz]$',
    xlabel=r'$Distance(m)$',
    cb_label=r'$Log_{10}Rhoa[\Omega.m$]',
    fig_size=(7, 4),
    font_size=7.,
)
t = TPlot(**plot_kws).fit(edi_data)
# plot filtered tensor using the log10 resistivity
t.plot_ctensor2d(to_log10=True)
#%%
# Model evaluation with :class:`~watex.view.EvalPlot`
# ---------------------------------------------------------
# Metric and dimensionality Evaluation Plots
# `EvalPlot` Inherited from :class:`BasePlot`. Dimensional reduction and metric
# plots. The class works only with numerical features.
#%%
# * Plot ROC for RandomForest classifier
from watex.exlib.sklearn import RandomForestClassifier
from watex.datasets.dload import load_bagoue
from watex.utils import cattarget
from watex.view.mlplot import EvalPlot

X, y = load_bagoue(as_frame=True)
rdf_clf = RandomForestClassifier(random_state=42)  # our estimator
b = EvalPlot(scale=True, encode_labels=True)
b.fit_transform(X, y)
# Binarize the label b.y (labels=2 is equivalent to labels=[0, 1]).
ybin = cattarget(b.y, labels=2)
b.y = ybin
# Plot styling.
b.font_size = 7.
b.lc = 'r'
b.lw = 7.
b.sns_style = 'ticks'
b.plotROC(rdf_clf, label=1, method="predict_proba")  # class=1
#%%
# * Plot confusion matrix
# Options forwarded to matplotlib's matshow.
matshow_kwargs = {
    'aspect': 'auto',  # or 'equal'
    'interpolation': None,
    'cmap': 'cool',
}
plot_kws = {
    'lw': 3,
    'lc': (.9, 0, .8),
    'font_size': 15.,
    'cb_format': None,
    'xlabel': 'Predicted classes',
    'ylabel': 'Actual classes',
    'font_weight': None,
    'tp_labelbottom': False,
    'tp_labeltop': True,
    'tp_bottom': False,
}
# replace the integer identifier with a litteral string
b.litteral_classes = ['FR0', 'FR1']  # could extend: 'FR2', 'FR3'
b.plotConfusionMatrix(clf=rdf_clf, matshow_kws=matshow_kwargs, **plot_kws)
| true |
a836d5580b838f4e9a40f89d4d37ce679f1a0dfe | Python | szarroug3/X-Ray-Creator-2 | /XRayCreator.py | UTF-8 | 11,453 | 2.609375 | 3 | [
"MIT"
] | permissive | # XRayCreator.py
import os
import sys
import argparse
import re
import httplib
from kindle.books import Books
from kindle.customexceptions import *
from time import sleep
from glob import glob
from shutil import move, rmtree
from pywinauto import *
#--------------------------------------------------------------------------------------------------------------------------END OF IMPORTS--------------------------------------------------------------------------------------------------------------------------#
MAX_LINE_LENGTH = 60
def UpdateAll():
    # Flag every book on the Kindle (module-level `kindleBooks`) for rebuild.
    for book in kindleBooks:
        MarkForUpdate(book)
def Update():
kindleBooks.PrintListOfBooks()
books = raw_input('Please enter book number(s) of the book(s) you\'d like to update in a comma separated list: ')
books = books.replace(' ', '')
books = books.split(',')
pattern = re.compile('([0-9]+[-][0-9]+)')
for bookID in books:
if bookID.isdigit():
if int(bookID) <= len(kindleBooks):
book = kindleBooks.books[int(bookID) - 1]
MarkForUpdate(book)
elif pattern.match(bookID):
bookRange = bookID.split('-')
rangeA = int(bookRange[0])
rangeB = int(bookRange[1])
if rangeA > rangeB:
print 'Numbers are reversed. Will start with %s and end with %s' % (rangeB, rangeA)
temp = rangeA
rangeA = rangeB
rangeB = temp
if rangeA < 1:
print '%i is less than 1. Will start with 1.' % rangeA
rangeA = 1
if rangeA > len(kindleBooks):
print '%i is more than %s. Will start with %s.' % (rangeA, len(kindleBooks), len(kindleBooks))
rangeA = len(kindleBooks)
if rangeB > len(kindleBooks):
print '%i is more than %s. Will end with %s.' % (rangeB, len(kindleBooks), len(kindleBooks))
rangeB = len(kindleBooks)
if rangeB < 1:
print '%i is less than 1. Will end with 1.' % rangeB
rangeB = 1
for bookNum in xrange(rangeA, rangeB+1):
book = kindleBooks.books[int(bookNum) - 1]
MarkForUpdate(book)
else:
print 'Skipping book number %s as it is not in the list.' % bookID
def New():
    """Flag every book that does not yet have an X-Ray file."""
    for book in kindleBooks:
        if book.xrayExists:
            continue
        MarkForUpdate(book)
def MarkForUpdate(book, checkForXRay=False):
    # Flag `book` for rebuild; optionally delete its existing X-Ray first.
    book.update = True
    if checkForXRay:
        RemoveXRay(book)
def UnmarkforUpdate(book):
    # Clear the rebuild flag set by MarkForUpdate().
    book.update = False
def RemoveXRay(book):
    """Delete every file inside the book's X-Ray directory, if it has one."""
    if not book.xrayExists:
        return
    for path in glob(os.path.join(book.xrayLocation, '*')):
        os.remove(path)
def SetupXRayBuilder():
    """Launch the X-Ray Builder GUI and cache handles to its windows/controls.

    Fills in the module-level globals declared below, records the tool's
    configured output directory, forces the source to Shelfari and empties
    the output directory. NOTE(review): the positional button indexes
    (6, 2, 10) assume a fixed control layout -- confirm against the bundled
    X-Ray Builder GUI version.
    """
    # create global variables
    global app, mainWindow, aliasesWindow, chaptersWindow, settingsWindow
    global xrayButton, sheflariURLButton, shelfariButton, aliasesNoButton, chaptersNoButton
    global bookTextBox, shelfariURLTextBox, outputTextBox, outputDir
    # open X-Ray Builder GUI
    app = Application().start(os.path.join('X-Ray Builder GUI','X-Ray Builder GUI.exe'))
    mainWindow = app['X-Ray Builder GUI']
    aliasesWindow = app['Aliases']
    chaptersWindow = app['Chapters']
    settingsWindow = app['Settings']
    # get buttons (sorted left-to-right so positional indexes are stable)
    buttons = [button for button in mainWindow._ctrl_identifiers() if type(button) is controls.win32_controls.ButtonWrapper]
    buttons.sort(key=lambda x:x.Rectangle().left)
    xrayButton = buttons[6]
    sheflariURLButton = buttons[2]
    settingsButton = buttons[10]
    settingsSaveButton = settingsWindow['SaveButton']
    shelfariButton = mainWindow['ShelfariButton']
    aliasesNoButton = aliasesWindow['No']
    chaptersNoButton = chaptersWindow['No']
    # get text boxes (sorted top-to-bottom)
    textBoxes = [box for box in mainWindow._ctrl_identifiers() if type(box) is controls.win32_controls.EditWrapper]
    textBoxes.sort(key=lambda x:x.Rectangle().top)
    bookTextBox = textBoxes[0]
    shelfariURLTextBox = textBoxes[1]
    outputTextBox = textBoxes[2]
    # minimize window
    # mainWindow.Minimize()
    # Get output directory
    ClickButton(settingsButton)
    settingsWindow.Wait('exists', timeout=60)
    outputDir = settingsWindow['Output Directory:Edit'].Texts()[0]
    ClickButton(settingsSaveButton)
    app.WaitCPUUsageLower(threshold=.5, timeout=300)
    # make sure Source is Shelfari
    ClickButton(shelfariButton)
    # make sure output directory is empty
    if os.path.exists(outputDir): rmtree(outputDir)
    os.mkdir(outputDir)
def ClickButton(button):
    """Poll until the control reports enabled, then click it."""
    while True:
        if button.IsEnabled():
            break
        sleep(1)
    button.Click()
def EditTextBox(textBox, text):
    """Set the text box content to `text`, retrying up to 10 times.

    Raises CouldNotEditTextBox when the control still does not show the
    requested text after all retries.
    """
    while not textBox.IsEnabled():
        sleep(1)
    textBox.SetEditText(text)
    for _ in range(10):
        if textBox.Texts()[0] == text:
            return
        textBox.SetEditText(text)
    if textBox.Texts()[0] != text:
        raise CouldNotEditTextBox('could not edit text box to %s' % text)
def ProgressBar(percentage, processingText='Processing'):
    """Render a single-line console progress bar: `NN%  |#####     | label`."""
    bar = '#' * (percentage // 5)
    perc = str(percentage) + '%'
    # check if line is too long and shorten accordingly
    if len(processingText) + 28 > MAX_LINE_LENGTH:
        processingText = processingText[:MAX_LINE_LENGTH - 31] + '...'
    sys.stdout.write('\r%s\r' % ('\0' * MAX_LINE_LENGTH))  # clear line
    sys.stdout.write('%-4s |%-20s| %s' % (perc, bar, processingText))
    sys.stdout.flush()
def UpdateASINAndUrl(books):
aConn = httplib.HTTPConnection('www.amazon.com')
sConn = httplib.HTTPConnection('www.shelfari.com')
# get and update shelfari url
print 'Updating ASINs and getting shelfari URLs'
for progress, book in enumerate(books):
ProgressBar(progress*100/len(books), processingText = book.bookNameAndAuthor)
try:
aConn, sConn = book.GetShelfariURL(aConnection=aConn, sConnection=sConn)
except Exception as e:
booksSkipped.append((book, e))
if type(e) is CouldNotFindASIN:
UnmarkforUpdate(book)
ProgressBar(100, processingText='Done.\n\n')
def CreateXRayFile(book):
    """Drive the X-Ray Builder GUI to build an X-Ray file.

    Relies on the module-level control handles created by SetupXRayBuilder()
    and on the book's location already being typed into the GUI; the `book`
    argument itself is not referenced here.
    """
    ClickButton(xrayButton) # click create xray button
    # wait for aliases window and respond
    app.WaitCPUUsageLower(threshold=.5, timeout=300)
    aliasesWindow.Wait('exists', timeout=30)
    ClickButton(aliasesNoButton)
    # wait for chapters window and respond
    app.WaitCPUUsageLower(threshold=.5, timeout=300)
    chaptersWindow.Wait('exists', timeout=5)
    ClickButton(chaptersNoButton)
    # wait for xray creation to be done
    app.WaitCPUUsageLower(threshold=.5, timeout=300)
def MoveXRayFiles(booksUpdate):
# move x-ray files to their respective locations
xrayFiles = []
for dirName, subDirList, fileList in os.walk(outputDir):
for file in glob(os.path.join(dirName,'*.asc')):
xrayFiles.append(file)
if len(xrayFiles)> 0:
print 'Moving X-Ray Files to their directories'
for xrayFile in xrayFiles:
book = kindleBooks.GetBookByASIN(os.path.basename(xrayFile).split('.')[2])
xrayLoc = book.xrayLocation
RemoveXRay(book)
if xrayLoc and os.path.exists(xrayLoc):
move(xrayFile, xrayLoc)
def CleanUp():
# delete dmp, ext, log, out
print "Cleaning up..."
if os.path.exists(outputDir): rmtree(outputDir)
if os.path.exists('dmp'): rmtree('dmp')
if os.path.exists('ext'): rmtree('ext')
if os.path.exists('log'): rmtree('log')
if os.path.exists(os.path.join('X-Ray Builder GUI', 'dmp')): rmtree(os.path.join('X-Ray Builder GUI', 'dmp'))
if os.path.exists(os.path.join('X-Ray Builder GUI', 'log')): rmtree(os.path.join('X-Ray Builder GUI', 'log'))
if os.path.exists(os.path.join('X-Ray Builder GUI', 'out')): rmtree(os.path.join('X-Ray Builder GUI', 'out'))
#--------------------------------------------------------------------------------------------------------------------------END OF FUNCTIONS--------------------------------------------------------------------------------------------------------------------------#
# main
# --- Command-line interface: exactly one of -u/-ua/-n must be chosen. ---
parser = argparse.ArgumentParser(description='Create and update kindle X-Ray files')
parser.add_argument('-u', '--update', action='store_true', help='Will give you a list of all books on kindle and asks you to return a comma separated list of book numbers for the books you want to update; Note: You can use a range in the list')
parser.add_argument('-ua', '--updateall', action='store_true', help='Deletes all X-Ray files and recreates them. Will also create X-Ray files for books that don\'t already have one')
parser.add_argument('-n', '--new', action='store_true', help='Creates X-Ray files for books that don\'t already have one')
args = parser.parse_args()
# check to make sure only one argument is chosen
numOfArgs = 0
if args.updateall: numOfArgs += 1
if args.update: numOfArgs += 1
if args.new: numOfArgs += 1
if numOfArgs > 1:
    raise Exception('Please choose only one argument.')
if numOfArgs < 1:
    # No mode selected: show usage and quit.
    parser.print_help()
    sys.exit()
kindleBooks = Books()  # Library model from kindle.books -- presumably enumerates the device's books.
# Dispatch to the selected mode; each marks the relevant books for update.
if args.updateall:
    UpdateAll()
elif args.update:
    Update()
elif args.new:
    New()
# Everything flagged above is rebuilt in one pass below.
booksToUpdate = kindleBooks.GetBooksToUpdate()
if len(booksToUpdate) > 0:
    # NOTE(review): `global` at module level is a no-op; booksUpdated and
    # booksSkipped are module globals by virtue of the assignments below.
    global booksUpdated, booksSkipped
    booksUpdated = []
    booksSkipped = []
    # update books' ASIN and get shelfari urls, run setup
    UpdateASINAndUrl(booksToUpdate)
    SetupXRayBuilder()
    print 'Creating X-Ray Files'
    for book in booksToUpdate:
        try:
            # insert book location
            print '\t%s' % book.bookNameAndAuthor
            EditTextBox(bookTextBox, book.bookLocation)
            if book.shelfariURL:
                EditTextBox(shelfariURLTextBox, book.shelfariURL)
                # create xray file and add to updated list
                CreateXRayFile(book)
                booksUpdated.append(book)
            else:
                # clear shelfari url, click shelfari button and wait for it to finish
                EditTextBox(bookTextBox, '')
                ClickButton(sheflariURLButton)
                app.WaitCPUUsageLower(threshold=.5, timeout=300)
                if shelfariURLTextBox.Texts()[0]:
                    CreateXRayFile(book)
                    booksUpdated.append(book)
                else:
                    booksSkipped.append((book, 'could not find shelfari url.'))
        except Exception, e:
            # Any per-book failure is recorded and the loop moves on.
            booksSkipped.append((book, e))
    print
    # close X-Ray Builder GUI (retry the kill up to 10 times)
    killed = False
    numOfTries = 10
    while not killed and numOfTries > 0:
        try:
            killed = app.kill_()
        except:
            numOfTries -= 1
    if not killed:
        print "Could not close X-Ray Builder GUI."
    MoveXRayFiles(booksUpdated)
    # print updated books
    print
    if len(booksUpdated) > 0:
        print 'Books Updated: '
        for book in booksUpdated:
            print '\t%s' % book.bookNameAndAuthor
    # print skipped books
    print
    if len(booksSkipped) > 0:
        print 'Books Skipped: '
        for book in booksSkipped:
            # NOTE(review): `book[1] is ''` is an identity comparison with a
            # literal; it relies on string interning and should likely be `==`.
            if book[1] is '':
                print '%s skipped because %s' % (book[0].bookNameAndAuthor, repr(book[1]))
            else:
                print '%s skipped because %s' % (book[0].bookNameAndAuthor, book[1])
    CleanUp()
else:
    print 'No books to update.'
print 'Done!'
2ed5fbf3a9a28520244e6dd5dc7ce20c2a86a275 | Python | dewiniaid/sigsolve | /sigsolve/board.py | UTF-8 | 12,752 | 2.890625 | 3 | [] | no_license | import collections
import itertools
import re
from sigsolve.geometry import DEFAULT_GEOMETRY, Point, Rect
class TileBase:
    """Base class for tiles.

    Tracks the tile's element (or None), whether it currently 'exists' on the
    board, a cached legality flag, and notifies the parent board of changes.
    """
    def __init__(self, parent=None, number=None):
        self.parent = parent  # Owning board (receives change callbacks), or None.
        self._exists = False
        self.number = number
        self.bit = 0 if number is None else 1 << number  # Single-bit id used by bitmap().
        self.neighbors = []
        self._element = None
        self._legal = False
    @property
    def legal(self):
        # Cached legality flag; subclasses may compute it lazily.
        return self._legal
    def real_neighbors(self):
        # Neighbors that currently hold an element.
        yield from (n for n in self.neighbors if n.element)
    def nonempty_neighbors(self):
        # Neighbors that currently exist on the board.
        yield from (n for n in self.neighbors if n.exists)
    @classmethod
    def bitmap(cls, tiles):
        # OR together the `bit` of every tile in `tiles` into one integer mask.
        result = 0
        for tile in tiles:
            result |= tile.bit
        return result
    def _format_dict(self, *bases):
        """Build the substitution table used by __format__; `bases` dicts override."""
        result = {
            'n': (self.number is None and '?') or self.number,
            'b': self.bit,
            'e': self.element or 'none',
        }
        # 'E' encodes state: 'empty', lowercase element (not legal), UPPERCASE
        # (legal), plus a trailing '?' when legality is unknown (None).
        if self.element is None or not self.exists:
            result['E'] = 'empty'
        elif self._legal:
            result['E'] = self.element.upper()
        else:
            result['E'] = self.element
        if self._legal is None:
            result['E'] += '?'
        for base in bases:
            if base:
                result.update(base)
        return result
    def __format__(self, format_spec):
        """Allows tiles to be formatted pretty in F-strings and str.format().

        Each '%<char>' in the spec is replaced by the matching entry of
        _format_dict(); unknown characters expand to ''.
        """
        d = self._format_dict()
        return re.sub('%.', lambda match: str(d.get(match.group(0)[1], '')), format_spec)
    @property
    def exists(self):
        return self._exists
    @exists.setter
    def exists(self, value):
        self._setexists(value)
    def _setexists(self, value):
        # Setter body split out so subclasses (e.g. DummyTile) can override it.
        old = self._exists
        if value == old:
            return # noop
        if value and self.element is None:
            raise AttributeError('Cannot make a tile with no element existant')
        self._exists = value
        if self.parent:
            self.parent.tile_exists_changed(self, old, value)
    @property
    def element(self):
        return self._element
    @element.setter
    def element(self, value):
        self._setelement(value)
    def _setelement(self, value):
        old = self._element
        if value == old:
            return
        self._element = value
        if self.parent:
            self.parent.tile_element_changed(self, old, value)
        if not value:
            # Clearing the element also removes the tile from play.
            self.exists = False
class Tile(TileBase):
    """A playable board tile: screen-space geometry plus cached move legality."""
    MINADJACENT = 3 # Number of adjacent empty tiles that must be present for a move to be legal.
    def __init__(self, *xy, geometry=DEFAULT_GEOMETRY, parent=None, number=None):
        super().__init__(parent, number)
        self._legal = None  # None = legality not yet computed (lazy cache).
        self.geometry = geometry
        self.xy = Point(*xy)
        # Pixel origin of the tile; odd rows get the stagger offset.
        self.origin = (geometry.full_size * self.xy) + geometry.origin
        if self.xy.y % 2:
            self.origin += geometry.altoffset
        self.rect = Rect(self.origin, self.origin + geometry.size)
        self.sample_rect = self.rect + geometry.sample_insets  # presumably the pixel-sampling area; confirm against vision code
    @property
    def x(self):
        # Board-space column.
        return self.xy.x
    @property
    def y(self):
        # Board-space row.
        return self.xy.y
    @property
    def legal(self):
        # Unlike TileBase, legality here is computed (and cached) on demand.
        return self.exists and self.element is not None and self.predict_legality()
    def expire_legality(self, onlyif=None):
        """Forgets current legality status, causing it to be updated on next request.

        If `onlyif` is given, only forget when the cached value matches it.
        """
        if onlyif is not None and self._legal is not onlyif:
            return
        self._legal = None
    def predict_legality(self, removed=None):
        """
        Calculates legality status, assuming tiles in `removed` are removed.
        If self._legal is already True, returns True immediately (since removing additional tiles will have no effect)
        If `ignore` is None or has no impact on legality, the current cached legality status will be updated.
        Reasons legality may not be affected include:
        - The tile is illegal anyways.
        - None of the tiles in 'ignore' are adjacent, or they all are already empty.
        - Adjacency criteria are met even without the tiles in `ignore` being considered.
        :param removed: Set of tiles to ignore. None = ignore no tiles.
        :return: True if this tile is legal, False otherwise.
        """
        if not self.exists or self.element is None:
            return False
        if self._legal or (not removed and self._legal is False):
            return self._legal
        if removed is None:
            removed = set()
        def _gen():
            # Iterate over all neighbors. Then iterate over the first N results to handle wrapping around.
            cache = []
            cache_count = self.MINADJACENT - 1
            for neighbor in self.neighbors:
                legality_predicted = (not neighbor.exists) or neighbor in removed
                if not legality_predicted:
                    cache_count = 0 # Stop cacheing (the 'False' results don't need to be repeated)
                result = (not neighbor.exists, legality_predicted)
                if cache_count:
                    cache.append(result)
                    cache_count -= 1
                yield result
            yield from cache
        result = False # What we'll return at the end if we don't bail early.
        actual_run = 0 # Actual run of legal tiles
        predicted_run = 0 # Predicted run of legal tiles, counting `removed`
        for actual, predicted in _gen():
            if actual:
                actual_run += 1
                if actual_run >= self.MINADJACENT:
                    # Truly legal right now; cache and return.
                    self._legal = True
                    return True
            else:
                actual_run = 0
            if predicted:
                predicted_run += 1
                if predicted_run >= self.MINADJACENT:
                    result = True
            else:
                predicted_run = 0
        # If we reach here, it's not ACTUALLY legal so update status accordingly.
        self._legal = False
        # But it might be predicted legal...
        return result
    def affected_neighbors(self):
        """Returns a list of neighbors that would become legal if this tile is removed."""
        ignore = {self}
        result = []
        for neighbor in self.nonempty_neighbors():
            if neighbor.predict_legality(removed=ignore):
                if neighbor.legal:
                    continue
                result.append(neighbor)
        return result
    @classmethod
    def all_neighbors(cls, tiles):
        """
        Returns the set of all neighbors of `tiles`.
        :param tiles: Tiles to check
        :return: All neighbors, excluding tiles in `tiles`
        """
        neighbors = set()
        for tile in tiles:
            if tile is None:
                continue
            neighbors.update(tile.real_neighbors())
        neighbors.discard(None)
        neighbors.difference_update(tiles)
        return neighbors
    @classmethod
    def affected_tiles(cls, tiles):
        """Returns a set of tiles that will become legal if all tiles in `tiles` are removed."""
        affected = set()
        for tile in cls.all_neighbors(tile for tile in tiles if tile.exists):
            if tile.element is None:
                continue
            if tile.predict_legality(tiles) and not tile.legal: # Order matters!
                affected.add(tile)
        return affected
    def __repr__(self):
        # e.g. "Tile(2, 3) fire" -- UPPERCASE = cached-legal, '?' = unknown.
        status = (self.exists and self.element) or 'empty'
        if self.exists:
            if self._legal:
                status = status.upper()
            elif self._legal is None:
                status += '?'
        return f"{self.__class__.__name__}({self.x}, {self.y}) {status}"
    def _format_dict(self, *bases):
        # Extend the base substitution table with board coordinates.
        return super()._format_dict({
            'x': self.x,
            'y': self.y
        })
class DummyTile(TileBase):
    """Inert padding tile: it can never exist and can never hold an element."""

    def __init__(self, parent=None):
        super().__init__(parent)

    def _setexists(self, value):
        # Veto every attempt to mark a dummy tile as existing.
        raise AttributeError('DummyTile instances can never exist.')

    def _setelement(self, value):
        # Veto every attempt to assign an element to a dummy tile.
        raise AttributeError('DummyTile instances can never have an element.')
class CatalogDictionary(collections.defaultdict):
    """defaultdict variant whose missing-key lookups return an empty tuple
    without inserting anything into the dictionary."""
    def __missing__(self, key):
        return ()
class Board:
    """Hexagonal board: owns the tiles, their adjacency lists, and a catalog
    mapping each element to the set of existing tiles that hold it."""
    CARDINALS = {'water', 'earth', 'fire', 'air'}
    METALS = ('mercury', 'tin', 'iron', 'copper', 'silver', 'gold')  # Order matters: remaining_metals() reports in this sequence.
    def __init__(self, geometry=DEFAULT_GEOMETRY):
        """Build a hex grid of radius `geometry.radius` and wire up neighbor lists."""
        diameter = 2*geometry.radius - 1
        self.rows = []
        self.tiles = []
        self.dummy = DummyTile(parent=self)
        self.catalog = CatalogDictionary()
        # Pad with a row of empties for easier neighbor calculations later.
        blank_row = list(itertools.repeat(self.dummy, diameter + 2))
        self.rows.append(blank_row)
        hoffset = (geometry.radius - 1) // 2 # Used for mapping screenspace coordinates to boardspace
        number = 0
        for y in range(0, diameter):
            row = list(blank_row)
            self.rows.append(row)
            count = diameter - abs(geometry.radius - (y+1))
            start = (diameter - count) // 2
            for x in range(start, start+count):
                t = Tile(x-hoffset, y, parent=self, number=number)
                number += 1
                self.tiles.append(t)
                row[x+1] = t
        # End padding, too.
        self.rows.append(blank_row)
        # Calculate adjacency data
        for y, row in enumerate(self.rows):
            altrow = -((y+1)%2)
            if y == 0 or y > diameter:
                continue
            above = self.rows[y-1]
            below = self.rows[y+1]
            for x, tile in enumerate(row):
                if tile is self.dummy: # Dummy tiles don't need neighbors.
                    continue
                # Starting from the left and going clockwise
                tile.neighbors = [
                    row[x-1], # Left
                    above[x+altrow], # Upper left
                    above[x+altrow+1], # Upper right
                    row[x+1], # Right
                    below[x+altrow+1], # Lower right
                    below[x+altrow], # Lower left
                ]
    def tile_element_changed(self, tile, old, new):
        """Called when a child tile's element is changed. Used to update the catalog and legality data."""
        if old == new:
            return # Nothing changed.
        if old is not None:
            self.catalog[old].discard(tile)
        if new is not None and tile.exists:
            self.catalog.setdefault(new, set()).add(tile)
    def tile_exists_changed(self, tile, old, new):
        # Keep the catalog in sync and invalidate cached legality on neighbors.
        if old == new:
            return # No element change, thus no legality changes.
        if tile.element:
            if new:
                self.catalog.setdefault(tile.element, set()).add(tile)
            elif tile.element in self.catalog:
                self.catalog[tile.element].discard(tile)
        for neighbor in tile.real_neighbors():
            # If we're gaining an element, expire anything that was previously legal.
            # If we're losing an element, expire anything that was previously not legal.
            neighbor.expire_legality(new)
    def legal_tiles(self):
        """Yields a list of tiles that are legal."""
        return [t for t in self.tiles if t.legal]
    def remaining_cardinals(self):
        # Count of remaining tiles for each of the four cardinal elements.
        return {e: self.remaining(e) for e in self.CARDINALS}
    def remaining_metals(self):
        # One (arbitrary) remaining tile per metal, in METALS order; exhausted metals are skipped.
        return list(list(self.catalog[e])[0] for e in self.METALS if self.catalog[e])
    def remaining(self, element):
        # Number of existing tiles currently holding `element`.
        return len(self.catalog[element])
    def bitmap(self):
        """
        Returns an integer bitmask with one bit set for each tile that currently exists.
        """
        return TileBase.bitmap(tile for tile in self.tiles if tile.exists)
    def extents(self):
        """
        Returns a Rect corresponding to the entire screenspace area needed by this board
        :return:
        """
        xmin, ymin, xmax, ymax = self.tiles[0].rect.coords # Arbitrary initialization
        for tile in self.tiles:
            for rect in tile.rect, tile.sample_rect:
                xmin = min(xmin, rect.left)
                xmax = max(xmax, rect.right)
                ymin = min(ymin, rect.top)
                ymax = max(ymax, rect.bottom)
        return Rect(xmin, ymin, xmax, ymax)
| true |
03fd072b905e34a4d0e17baa1a13df096dd426f5 | Python | Elyorbek0209/SeleniumWithPython | /DownloadFILE_InChrome.py | UTF-8 | 1,746 | 3.09375 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
# --------- CONFIGURATION ---------
chromePath = "/home/elyor/Selenium/chromedriver"
geckoPath = "/home/elyor/Selenium/geckodriver"
URL = "https://www.toolsqa.com/automation-practice-form/"

# --------- DOWNLOADING A FILE WITH CHROME ---------
# Build a chrome Options object and point the default download directory at
# our target folder via an experimental preference.
chromeOptions = Options()
chromeOptions.add_experimental_option("prefs", {"download.default_directory": "//home//elyor//Selenium//CHROME_Download"})
print("Options Class object created")

# Start Chrome with the download preference applied.
driver = webdriver.Chrome(executable_path=chromePath, chrome_options=chromeOptions)
print("Chrome Class Driver Created")

# Fresh session: clear cookies, set a global implicit wait, maximize window.
driver.delete_all_cookies()
driver.implicitly_wait(10)
driver.maximize_window()

# Open the practice page and locate the download link.
driver.get(URL)
download_Element = driver.find_element_by_xpath("//a[contains(text(),'Selenium Automation')]")

# Scroll the link into view before interacting with it.
driver.execute_script("arguments[0].scrollIntoView()", download_Element)
time.sleep(3)
print("Page Scrolled Successfully")

# Trigger the download and give it a moment to start.
download_Element.click()
time.sleep(3)
| true |
f4942ad059de9cc22b2d7b281652fae708b05a43 | Python | s781825175/learnpython | /8queen.py | UTF-8 | 534 | 3.25 | 3 | [] | no_license | n = 8
x = []  # Current partial solution: x[i] is the column of the queen in row i.
X = []  # All complete solutions collected by queens().
def conflick(k, positions=None):
    """Return True if the queen in row k attacks any earlier queen.

    `positions` maps row -> column; it defaults to the module-level board `x`
    so existing `conflick(k)` callers keep working. Two queens conflict when
    they share a column or a diagonal.
    """
    if positions is None:
        positions = x
    for i in range(k):
        if positions[i] == positions[k] or abs(positions[i] - positions[k]) == abs(i - k):
            return True
    return False
def queens(k):
    """Recursively place queens from row k onward, recording full boards in X."""
    global n, x, X
    if k >= n:
        X.append(list(x))
        return
    for col in range(n):
        x.append(col)
        if not conflick(k):
            queens(k + 1)
        x.pop()
def show(x):
    """Print the board for solution `x`: rows of dots with 'X' at each queen."""
    # Derive the board size from the solution itself instead of the module
    # global `n`; identical output for full boards, and it generalizes to
    # solutions of any size.
    size = len(x)
    for row in range(size):
        print('. ' * x[row] + 'X ' + '. ' * (size - x[row] - 1))
# Enumerate all solutions, then display the last one found.
queens(0)
print(X[-1], '\n')
show(X[-1])
| true |
c3f9f6649dba72146572d0f3990d6b08c5a5450e | Python | siiddd/HandwrittenDigitsRecognition | /Handwritten Digits Recognition/SimpleNN.py | UTF-8 | 1,868 | 3.34375 | 3 | [] | no_license | #Import packages
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import seaborn as sns
#Import MNIST Digits Dataset (the train/test split comes pre-made)
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

#Checking the Shape of the Datasets
x_train.shape

#Visualize the Data
import matplotlib.pyplot as plt
plt.matshow(x_train[1])
y_train[1]

#Flatten the Data from (60000, 28, 28) to (60000, 784)
x_train_flat = x_train.reshape(60000, 28*28)
x_test_flat = x_test.reshape(10000, 28*28)

#Build a Simple Neural Network: a single dense sigmoid layer over the pixels
model = keras.Sequential([
    keras.layers.InputLayer(input_shape=(784,)),
    keras.layers.Dense(units=10, activation='sigmoid'),
])

#Compile the Model
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

#Fit our Data into the Model
model.fit(x_train_flat, y_train, epochs=10)

#Check the Performance on the Test Data
model.evaluate(x_test_flat, y_test)

#Compare the Predictions with Actual Data
plt.matshow(x_test[10])  #Visual Representation
y_predicted = model.predict(x_test_flat)  #Array of all the Predictions

#Create a Confusion Matrix
from sklearn.metrics import confusion_matrix
y_predicted_list = [np.argmax(row) for row in y_predicted]  #Pick the class with the highest probability
cm = confusion_matrix(y_test, y_predicted_list)

#Visualize the Confusion Matrix on a HeatMap
sns.heatmap(cm, annot=True)
fig = plt.gcf()
fig.set_size_inches(15, 15)
plt.savefig(r'C:\Users\nsid4\Desktop\Confusion_Matrix.png')
| true |
dd79b60e403a054395076489d0608ef09a4fc377 | Python | dennisdnyce/Questioner | /app/api/v1/models/meetup_models.py | UTF-8 | 1,053 | 2.6875 | 3 | [
"MIT"
] | permissive | from datetime import datetime
class MeetupRegistration():
    """In-memory model for meetups: registration details plus posted meetups."""

    def __init__(self, location, images, topic, happeningOn, Tags):
        self.location = location
        self.images = images
        self.topic = topic
        self.happeningOn = happeningOn
        self.Tags = Tags
        self.createdOn = datetime.now()  # timestamp of model creation
        self.All_Meetups = []  # every meetup posted through this instance

    def post_a_meetup(self, meetupId, location, images, topic, createdOn, happeningOn, Tags):
        """Append a new meetup record (a plain dict) to All_Meetups."""
        record = dict(
            meetupId=meetupId,
            createdOn=createdOn,
            location=location,
            images=images,
            topic=topic,
            happeningOn=happeningOn,
            Tags=Tags,
        )
        self.All_Meetups.append(record)

    def get_a_meetup(self, meetupId):
        """Return the meetup whose 'meetupId' matches, or None when absent."""
        return next(
            (meetup for meetup in self.All_Meetups if meetup['meetupId'] == meetupId),
            None,
        )
| true |
5cbb1fa30032ab8ae91fabb7cfee505114788afc | Python | quite-smart-stuff/smart-home | /www/heat1off.py | UTF-8 | 225 | 2.671875 | 3 | [] | no_license | import RPi.GPIO as GPIO
GPIO.setwarnings(False)  # silence "channel already in use" warnings
def ledoff1(pin):
    """Drive `pin` low (turns output 1 off) and log it."""
    GPIO.output(pin,GPIO.LOW)
    print("led 1 off")
    return
GPIO.setmode(GPIO.BOARD)  # pin numbers refer to physical board positions
GPIO.setup(15, GPIO.OUT)  # physical pin 15 is the controlled output
ledoff1(15)
GPIO.cleanup()  # release the GPIO channels before exiting
| true |
d7d7373a1192c66d5efcd7fbe4cee534a7bdb523 | Python | zhang-chao-zhi/autoTestBook | /5/5.1.2/try_proxy.py | UTF-8 | 302 | 2.625 | 3 | [] | no_license | from urllib import request
url = 'http://httpbin.org/ip'  # httpbin echoes the IP the request arrived from
proxy = {'http': '218.18.232.26:80', 'https': '218.18.232.26:80'}
proxies = request.ProxyHandler(proxy)  # build the proxy handler
opener = request.build_opener(proxies)  # build an opener that routes through the proxy
resp = opener.open(url)
print(resp.read().decode()) | true |
7d86b36683e4c2cae621652c67b61ebd3fc41fe7 | Python | antoniojkim/AlgLib | /Algorithms/Graphs/DFS/tests/test_DFS.py | UTF-8 | 410 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
import sys
# Make the parent package directories importable so `graphs` and `DFS`
# resolve when this test file is run directly (outside a package context).
file_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(file_dir, "../"))
sys.path.append(os.path.join(file_dir, "../../"))
from graphs import create_graph
from DFS import DFS
def test_DFS_1():
    """On the directed path A->B->C, C is reachable from A but not vice versa."""
    graph = create_graph(["A", "B", "C"], [("A", "B"), ("B", "C")])
    assert DFS(graph, "A", "C")
    assert not DFS(graph, "C", "A")
| true |
2f429f7fe46d8fd066dae2d8c2c92c173efb040e | Python | srmarcballestero/Newtons-Cradle | /Source/VariaParametre.py | UTF-8 | 2,096 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Projecte: Newton's Cradle.
- Mòdul: VariaParametre.py
- Autors: Parker, Neil i Ballestero, Marc.
- Descripció: Fer simulacions iterant un paràmetre.
- Revisió: 06/10/2020
"""
import numpy as np
from scipy import constants as const
from datetime import timedelta
import Simulacio as sim
from DataGen import simulaSistema
"""
Variables caracterísitques dels sistema.
"""
parametres_sist = {
"N": 2,
"g": const.g,
"L": 1.3,
"R": 0.010,
"gap": 1.0e-3,
"eta": 6.8e-4*0,
"gamma": 1.47e2*1,
"m": np.array([0.10, 0.10]),
"E": np.array([2.55e7, 2.55e7]),
"j": np.array([0.48, 0.48]),
"pas": 2.5e-2,
"num_osc": 30,
"salt": 10
}
parametres_sist["A"] = np.array([np.sin(4*const.pi/180)*parametres_sist["L"]]
+ [0 for i in range(parametres_sist["N"]-1)])
"""
Ús dels fitxer de dades i metadades
"""
nom_simulacio = input("Nom de la simulació?\n")
nom_directori = sim.directori_simulacions + nom_simulacio + "/"
"""
Iteració de les condicions inicials i generació de la Simulació
var: str (nom del paràmetre del sistema a iterar)
"""
nom_var = "gamma"
vars = np.linspace(50, 1500, num=200)
t_acum = 0.
ts_exec = []
t_iter = 0.
for i, var in enumerate(vars):
iter_nom_simulacio = nom_simulacio+"_"+str(i)
parametres_sist[nom_var] = var
sist = sim.Sistema(**parametres_sist)
print(f'--- Iteració {i+1} / {len(vars)} | Progrés total {((i+1) / len(vars) * 100.):.1f} % | Temps estimat {str(timedelta(seconds=(t_iter * (len(vars) - i)))).split(".")[0]} ---')
print("Generant el fitxer "+iter_nom_simulacio+".csv")
t_exec = simulaSistema(parametres_sist, nom_directori, iter_nom_simulacio)
ts_exec.append(t_exec)
t_iter = np.mean(ts_exec)
t_acum += t_exec
print(f'--- Temps d\'execució: {str(timedelta(seconds=t_exec)).split(".")[0]}.{str(timedelta(seconds=t_exec)).split(".")[1][:2]} '
+ f'| --- Temps acumulat: {str(timedelta(seconds=t_acum)).split(".")[0]}.{str(timedelta(seconds=t_acum)).split(".")[1][:2]} ---\n')
| true |
a9de4d34a33549d8024d14e3f0d5fa9fee24f0a3 | Python | ash/python-tut | /course/if2.py | UTF-8 | 87 | 3.359375 | 3 | [] | no_license | x = 10
# Report which of three buckets x falls in: below 5, in [5, 8), or 8 and up.
if x < 5:
    print('< 5')
elif x < 8:
    print('< 8')
else:
print('>= 8') | true |
58125611b77dd0368398620fa649a08a0e2468b0 | Python | Pallavi-Jadhav/loginpygit | /log/Login.py | UTF-8 | 1,036 | 2.6875 | 3 | [] | no_license | import guizero as g
def clear_uname():
    """Clear the username box (bound to its first click, removes placeholder)."""
    uname.clear()
def clear_pass():
    """Clear the password box (bound to its first click, removes placeholder)."""
    password.clear()
# Build the login window on a grid layout (widgets placed by [column, row]).
app = g.App(title='Login', height=300, width=500, layout='grid', bg='lightblue')
title = g.Text(app, text='SIGN IN', size=40, color='blue', font='Helvetica', grid=[1, 0], align='left')
uname_label = g.Text(app, text='Enter username: ', grid=[0, 2], align='left', size=15)
uname = g.TextBox(app, text='Username', grid=[1, 2], width=45, align='left')
uname.when_clicked = clear_uname  # the callbacks above are defined before the boxes exist;
password_lbl = g.Text(app, text='Enter password: ', grid=[0, 3], size=15, align='left')
password = g.TextBox(app, text='Password', grid=[1, 3], width=45, align='left')
password.when_clicked = clear_pass  # they only run on click, after both boxes are created
forgot_pass = g.Text(app, text='Forgot password?', color='blue', font='Helvetica', grid=[0, 4], align='left')
login_button = g.PushButton(app, text='Login', grid=[0, 5], align='left', width=10, height=1)
login_button.text_size = 10
register_button = g.PushButton(app, text='Signup', grid=[0, 6], align='left', width=10, height=1)
app.display()  # enter the GUI event loop (blocks until the window closes)
| true |
2d4e80a6d8ca5eefb7b81e399ce0abbdc271861f | Python | turpure/urrest | /urapi/firstv/dbtools/ebaydata.py | UTF-8 | 1,457 | 2.546875 | 3 | [] | no_license | import MySQLdb
import json
def get_feedback_json(sellername):
    """Return the newest feedback row for `sellername` as a JSON string.

    The query adds 1/6/12-month positive-rate columns (score1/score6/score12)
    and their pairwise differences ("1VS6", "1VS12", "6VS12") are computed in
    Python.  On any failure a JSON error object is returned instead.
    """
    query = [
        "select *,",
        # MySQLdb binds parameters with %s ('format' paramstyle), so literal
        # percent signs in the SQL must be doubled.
        "concat(round(fstmonthpostive/(fstmonthpostive+fstmonthnegative)*100,2), '%%') as score1,",
        "concat(round(sixmonthpostive/(sixmonthpostive+sixmonthnegative)*100,2), '%%') as score6,",
        "concat(round(twemonthpostive/(twemonthpostive+twemonthnegative)*100,2), '%%') as score12",
        # Parameterized (not string-interpolated) to prevent SQL injection.
        "from firstv_feedback where sellername=%s order by id desc limit 1"
    ]
    sql = " ".join(query)
    con = None
    try:
        con = MySQLdb.connect(host='192.168.0.150', user='root', passwd='ur@2016!', db='urapi')
        cur = con.cursor(MySQLdb.cursors.DictCursor)
        con.set_character_set('utf8')
        cur.execute('set names utf8;')
        cur.execute('set character set utf8')
        cur.execute('set character_set_connection=utf8;')
        cur.execute(sql, (sellername,))
        row = cur.fetchone()
        row['createdDate'] = str(row['createdDate'])
        # Differences between the 1/6/12-month positive rates (strip the '%').
        row['1VS6'] = round(float(row['score1'][:-1]) - float(row['score6'][:-1]), 2)
        row['1VS12'] = round(float(row['score1'][:-1]) - float(row['score12'][:-1]), 2)
        row['6VS12'] = round(float(row['score6'][:-1]) - float(row['score12'][:-1]), 2)
        return json.dumps(row)
    except Exception:
        # Same best-effort contract as before: any failure yields an error blob.
        return json.dumps({"error": "no feedback data"})
    finally:
        # Always release the connection (the original leaked it).
        if con is not None:
            con.close()
if __name__ == '__main__':
print get_feedback_json('sunshinegirl678') | true |
77435cf9c7e53413cdc69114739f85d8653df882 | Python | hbyhl/utils4py | /utils4py/data/neo4j.py | UTF-8 | 1,875 | 2.53125 | 3 | [] | no_license | #!usr/bin/env python
# -*- coding: utf-8 -*-
# Desc: pooled access to neo4j servers configured in data_source/neo4j.conf
# FileName: neo4j.py
# Author:yhl
# Version:
# Last modified: 2020-02-28 11:12
import threading
from py2neo import Graph
from utils4py import ConfUtils
_neo4j_conf = ConfUtils.load_parser("data_source/neo4j.conf")  # one config section per database
_conn_pool = dict()  # section name -> shared connection wrapper
_reuse_mutex = threading.RLock()  # guards all access to _conn_pool
def connect(section, settings_reuse_pool=True):
    """Return a database wrapper for the given config section.

    With settings_reuse_pool=True (the default) one shared connection per
    section is cached in _conn_pool; otherwise a fresh connection is built.
    :param section: section name in data_source/neo4j.conf
    :rtype: Database
    """
    if not settings_reuse_pool:
        return _ConnectParams().init_with_section(section).connect()
    with _reuse_mutex:
        if section not in _conn_pool:
            db_obj = _ConnectParams().init_with_section(section).connect()
            if db_obj:
                _conn_pool[section] = db_obj
        return _conn_pool[section]
class _GraphWrapper(object):
def __init__(self,graph):
self._graph = graph
def execute(self, cypher, **kwargs):
cur = self._graph.run(cypher, **kwargs)
data = cur.data()
cur.close()
return data
class _ConnectParams(object):
"""
neo4j connect params
"""
def __init__(self):
self._user = "neo4j"
self._password = "password"
self._host = "localhost"
self._port = 7474
self._scheme = 'http'
pass
def init_with_section(self, section):
conf = dict(_neo4j_conf.items(section=section))
self._user = conf.get("user", "neo4j")
self._password = conf.get("password", "password")
self._host = conf.get("host", "localhost")
self._port = int(conf.get("port", 7474))
self._scheme = conf.get('scheme','http')
return self
def connect(self):
graph = Graph(username=self._user, password=self._password,host=self._host,port=self._port,scheme=self._scheme)
return _GraphWrapper(graph)
| true |
142324945985721968ca37f0304fc43bf125c5c7 | Python | harsh6292/Behavioral-Cloning-CarND | /model.py | UTF-8 | 7,515 | 3.140625 | 3 | [] | no_license | #import keras
import csv
import cv2
import numpy as np
DBG = True  # set False to silence the progress prints below
lines = []  # every CSV row: [center_path, left_path, right_path, steering, ...]
# Load Udacity training data
udacity_training_log_file = 'udacity_data/driving_log.csv'
if (DBG):
    print(udacity_training_log_file)
with open(udacity_training_log_file) as csvfile:
    reader = csv.reader(csvfile)
    # Read each line in driving_log.csv
    count = 0
    for line_in_file in reader:
        # Udacity driving_log.csv file has first line as column name not actual data which gives error while training
        if count == 0:
            count = 1
            continue
        lines.append(line_in_file)
len_udacity_data = len(lines)
if DBG:
    print("Total udacity training images: {}".format(len_udacity_data))
# Append my own training data to udacity's training data
own_training_log_file = 'own_training_data/driving_log.csv'
with open(own_training_log_file) as train_csvfile:
    reader = csv.reader(train_csvfile)
    # Read each line in driving_log.csv (this log has no header row to skip)
    for line_in_file in reader:
        lines.append(line_in_file)
total_data = len(lines)
if DBG:
    print("Total images from my own training data: {}".format((total_data-len_udacity_data)))
images = []        # raw images from all three cameras
measurements = []  # one steering angle per stored image
i =0               # processed-line counter, only used to limit debug prints
img_data_dir = 'image_data/'  # all images are expected under image_data/IMG/
# Load one camera image given the path recorded in driving_log.csv.
def process_image(img_path):
    """Return the OpenCV image for `img_path`, loaded from image_data/IMG/.

    Only the base name of the recorded path is used, so logs captured on
    another machine -- including Windows-style backslash paths -- still
    resolve to the local copy of the images.
    """
    filename = img_path.replace('\\', '/').split('/')[-1]
    current_path = img_data_dir + 'IMG/' + filename
    image = cv2.imread(current_path)  # None when the file does not exist
    return image
# Process all the images in driving_log (left, center, right)
# Add the steering measurements and store it
for line in lines:
    if DBG and i < 2:
        print('Processing line: {}'.format(line))
    # Extract hood image path from each line (Center image)
    img_center = process_image(line[0])
    img_left = process_image(line[1])
    img_right = process_image(line[2])
    if DBG and i < 1:
        print("Image shape from opencv: {}".format(img_center.shape))
    steer_angle_center = float(line[3])
    i += 1
    # Ignore most of the straight angles (balances the steering distribution)
    if steer_angle_center < 0.05 and steer_angle_center > -0.05:
        continue
    # Save the opencv image to a list for processing later
    images.append(img_center)
    images.append(img_left)
    images.append(img_right)
    # Extract steering angle from each line
    if DBG and i < 2:
        print("Measurement from file: {}".format(steer_angle_center))
    # Steering correction angle: side cameras are offset, so nudge their labels
    correction = 0.067
    steer_angle_left = steer_angle_center + correction
    steer_angle_right = steer_angle_center - correction
    # Add measurement to a list of measurements
    measurements.append(steer_angle_center)
    measurements.append(steer_angle_left)
    measurements.append(steer_angle_right)
if DBG:
    print("Total images: {}, total measurements: {}".format(len(images), len(measurements)))
# Image augmentation using flipped images (doubles the data, balances left/right turns)
augmented_images, augmented_measurements = [], []
for image, measurement in zip(images, measurements):
    augmented_images.append(image)
    augmented_measurements.append(measurement)
    # Flip the image horizontally and negate the steering measurement
    augmented_images.append(cv2.flip(image, 1))
    augmented_measurements.append(measurement*-1.0)
if DBG:
    print("Total augmented images: {}, total augmented measurements: {}".format(len(augmented_images), len(augmented_measurements)))
# Convert all images read through opencv to numpy arrays, our training data
X_train = np.array(augmented_images)
# Create label array as numpy array using steering angle measurements
y_train = np.array(augmented_measurements)
print('Total training data: Input shape: {}, Label shape: {}'.format(X_train.shape, y_train.shape))
#########################
# Using Generators
#########################
# Split the training data into train and validation samples (80/20)
import sklearn
from sklearn.model_selection import train_test_split
X_train_samples, X_valid_samples, y_train_samples, y_valid_samples = train_test_split(X_train, y_train, test_size=0.2)
if DBG:
    print("Train samples: {}, train labels: {}, valid samples: {}, valid labels: {}".format(X_train_samples, X_valid_samples, y_train_samples, y_valid_samples))
# Define a generator to be used for training and validation inputs
def generator(features, labels, batch_size=32):
    """Endlessly yield (inputs, labels) batches of size batch_size for Keras."""
    total = len(features)
    # Infinite loop: fit_generator pulls exactly as many batches as it needs.
    while True:
        for start in range(0, total, batch_size):
            stop = start + batch_size
            # sklearn.utils.shuffle permutes inputs and labels in unison
            # (note: only *within* each batch, not across the whole epoch).
            yield sklearn.utils.shuffle(features[start:stop], labels[start:stop])
train_generator = generator(X_train_samples, y_train_samples)
validation_generator = generator(X_valid_samples, y_valid_samples)
#########################
# Build model using keras
#########################
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Cropping2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.core import SpatialDropout2D
from keras.callbacks import ModelCheckpoint
# Build a Sequential model with convolution and dense layers
model = Sequential()
# Crop 50 rows (sky) from the top and 20 rows (hood) from the bottom of each frame
model.add(Cropping2D(cropping=((50, 20), (0, 0)), input_shape=(160, 320, 3)))
# Lambda normalization layer: scale pixel values into [-0.5, 0.5]
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
######################
# Layer-1 Convolution
######################
# 8 filters, kernel_size = 5x5, padding = valid
model.add(Convolution2D(8, 5, 5, border_mode='valid', activation=None))
# Add a LeakyReLU activation function, similar to ReLU, but with very small dependence on negative values
model.add(LeakyReLU(alpha=0.15))
# Add a max pooling layer to avoid overfitting
model.add(MaxPooling2D(pool_size=(2, 2) , strides=None, border_mode='valid'))
######################
# Layer-2 convolution: 12 filters of 5x5
######################
model.add(Convolution2D(12, 5, 5, border_mode='valid', activation=None))
model.add(LeakyReLU(alpha=0.15))
model.add(MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='valid'))
######################
# Layer-3 convolution: 16 filters of 3x3
######################
model.add(Convolution2D(16, 3, 3, border_mode='valid', activation=None))
model.add(LeakyReLU(alpha=0.15))
# Add spatial dropout layer instead of max pooling to prevent overfitting
model.add(SpatialDropout2D(p=0.2))
#model.add(MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='valid'))
#################################################
# Use either globalpooling layer or flatten layer
#model.add(GlobalAveragePooling2D())
model.add(Flatten())
#########################
# Fully connected layers
#########################
model.add(Dense(1500))
model.add(Dense(300))
model.add(Dense(1))  # single output: the steering angle (regression)
# Print the summary of the model
model.summary()
# Compile the model: mean-squared error suits the regression target
model.compile(loss='mse', optimizer='adam')
# Create a model checkpoint that keeps only the best model by validation loss
model_checkpoint = ModelCheckpoint('model.h5', monitor='val_loss', verbose=1, save_best_only=True)
callbacks = [model_checkpoint]
# Fit the data to model
#model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=10, callbacks=callbacks)
########################################################################
# Use fit_generator to process part of data and save the best model only
########################################################################
model.fit_generator(train_generator, samples_per_epoch=len(X_train_samples), nb_epoch=10, callbacks=callbacks, validation_data=validation_generator, nb_val_samples=len(X_valid_samples))
# Save the model
#model.save('model.h5')
# End
| true |
3a58db076c873504fdac452dc36debc6659efc4b | Python | 0x17/SP-Simulation | /spmergetraces.py | UTF-8 | 2,252 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python
import os
TIME_LIMIT = 1  # seconds of solver runtime covered by the merged trace
instance_names = []
opt_profits = {}  # instance name -> known optimal profit (reference values)
# OptimalResults.txt holds "<instance>;<optimal profit>" rows after one header line.
with open('OptimalResults.txt', 'r') as fp:
    for line in fp.readlines()[1:]:
        parts = line.split(';')
        instance_name = parts[0].rstrip()
        opt_profit = float(parts[1].rstrip())
        opt_profits[instance_name] = opt_profit
        instance_names.append(instance_name)
instance_names.sort()
time_points = [ x / 100 for x in range(TIME_LIMIT*100) ]  # 0.00, 0.01, ... in 10ms steps
def trace_files_for_instance(instance_name):
    """Names of all trace files in the current directory for `instance_name`."""
    prefix = instance_name + '_'
    return [fn for fn in os.listdir('.') if fn.startswith(prefix)]
def solver_name_from_trace_filename(trace_fn):
    """Extract the solver name: '<instance>_<Solver>Trace.txt' -> '<Solver>'."""
    solver_part = trace_fn.split('_')[1]
    return solver_part.replace('Trace.txt', '')
def clean_profit_str(ps):
    """Map the '-inf' marker (no solution found yet) to a parseable '0.0'."""
    return ps.replace('-inf', '0.0')
traces = {}  # instance -> solver -> [(time, profit), ...] in file order
for instance_name in instance_names:
    traces[instance_name] = {}
    for tfn in trace_files_for_instance(instance_name):
        slv = solver_name_from_trace_filename(tfn)
        if slv not in traces[instance_name]:
            traces[instance_name][slv] = []
        # Trace files hold "<time>;<profit>" rows after a single header line.
        with open(tfn, 'r') as fp:
            for line in fp.readlines()[1:]:
                parts = line.split(';')
                traces[instance_name][slv].append((float(parts[0].rstrip()), float(clean_profit_str(parts[1].rstrip()))))
def best_profit_up_to(instance_name, slv, tp):
    """Highest profit recorded for (instance, solver) at or before time tp.

    Entries are assumed to be sorted by time (the scan stops at the first
    entry past tp).  Returns 0.0 when no entry qualifies.
    """
    best = 0.0
    for tau, profit in traces[instance_name][slv]:
        if tau > tp:
            break
        if profit > best:
            best = profit
    return best
def gap(obj, optimal_obj):
    """Relative gap (optimal - obj) / optimal, floored at 0; 0.0 for non-positive optima."""
    if optimal_obj > 0:
        return max(0.0, (optimal_obj - obj) / optimal_obj)
    return 0.0
def avg(lst):
    """Arithmetic mean of a non-empty list."""
    total = sum(lst)
    return total / len(lst)
ostr = 'time;Gurobi;LocalSolver;ParticleSwarm;FullEnumeration\n'
for tp in time_points:
    avggaps = []
    # For each solver: average optimality gap across all instances at time tp.
    for slv in ['Gurobi', 'LocalSolver', 'ParticleSwarm', 'FullEnumeration']:
        gaps = []
        for instance_name in instance_names:
            optref = opt_profits[instance_name]
            bput = best_profit_up_to(instance_name, slv, tp)
            gaps.append(gap(bput, optref))
        avggaps.append(avg(gaps))
    avggapsstr = ';'.join([ '{:.4f}'.format(g) for g in avggaps])
    ostr += f'{str(tp)};{avggapsstr}\n'
with open('spmergedtraces.txt', 'w') as fp:
fp.write(ostr) | true |
b33176a6805bfc96a4f84c1e62c5613055e7d408 | Python | Ahnseungwan/Phython_practice | /2020.12/12.30/12.30 변수.py | UTF-8 | 529 | 4.03125 | 4 | [] | no_license | # 애완동물을 소개해 주세요
# Introduce a pet using basic variables and string formatting (Korean sample text).
animal = "고양이"
name = "연탄이"
age = 4
hobby = "산책"
is_adult = age >= 3  # boolean: pets aged 3 or older count as adults
print("우리집 "+ animal +"의 이름은 "+ name +"예요")
hobby = "공놀이"  # variables can be reassigned at any time
# print(name + "는" + str(age) + "살이며, "+ hobby + "을 아주 좋아해요") # wrap integers in str() before '+' concatenation
print(name, "는" , age , "살이며, ",hobby,"을 아주 좋아해요") # print() with commas converts the values itself
print(name + "는 어른일까요? " + str(is_adult)) # booleans like True also need str() for concatenation
| true |
ff27c5de522d8ca0b33a25bbe7ce624ec7223fc3 | Python | multikillerr/Hacking | /server_get.py | UTF-8 | 245 | 2.578125 | 3 | [] | no_license | #!usr/bin/python27
import sys
import socket
import os

# Create a TCP/IPv4 server socket.  The original had several fatal typos:
# `sock.AF_INET` / `sock_STREAM` (NameErrors) and bind() called with two
# positional arguments instead of a single (host, port) tuple.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.bind(("127.0.0.1", 8000))  # bind() takes one (address, port) tuple
    s.listen(1)                  # a server socket must listen before accepting
except socket.error:
    print("Could not bind on the ip provided")
    sys.exit(1)

# recv() is only valid on the *accepted* connection, not the listening socket.
connection, address = s.accept()
while True:
    data = connection.recv(1024)
    if not data:  # empty read: the peer closed the connection
        break
    print(data)
connection.close()
s.close()
| true |
a90662f3f4f4c496fa2633103c7567fbed6ea996 | Python | ffabut/kreap2 | /4/examples/post-method/main.py | UTF-8 | 1,707 | 3.125 | 3 | [] | no_license | import tornado.ioloop
import tornado.web
#jednoducha ukazka toho, jak prijimat data skrze POST request
#na index page se zobrazuje index.html soubor, ktery obsahuje html <form> pro zadani dat
#zadana data se posilaji jako POST request na adresu /enterdata
#kde je zpracuje EnterDataHandler pomoci metody post()
class MainHandler(tornado.web.RequestHandler):
"""
MainHandler obstarava index page na adrese "/"
"""
def get(self):
self.render("index.html")
class EnterDataHandler(tornado.web.RequestHandler):
"""
EnterDataHandler obstarava adresu "/enterdata"
"""
def get(self):
# o GET request na adrese /enterdata nestojime
# kdyby nahodou GET request dosel, tak presmerujeme na "/", kde je stranka pro zadani dat
self.redirect("/")
def post(self):
# u POST requestu zpracujeme prichozi data
# jmeno argumentu se musi shodovat s tim, jak je pojmenovane pole ve formulari <form>
jmeno = self.get_argument("name") #pokud nezadame pojmenovany parametr "default", pak dojde k chybe pri nedodani dat
email = self.get_argument("mail", default="email") #pokud zadame default, pak pri nedodani dat se pouzije hodnota v default
self.write(jmeno + " " + email) #nakonec vratime stranku s jednoduchym echem vlozenych dat
#pripadne taky muze byt render() nebo redirect()
def make_app():
return tornado.web.Application([
(r"/", MainHandler), #hlavni handler pro index page
(r"/enterdata", EnterDataHandler), #handler pro adresu, kam se POSTuji data
])
if __name__ == "__main__":
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start() | true |
23953781838c0e06c118732f940464e4dd183424 | Python | midhun999/Diabetes-Predictor-ML-Web-App1 | /diabetes_pred.py | UTF-8 | 2,024 | 3.1875 | 3 | [] | no_license | import numpy as np
import pickle
import pandas as pd
import streamlit as st
# Load the trained SVC classifier once, at import time.
# NOTE(review): the pickle file handle is never closed; a `with` block would be safer.
pickle_in = open("model_svc_pickle", "rb")
classifier = pickle.load(pickle_in)
df = pd.read_csv('diabetes.csv')
df_features = df.iloc[:,0:8]  # first 8 columns are the model's input features
def diabetes_prediction(Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin, BMI, DiabetesPedigreeFunction, Age):
    """Classify one patient record with the pickled SVC model (0 = no diabetes, 1 = diabetes)."""
    feature_row = [[Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin, BMI, DiabetesPedigreeFunction, Age]]
    prediction = classifier.predict(feature_row)
    print(prediction)  # mirror the prediction to the server console
    return prediction
def main():
    """Render the Streamlit page: sliders for the 8 features, predict on demand."""
    st.title('')
    html_temp = """
    <div style="background-color:#546beb; padding:10px">
    <h2 style="color:#93f50a;text-align:center;">Diabetes Predictor </h2>
    </div>
    """
    st.markdown(html_temp, unsafe_allow_html=True)
    # One sidebar slider per model feature: (label, min, max, default).
    Pregnancies = st.sidebar.slider('Pregnancies', 0, 20, 1)
    Glucose = st.sidebar.slider('Glucose', 0.0, 200.0, 85.0)
    BloodPressure = st.sidebar.slider('Blood Pressure', 0.0,140.0, 66.0 )
    SkinThickness = st.sidebar.slider('Skin Thickness', 0.0, 100.0, 29.0)
    Insulin = st.sidebar.slider('Insulin', 0.0, 900.0, 0.0)
    BMI = st.sidebar.slider('BMI', 0.0, 70.0, 26.6)
    DiabetesPedigreeFunction = st.sidebar.slider('Diabetes Pedigree Function', 0.0, 4.0, 0.351)
    Age = st.sidebar.slider('Age', 21, 100, 31 )
    # printing user inputs (echoed back as a one-row DataFrame)
    user_input = [[Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin, BMI, DiabetesPedigreeFunction, Age]]
    col_head = list(df_features.columns.values)
    f = pd.DataFrame(user_input,columns = col_head)
    st.write('Your Input')
    st.write(f)
    fr = 10  # sentinel meaning "no prediction made yet"
    if st.button("Predict"):
        r1 = diabetes_prediction(float(Pregnancies), float(Glucose), float(BloodPressure), float(SkinThickness), float(Insulin), float(BMI), float(DiabetesPedigreeFunction), float(Age))
        r2 = r1.tolist()
        fr = r2[0]  # single-sample prediction: 0 or 1
    result = fr
    if result == 0:
        st.success("You don't have diabetes")
    elif result == 1 :
        st.success("You have diabetes")
    else :
        st.write("Click on Predict")
main()  # build the page on every script run
| true |
e4c1d8837c72eb401bed19ad0f4c5e3e51b13df5 | Python | SLKyrim/vscode-leetcode | /0590.n叉树的后序遍历.py | UTF-8 | 2,133 | 3.6875 | 4 | [] | no_license | #
# @lc app=leetcode.cn id=590 lang=python3
#
# [590] N叉树的后序遍历
#
# https://leetcode-cn.com/problems/n-ary-tree-postorder-traversal/description/
#
# algorithms
# Easy (71.16%)
# Likes: 55
# Dislikes: 0
# Total Accepted: 17.1K
# Total Submissions: 23.7K
# Testcase Example: '[1,null,3,2,4,null,5,6]\r'
#
# 给定一个 N 叉树,返回其节点值的后序遍历。
#
# 例如,给定一个 3叉树 :
#
#
#
#
#
#
#
# 返回其后序遍历: [5,6,3,2,4,1].
#
#
#
# 说明: 递归法很简单,你可以使用迭代法完成此题吗?
#
# @lc code=start
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution:
    def postorder(self, root: 'Node') -> 'List[int]':
        """Return the post-order traversal of an N-ary tree iteratively."""
        # Trick iteration: a reversed pre-order (push children left-to-right,
        # pop from the right) reversed once more at the end yields post-order.
        if not root:
            return []
        res = list()
        stack = [root]
        while stack:
            node = stack.pop()
            res.append(node.val)
            for child in node.children:
                stack.append(child)
        return res[::-1]
        # Direct iteration (kept for reference): exceeded the time limit.
        # if not root:
        #     return []
        # res = list()
        # visNode = set()
        # stack = [root]
        # while stack:
        #     node = stack[-1]
        #     childNum = len(node.children)
        #     visited = [True for i in range(childNum)]
        #     for i in range(childNum - 1, -1, -1):
        #         child = node.children[i]
        #         if child not in visNode:
        #             visited[i] = False
        #             stack.append(child)
        #     if len(set(visited)) == 1:
        #         stack.pop()
        #         res.append(node.val)
        #         visNode.add(node)
        #     return res
        ### Recursive version (kept for reference)
        # if not root:
        #     return []
        # res = list()
        # for child in root.children:
        #     res += self.postorder(child)
        # res += [root.val]
        # return res
# @lc code=end
| true |
664ed9ca5607f8364e6ebe1ed417666127aa185a | Python | gieoon/Generate-Websites-with-AI | /RL2/main.py | UTF-8 | 874 | 2.75 | 3 | [] | no_license | # Implement q-learning.
import numpy as np
from flask import Flask, render_template
from flask_socketio import SocketIO
from action import generateHTMLAction, displayHTMLFile
app = Flask(__name__)
socketio = SocketIO(app)  # websocket layer on top of the Flask app
k = 5  # NOTE(review): unused in this module -- presumably a q-learning constant
# Number of steps before human intervention
ACTION_STEPS = 10
@app.route('/')
def run():
    """Drive ACTION_STEPS generation steps, then return the generated output."""
    count = 0
    # NOTE(review): `target` is an int placeholder here, so ''.join(target)
    # below raises TypeError until target becomes a sequence of strings.
    target = 1# np.array() # What input is to be used here?
    while count < ACTION_STEPS:  # `while True and ...` simplified; same bound
        count += 1
        # NOTE(review): `step` is not defined or imported in this module.
        step(target)
        if count % ACTION_STEPS == 0:
            # Generate and send to discriminator
            #print("target: ", ''.join(target))
            return ''.join(target)
@socketio.on('message')
def handle_message(message):
    """Log plain (string) websocket messages."""
    print('received message: ' + message)
@socketio.on('json')
def handle_json(json):
    """Log JSON websocket payloads (`json` here is the payload, not the module)."""
    print('received json: ' + str(json))
if __name__ == '__main__':
    # socketio.run wraps app.run with websocket support.
    socketio.run(app)
| true |
efbd806d7aa95a4e045de7b69495c7f2e1d564f8 | Python | hdelei/espsemaphore | /check_tests.py | UTF-8 | 1,156 | 2.75 | 3 | [] | no_license | #Script para chamar outro script em caso de modificação
from os import path, system
import platform
from time import sleep
import requests
def windows_loop():
    """Watch programa.py for changes; on change, run it and report the result.

    The result is signalled over HTTP to a remote board (the repo's ESP
    "semaphore"): green on success (exit status 0), red otherwise.
    Runs forever; uses cmd.exe-specific commands, so Windows only.
    """
    file = 'programa.py'
    url = 'http://192.168.25.9/set?{}=on'
    create_time = path.getctime(file)
    while(True):
        system('ECHO|SET /p="."')  # print a progress dot without a newline (cmd.exe trick)
        mod_time = path.getmtime(file)
        if mod_time != create_time:
            system('cls')  # clear the console before re-running the script
            exit_status = system(file)
            if exit_status == 0:
                requests.get(url.format('green'))
            else:
                requests.get(url.format('red'))
            create_time = mod_time  # remember which version was just run
        sleep(1)
def main():
    """Dispatch to the watcher implementation for the current operating system."""
    os_name = platform.system()
    if os_name == 'Windows':
        windows_loop()
    elif os_name == 'Linux':
        system('sh check_tests.sh')
    else:
        print('unsupported platform')
if __name__ == "__main__":
main() | true |
4aa7d91833a8ef3b0b3293d100a43f252e904eff | Python | k-harada/AtCoder | /ABC/ABC101-150/ABC145/C.py | UTF-8 | 850 | 3.390625 | 3 | [] | no_license | import math
def solve(n, x_list, y_list):
    """Expected total travel distance over a random visiting order.

    Every unordered pair of points contributes its Euclidean distance with
    expected multiplicity 2/n, hence sum-of-pairwise-distances * 2 / n.
    """
    pair_distance_sum = 0.0
    for a in range(n - 1):
        for b in range(a + 1, n):
            dx = x_list[a] - x_list[b]
            dy = y_list[a] - y_list[b]
            pair_distance_sum += math.sqrt(dx * dx + dy * dy)
    return pair_distance_sum * 2 / n
def main():
    """Read n points (one "x y" pair per line) from stdin and print the answer."""
    n = int(input())
    x_list = [0] * n
    y_list = [0] * n
    for i in range(n):
        x, y = map(int, input().split())
        x_list[i] = x
        y_list[i] = y
    res = solve(n, x_list, y_list)
    print(res)
def test():
    """Sanity checks against the ABC145-C sample cases (1e-6 tolerance)."""
    assert abs(solve(3, [0, 1, 0], [0, 0, 1]) - 2.2761423749) < 0.000001
    assert abs(solve(2, [-879, -866], [981, 890]) - 91.9238815543) < 0.000001
    assert abs(solve(
        8, [-406, 512, 494, -955, 128, -986, 763, 449], [10, 859, 362, -475, 553, -885, 77, 310]
    ) - 7641.9817824387) < 0.000001
if __name__ == "__main__":
    test()  # run the sample checks before reading the real input
    main()
| true |
57b2079b2d4b459c3f29803454285af526c43d53 | Python | PascalVA/adventofcode2018 | /dec1/dec1.py | UTF-8 | 384 | 3.375 | 3 | [] | no_license | #!/usr/bin/env python
# Advent of Code 2018, day 1.  The original crashed on an undefined name
# (`dupl`) and had the two part labels swapped: part 1 is the frequency
# after one pass of all changes, part 2 is the first frequency seen twice.
dup = None
freq = 0
seen = {0}  # set: O(1) membership; the starting frequency 0 counts as seen
with open("input.txt", "r") as f:
    inList = f.read().splitlines()
# Part 2: cycle through the change list until a running frequency repeats.
while dup is None:
    for item in inList:
        freq = freq + int(item)
        if freq in seen:
            dup = freq
            break
        seen.add(freq)
print("PART 1: %d" % sum(int(item) for item in inList))
print("PART 2: %d" % dup)
| true |
1e481f552a9a47fb66be31e5c0d3b7656ab0cf4d | Python | Misk77/Python-Svenska-Gramas | /Python svenska - 5 - Flödeskontroll.py | UTF-8 | 120 | 3.203125 | 3 | [] | no_license | age = 18
# Tell the user whether they are old enough (18+) to drive.
if age < 18:
    print("Tyvärr du får vänta några år")
else:
    print("Grattis! du får köra bil!")
| true |
78c53a66c67edddb3b991bb3733e3345b1b661ba | Python | FlavioImbertDomingos/repo-scraper | /repo_scraper/filetype.py | UTF-8 | 151 | 2.5625 | 3 | [
"MIT"
] | permissive | import re
def get_extension(filename):
    """Return the lowercased extension of `filename`, or None if it has none.

    The greedy '.*' anchors the match at the *last* dot, so
    'archive.tar.gz' yields 'gz'.  Non-string input also yields None
    (preserving the old bare-except behaviour without hiding other bugs).
    """
    try:
        return re.compile(r'.*\.(\S+)$').findall(filename)[0].lower()
    except (IndexError, TypeError):
        return None
| true |
886d044cc305cddc61861069563d3ffbbcc859de | Python | Camila2301/PARCIAL_4 | /Punto1 (1).py | UTF-8 | 464 | 3.890625 | 4 | [] | no_license | """El siguiente codigo calcula e imprime"""
"""Sumatoria de Riemann"""
"""Autor:Maria Camila Vargas Giraldo"""
"""Ultima actualizacion:22 de septiembre/2021"""
import numpy as np
def Zeta(n):
    """Partial sum of the series sum_{i=1..n} 1/i**2 (converges to pi**2/6)."""
    partial = 0
    for i in range(1, n + 1):
        # Accumulate the i-th term onto the running partial sum.
        partial += i ** (-2)
    return partial
# Compare the partial sum against the exact limit pi**2/6 (Basel problem).
print((Zeta(10000)))
print((np.pi**2)/6)
| true |
3e0caf2547b030722d774adf07b03dfe884160d5 | Python | tanvijain13/CS5590-490-0001-Python-and-Deep-Learning-Programming- | /ICP1/Source Code/replace.py | UTF-8 | 249 | 3.453125 | 3 | [] | no_license | str="I love playing with python"
# Pluralize the word "python" in the sentence stored in `str` above.
# (NOTE(review): that variable shadows the builtin `str`; renaming it would
# be better, but its assignment lies outside this block.)
split= str.split()
lis = ["pythons" if word == "python" else word for word in split]
# join() builds the result in one pass; the old per-word `+=` concatenation
# rebuilt the string on every iteration (quadratic).  The trailing space of
# the original output is preserved.
final_string = " ".join(lis) + " " if lis else ""
print(final_string)
| true |
50904894944954ec7e879abe144d10c6a78bacf4 | Python | aiventures/tools | /code_snippets/sample_inspect/module_loader_example.py | UTF-8 | 3,013 | 2.796875 | 3 | [
"MIT"
] | permissive | """ loading python modules programmatically can be used for inspect """
import sys
import logging
import os
from pathlib import Path
from importlib import util as import_util
from os import walk
logger = logging.getLogger(__name__)
class ModuleLoader():
    """Discover python packages under a root folder and import all their modules.

    A directory counts as a package when it contains ``__init__.py``; every
    other ``*.py`` file inside it is loaded and registered in ``sys.modules``
    under its dotted name relative to the root.
    """

    def __init__(self, p_root) -> None:
        # guard clause: refuse anything that is not an existing directory
        if not os.path.isdir(p_root):
            logger.error(f"{p_root} is not a valid path, check")
            return
        self._p_root = Path(os.path.abspath(p_root))
        logger.info(f"{p_root} initialized")
        self._module_paths = {}
        self._walk_paths()
        self._loaded_modules = []
        self._load_modules()

    def _walk_paths(self):
        """Record every package directory together with its module files."""
        found = {}
        for subpath, _, files in os.walk(self._p_root):
            logger.debug(f"Walk path {subpath}")
            modules = {}
            has_init = False
            for fname in files:
                candidate = Path(fname)
                # only plain .py files that are not the package marker itself
                if candidate.suffix[1:] == "py" and candidate.stem != "__init__":
                    modules[candidate.stem] = candidate
                    logger.debug(f"found python file {fname}")
                if fname == "__init__.py":
                    has_init = True
            logger.debug(f"found module path {subpath}")
            if has_init:
                found[subpath] = modules
        self._module_paths = found

    def _load_modules(self):
        """Import every collected module file and register it in ``sys.modules``."""
        root_depth = len(self._p_root.parts)
        for package_path, files in self._module_paths.items():
            p_package = Path(package_path)
            # dotted package name, relative to the configured root folder
            main_package = ".".join(p_package.parts[root_depth:])
            logger.info(f"Process folder {package_path} as package [{main_package}]")
            for module, module_file_name in files.items():
                p_module = p_package / module_file_name
                module_name = main_package + "." + module
                logger.info(f"Loading module {module_name}")
                spec = import_util.spec_from_file_location(module_name, p_module)
                loaded = import_util.module_from_spec(spec)
                sys.modules[module_name] = loaded
                spec.loader.exec_module(loaded)
                self._loaded_modules.append(module_name)
        logger.info(f"Loaded Modules {self._loaded_modules}")
if __name__ == "__main__":
loglevel=logging.INFO
logging.basicConfig(format='%(asctime)s %(levelname)s %(module)s:[%(name)s.%(funcName)s(%(lineno)d)]: %(message)s',
level=loglevel, stream=sys.stdout,datefmt="%Y-%m-%d %H:%M:%S")
# root path / set path to path of this executable so that demo modules are loaded
root_path = Path(__file__).parent
module_loader = ModuleLoader(root_path)
pass
| true |
27cad35a2acd68851031bf03f66e6cc7592bd498 | Python | awesomewyj/54young | /unit/test_demo.py | UTF-8 | 543 | 2.9375 | 3 | [] | no_license | import unittest
class TestDemo(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
print("setupcalss")
def setUp(cls) -> None:
print("setup")
@classmethod
def tearDownClass(cls) -> None:
print("tearDownClass")
def tearDown(cls) -> None:
print("tearDown")
def test_sum(self):
x = 1 + 2
print(x)
self.assertEqual(4, x, f"{x} expection=3")
def test_demo(self):
self.assertTrue(False)
if __name__ == '__main__':
    # run the test suite when this file is executed directly
    unittest.main()
| true |
95ea2d51105ce5fd13f4a3cf936b1c9c0e10d455 | Python | sohumh/encryption | /cryptoCracker.py | UTF-8 | 7,466 | 3.421875 | 3 | [] | no_license | import enchant
from random import randint
"""
FUTURE NOTES
Does not support punctuation
Does not work efficiently on long inserts for the subsitution decoder (nor does it return the correct answer on small ones)
Caesar cipher works perfectly fine
GOALS
connect to a web app
"""
class Answers():
    """Base class for cipher crackers: holds the lowercased ciphertext and a
    character-frequency table.

    ``default`` is the assumed frequency order of letters in English text
    (most common first); ``o`` is accepted for signature compatibility with
    subclasses but unused here.  Subclasses must set ``self.spitter`` to a
    generator of candidate decodings before ``next()`` is called.
    """

    def __init__(self, c, default=None, o=False):
        self.code = c.lower()
        # build a fresh list per instance instead of sharing one mutable
        # default list across every construction
        if default is None:
            default = ['e', 't', 'a', 'o', 'i', 'n', 's', 'r', 'h', 'd', 'l', 'u',
                       'c', 'm', 'f', 'y', 'w', 'g', 'p', 'b', 'v', 'k', 'x', 'q', 'j', 'z']
        self.alphabet = default
        self.has_spaces = False
        self.dictionary = self.create_dict()

    def create_dict(self):
        """Return a char -> count map of the ciphertext (whitespace is skipped
        but remembered via ``self.has_spaces``)."""
        counts = {}
        for char in self.code:
            if char.isspace():
                self.has_spaces = True
                continue
            counts[char] = counts.get(char, 0) + 1
        return counts

    def next(self):
        """Return the next candidate from ``self.spitter``, or False when exhausted."""
        try:
            return next(self.spitter)
        except StopIteration:  # narrowed from a bare except that hid real bugs
            return False
class Substution(Answers):
    """Candidate generator for a monoalphabetic substitution cipher.

    Recursively assigns the most frequent remaining ciphertext character
    (head of ``self.ordered``) to each English letter in frequency order
    (``self.alphabet``) and yields every resulting plaintext guess.
    NOTE(review): the class name looks like a misspelling of "Substitution";
    kept unchanged because callers reference it by this name.
    """
    def __init__(self, c, default = ['e', 't', 'a', 'o', 'i', 'n', 's', 'r', 'h', 'd', 'l', 'u', 'c', 'm', 'f', 'y', 'w', 'g', 'p', 'b', 'v', 'k', 'x', 'q', 'j', 'z'], o = False):
        # o: pre-computed ciphertext frequency order; False means "derive it here".
        # An empty list compares unequal to False, so recursion bottoms out in
        # the else branch with ordered == [].
        Answers.__init__(self, c, default)
        if o == False:
            self.ordered = self.sort_dict()
        else:
            self.ordered = o
        self.spitter = self.spit()
    def all_but(self, i, lst):
        # copy of lst with element i removed
        return lst[:i] + lst[i + 1:]
    def sort_dict(self):
        # ciphertext characters ordered by descending frequency
        sorted_items = sorted(self.dictionary.items(), key = lambda a: a[1], reverse = True)
        return [item[0] for item in sorted_items]
    def which_changed(self, code_pop, alpha_pop):
        # boolean mask over self.code marking the positions that hold code_pop
        def f(char):
            if (char == code_pop):
                return True
            else:
                return False
        return [f(i) for i in self.code]
    def final_ans(self, changed_bools, changed_answer, alpha_pop):
        # write alpha_pop into the masked positions of the recursive answer
        def f(i):
            if changed_bools[i]:
                return alpha_pop
            else:
                return changed_answer[i]
        new_ans = [f(i) for i in range(len(changed_answer))]
        return "".join(new_ans)
    def all_outputs(self, i):
        # returns: remaining alphabet, chosen plaintext letter,
        # remaining ciphertext frequency order, most frequent ciphertext char
        one = self.all_but(i, self.alphabet)
        two = self.alphabet[i]
        three = self.ordered[1:]
        four = self.ordered[0]
        return one, two, three, four
    def spit(self):
        """ Generator that yields all possible interpretations """
        if not self.code:
            return
        if not self.ordered:
            # every ciphertext character has been assigned
            yield self.code
            return
        for i in range(len(self.alphabet)):
            # try mapping the most frequent ciphertext char to each remaining letter
            new_alphabet, alpha_pop, new_ordered, code_pop = self.all_outputs(i)
            next_ans = Substution(self.code, new_alphabet, new_ordered)
            for changed_answer in next_ans.spit():
                changed_bools = self.which_changed(code_pop, alpha_pop)
                yield self.final_ans(changed_bools, changed_answer, alpha_pop)
class Caesar(Answers):
    """Caesar-cipher candidate generator: tries all 26 rotations, most likely first."""

    def __init__(self, c):
        Answers.__init__(self, c)
        self.spitter = self.spit()

    def spit(self):
        """Yield the 26 possible plaintexts, ordered by letter-frequency likelihood."""
        # assume the most frequent ciphertext character decodes to each
        # frequency-ranked English letter in turn
        most_common = max(self.dictionary, key=self.dictionary.get)
        base = ord(most_common)
        for guess in self.alphabet[:26]:
            yield self.shifted_by(ord(guess) - base)

    def shifted_by(self, diff):
        """Return the ciphertext rotated by ``diff`` positions; whitespace passes through."""
        def rotate(char):
            if char.isspace():
                return char
            code_point = ord(char) + diff
            # wrap back into the lowercase a-z range
            if code_point < 97:
                code_point = 123 - (97 - code_point)
            elif code_point > 122:
                code_point = code_point % 123 + 97
            return chr(code_point)

        return "".join(rotate(ch) for ch in self.code)
class Decoder:
    """Filter the guesses produced by an ``Answers`` instance down to English text.

    ``instance`` must be an ``Answers`` subclass instance (``Caesar`` or
    ``Substution``); ``num`` bounds how many words an answer may be split into
    and defaults to the ciphertext length (effectively unbounded).
    """

    def __init__(self, instance, num=None):
        self.webster = enchant.Dict('en_US')  # US-English spell checker
        self.instance = instance  # needs to be some Answers instance
        # default word budget: one word per ciphertext character
        self.num_words = len(self.instance.code) if num is None else num
        self.answer_gen = self.answers()

    def next(self):
        """Return the next English interpretation, or False when exhausted."""
        try:
            return next(self.answer_gen)
        except StopIteration:  # narrowed from a bare except that hid real bugs
            return False

    def answers(self):
        return self.get_answers(self.num_words)

    def get_answers(self, num_words):
        """A generator that yields every candidate decoding that parses as English."""
        word = ""
        # the underlying Answers.next() returns False once its guesses run out
        while word is not False:
            word = self.instance.next()
            if self.instance.has_spaces:
                yield from self.dissect_spaces(word)
            else:
                yield from self.dissect(word, num_words)

    def dissect(self, word, num):
        """Yield every way to split ``word`` into at most ``num`` dictionary words."""
        if not num or not word or len(word) < 2:
            return
        if self.webster.check(word):
            yield word
        for i in range(1, len(word)):
            first, rest = word[:i], word[i:]
            # only "a" and "i" are acceptable one-letter English words
            if len(first) >= 2 or first == 'i' or first == 'a':
                if self.webster.check(first):
                    for answer in self.dissect(rest, num - 1):
                        yield first + " " + answer

    def dissect_spaces(self, word):
        """Yield ``word`` unchanged when every whitespace-separated token is English."""
        if not word:
            return
        text = word.split()
        for w in text:
            if not self.webster.check(w):
                return
        yield word
class Encode_Caesar():
    """Encrypt a word with a randomly chosen Caesar shift (0-25); whitespace
    is left untouched and the input is lowercased first."""

    def __init__(self, word):
        self.alphabet = ['e', 't', 'a', 'o', 'i', 'n', 's', 'r', 'h', 'd', 'l', 'u',
                         'c', 'm', 'f', 'y', 'w', 'g', 'p', 'b', 'v', 'k', 'x', 'q', 'j', 'z']
        self.word = word.lower()
        self.shift = randint(0, len(self.alphabet) - 1)
        self.encoded = self.encode()

    def encode(self):
        """Return the plaintext shifted by ``self.shift`` positions."""
        def shift_char(char):
            if char.isspace():
                return char
            moved = ord(char) + self.shift
            # wrap back into the lowercase a-z range
            if moved < 97:
                moved = 123 - (97 - moved)
            elif moved > 122:
                moved = moved % 123 + 97
            return chr(moved)

        return "".join(shift_char(c) for c in self.word)
class Encode_Substution():
    """Encrypt text with a random substitution cipher (one unique random
    letter per distinct character).

    Whitespace is now passed through unchanged so word boundaries survive the
    encoding, matching ``Encode_Caesar`` and what ``Decoder.dissect_spaces``
    expects; previously spaces were substituted into random letters.
    """

    def __init__(self, word):
        self.alphabet = ['e', 't', 'a', 'o', 'i', 'n', 's', 'r', 'h', 'd', 'l', 'u',
                         'c', 'm', 'f', 'y', 'w', 'g', 'p', 'b', 'v', 'k', 'x', 'q', 'j', 'z']
        self.word = word
        self.d = self.dictionize()
        self.encoded = self.encode()

    def dictionize(self):
        """Build the substitution table: each distinct character -> a unique random letter."""
        d = {}
        for char in self.word:
            if char not in d:
                # keep whitespace as-is instead of consuming a cipher letter for it
                d[char] = char if char.isspace() else self.random_char()
        return d

    def encode(self):
        """Apply the substitution table to the plaintext."""
        return "".join([self.d[char] for char in self.word])

    def random_char(self):
        """Pop and return a random unused letter from the remaining alphabet."""
        return self.alphabet.pop(randint(0, len(self.alphabet) - 1))
def test(word):
    """Demo round-trip: Caesar-encrypt ``word``, print the ciphertext, then
    print every dictionary-valid decoding the cracker produces."""
    encoder = Encode_Caesar(word)
    print(encoder.encoded)
    cracker = Caesar(encoder.encoded)
    guesser = Decoder(cracker, 1)
    print(list(guesser.answer_gen))


test("butterfly")
| true |
ab2db76f3c99d8412dda3bc1ebfc0f95b052b45a | Python | Aurora-yuan/Leetcode_Python3 | /0476 数字的补数/0476 数字的补数.py | UTF-8 | 1,046 | 4.375 | 4 | [] | no_license | #label: 位运算 difficulty: easy
"""
第一种思路:
最简单的按照题意的思路:
先得到输入的二进制形式,再逐位取反, 最后转回十进制。
"""
class Solution:
def findComplement(self, num: int) -> int:
s = bin(num)[2:] #转换成二进制有“0b”前缀
b = ""
for ch in s:
if ch == "0":
b += "1"
else:
b += "0"
# print b
return int(b,2)
“”“
第二种思路:
在将正整数处理为二进制的过程中,如果某一位是0,那么结果就直接加上 2** pos,pos是当前的位置,
这样处理完二进制之后即可直接得到答案。
”“”
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
res = 0
pos = 0
while(num >= 2):
temp = num % 2
if not temp:
res += 2 ** pos
num /= 2
pos += 1
return res
| true |
585a319d78dae14250f4cf29551cedb1308ca277 | Python | wency1111/new_chat | /chat_server.py | UTF-8 | 4,051 | 3.546875 | 4 | [] | no_license | """
socket fork 练习
群聊聊天室
功能 : 类似qq群功能
【1】 有人进入聊天室需要输入姓名,姓名不能重复
【2】 有人进入聊天室时,其他人会收到通知:xxx 进入了聊天室
【3】 一个人发消息,其他人会收到:xxx : xxxxxxxxxxx
【4】 有人退出聊天室,则其他人也会收到通知:xxx退出了聊天室
【5】 扩展功能:服务器可以向所有用户发送公告:管理员消息: xxxxxxxxx
"""
"""
1.技术点的确认
*转发模型:客户端--》服务端--》转发给其他客户端
*网络模型:UDP通信
*保存用户信息 [(name,addr),(...)] {name:addr}
*收发关系处理:采用多进程分别进行收发操作
2.结构设计
*采用什么样的封装结构:函数
*编写一个功能,测试一个功能
*注意注释和结构的设计
3.分析功能模块,制定具体编写流程
*搭建网络连接
*进入聊天室
客户端:*输入姓名
*将姓名发送给服务器
*接收返回的结果
*如果不允许则重复输入姓名
服务端:*接受姓名
*判断姓名是否存在
*将结果给客户端
*如果允许进入聊天室增加用户信息
*通知其他用户
*聊天
客户端:*创建新的进程
*一个进程循环发送消息
*一个进程循环接收消息
服务端:*接收请求,判断请求类型
*将消息转发给其他用户
*退出聊天室
客户端:*输入quit或者ctrl+c退出
*将请求发送给服务端
*结束进程
*接收端接收EXIT退出进程
客户端:*接收消息
*将退出消息告诉其他人
*
*
*管理员消息
4.协议
*如果允许进入聊天室,服务端发送OK给客户端
*如果不允许进入聊天室,服务端发送 不允许原因
*请求类别:
L-->进入聊天室
C-->聊天信息
Q-->退出聊天室
*用户存储结构:{name:addr...}
作业:1.整理客户端收发消息的显示情况
2.回顾思路
"""
from socket import *
import os,sys
# server bind address: all interfaces, UDP port 8888
ADDR=("0.0.0.0",8888)
# connected users: name -> client (ip, port) address, shared by all handlers
user = {}
def do_login(s, name, addr):
    """Handle a login request: reject duplicate or reserved names, otherwise
    admit the user and announce the arrival to everybody else.

    Relies on the module-level ``user`` dict (name -> client address).
    """
    # "管理员" means "administrator"; nobody may take a name containing it
    if name in user or "管理员" in name:
        s.sendto("该用户已存在".encode(), addr)  # "this user already exists"
        return
    s.sendto(b'OK', addr)
    # broadcast the arrival to everybody already in the room
    announcement = ("欢迎%s进入聊天室" % name).encode()  # "welcome <name> to the chat room"
    for member_addr in user.values():
        s.sendto(announcement, member_addr)
    # register the newcomer last so they do not receive their own welcome
    user[name] = addr
def do_chat(s, name, text):
    """Relay a chat line from ``name`` to every other connected user."""
    payload = ("%s : %s" % (name, text)).encode()
    for member, member_addr in user.items():
        if member != name:
            s.sendto(payload, member_addr)
def do_quit(s, name):
    """Announce that ``name`` left, send the leaver an EXIT marker, then
    remove them from the roster."""
    farewell = ("%s退出了聊天室" % name).encode()  # "<name> left the chat room"
    for member, member_addr in user.items():
        if member != name:
            s.sendto(farewell, member_addr)
        else:
            # the leaving client's receiver loop terminates on the EXIT sentinel
            s.sendto(b'EXIT', member_addr)
    # forget the user
    del user[name]
def do_request(s):
    """Server loop: receive datagrams forever and dispatch them by request type.

    Protocol (space separated): "L <name>" login, "C <name> <text>" chat,
    "Q <name>" quit.
    """
    while True:
        data, addr = s.recvfrom(1024)
        decoded = data.decode()
        fields = decoded.split(" ")
        kind = fields[0]
        if kind == "L":
            do_login(s, fields[1], addr)
            print(decoded)
        elif kind == "C":
            # everything after the name is the chat text (it may contain spaces)
            text = " ".join(fields[2:])
            do_chat(s, fields[1], text)
            print("%s : %s" % (fields[1], text))
        elif kind == "Q":
            do_quit(s, fields[1])
def main():
    """Start the UDP chat server: fork an admin-console child process while
    the parent serves client requests."""
    # create and bind the UDP server socket
    sock = socket(AF_INET, SOCK_DGRAM)
    sock.bind(ADDR)
    pid = os.fork()
    if pid < 0:
        return
    if pid == 0:
        # child: read administrator messages from the console and inject
        # them into the room as chat requests
        while True:
            line = input("管理员消息:")  # prompt: "administrator message:"
            sock.sendto(("C 管理员消息" + line).encode(), ADDR)
    else:
        # parent: handle client requests forever
        do_request(sock)
if __name__=="__main__":
main()
| true |