| blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success |
|---|---|---|---|---|---|---|---|---|---|---|---|
a55b0c1e1d008574adfa0bb1dde1ecd83e4ba4e4
|
Python
|
zeus911/aws_manage
|
/list_ec2.py
|
UTF-8
| 1,542
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import session
import parse_ec2
def list_instances():
ec2_config = parse_ec2.get_config('ec2.json')
s = session.create_session(ec2_config['region'])
ec2 = s.resource('ec2')
instances = ec2.instances.all()
instances_list = []
# No., Name, Instance-id, Instance-type, State, DNS, Key
string_format = '{:5}{:9}{:13}{:15}{:9}{:45}{:10}'
print(string_format.format('No.',
'Name',
'Instance-id',
'Instance-type',
'State',
'DNS',
'Key'
))
for n, i in enumerate(instances):
i_map = {}
i_map['name'] = i.tags[0]['Value']
i_map['instance-id'] = i.instance_id
i_map['instance-type'] = i.instance_type
i_map['state'] = i.state['Name']
i_map['dns'] = i.public_dns_name
i_map['key'] = i.key_name
print(string_format.format(str(n+1),
i_map['name'],
i_map['instance-id'],
i_map['instance-type'],
i_map['state'],
i_map['dns'],
i_map['key'],
))
instances_list.append(i_map)
return instances_list
| true
|
694ef56a81aa284deed576d81c7540659c52e540
|
Python
|
Shirleybini/Python-Projects
|
/Birthday Wisher/main.py
|
UTF-8
| 912
| 2.890625
| 3
|
[] |
no_license
|
import smtplib
import random
import datetime as dt
import pandas as pd
my_email = "youremailid@email.com"
password = "your password"
now = dt.datetime.now()
today = (now.month,now.day)
birthdays = pd.read_csv("birthdays.csv")
birthday_dict = {(data_row['month'],data_row['day']):data_row for (index, data_row) in birthdays.iterrows()}
if today in birthday_dict:
N = random.randint(1,3)
with open(f'letter_templates/letter_{N}.txt','r') as letter:
Letter = letter.read()
message = Letter.replace('[NAME]',birthday_dict[today][0])
message = message.replace('Angela','Your Name')
connection = smtplib.SMTP("smtp.gmail.com") #change according to email
connection.starttls()
connection.login(user=my_email,password = password)
connection.sendmail(from_addr=my_email,to_addrs=birthday_dict[today][1],msg= f"Subject:Happy Birthday!\n\n{message}")
connection.close()
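# Editor's note (assumption about the CSV layout): birthday_dict[today][0] and
# birthday_dict[today][1] rely on positional access into the row, i.e. on "name"
# and "email" being the first two columns of birthdays.csv.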
| true
|
cf9aa3be71cde26348cf48f4faaee8228fde10bd
|
Python
|
Fengyongming0311/TANUKI
|
/Appium/LinBaO_Android/src/ReleasePage/Case07_GoodsDetail.py
|
UTF-8
| 2,875
| 2.5625
| 3
|
[] |
no_license
|
__author__ = 'TANUKI'
# coding:utf-8
import time,sys
sys.path.append("..")
class GoodsDetail:
def GoodsDetail(driver):
try:
driver.implicitly_wait(10)
#print ("开始执行用例7....进入商品详情")
time.sleep(3)
Homepage_all_handles = driver.window_handles
needhandle = driver.current_window_handle
for i in Homepage_all_handles:
if i != needhandle:
Detailpage_handles = i
break
#print("当前上下文为,", needhandle)
driver.find_element_by_xpath("/html/body/wx-view/wx-view[1]/wx-swiper/div/div[1]/div/wx-swiper-item[1]/wx-scroll-view/div/div/div/wx-view/wx-view[2]/wx-view[3]/wx-view[1]").click()
#Locate the first product by xpath and open its detail page
time.sleep(2)
driver.implicitly_wait(10)
#driver.switch_to_window(Detailpage_handles) is the legacy method; it raises a warning unless replaced with the new one
driver.switch_to.window(Detailpage_handles)
"""
#debug: if this fails, switch through the three handles and print each handle's page info
for i in Detailpage_all_handles:
time.sleep(3)
print ("dondake当前句柄为",i)
driver.switch_to_window(i)
#print(driver.page_source, encoding='utf-8') is wrong and raises an error; use the c variant below
c = (driver.page_source).encode("gbk", 'ignore').decode("gbk", "ignore")
print (c)
#To verify which handle held the page element, the page_source of every handle was printed, which eventually located it
"""
"""
print ("#############################################33")
contexts = driver.contexts
print("contexts全部上下文",contexts)
dondake = driver.current_context
print ("当前上下文为:",dondake)
"""
#print(driver.page_source)
time.sleep(2)
goodsname = driver.find_element_by_xpath("/html/body/wx-view/wx-view[1]/wx-view[2]/wx-view[1]")
goodsprice = driver.find_element_by_xpath("/html/body/wx-view/wx-view[1]/wx-view[2]/wx-view[3]/wx-text[1]/span[2]")
if goodsname.text is not None:
print ("goods name:",goodsname.text)
print ("goods price:",goodsprice.text)
unittest_TestResult = True
else:
raise Exception("Goods name is empty; failed to open the goods detail page")
except Exception as e:
print ("选择价格最高的商品进入商品详情脚本报错信息为:",e)
unittest_TestResult = False
finally:
return unittest_TestResult
| true
|
c3efc5ca1f6283b69c2390ae3c2ddd7b5df67623
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03826/s028473248.py
|
UTF-8
| 750
| 4
| 4
|
[] |
no_license
|
'''
Problem:
There are two rectangles.
The first rectangle has vertical sides of length A and horizontal sides of length B.
The second rectangle has vertical sides of length C and horizontal sides of length D.
Print the area of the larger of the two rectangles.
If the two rectangles have equal areas, print that area.
'''
'''
Constraints:
All inputs are integers.
1 ≦ A ≦ 10000
1 ≦ B ≦ 10000
1 ≦ C ≦ 10000
1 ≦ D ≦ 10000
'''
# Read the values A, B, C, D from standard input
a, b, c, d = map(int, input().split())
result = [a * b, c * d] # list holding the two areas
print(max(result))
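# A quick hedged check: input "3 5 2 7" gives areas [15, 14], so 15 is printed.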
| true
|
c643ccee915862483399b80591b38bed0cd7e0a6
|
Python
|
erjan/coding_exercises
|
/tictactoe.py
|
UTF-8
| 5,342
| 4.25
| 4
|
[
"Apache-2.0"
] |
permissive
|
'''
Tic-tac-toe is played by two players A and B on a 3 x 3 grid.
Here are the rules of Tic-Tac-Toe:
Players take turns placing characters into empty squares (" ").
The first player A always places "X" characters, while the second player B always places "O" characters.
"X" and "O" characters are always placed into empty squares, never on filled ones.
The game ends when there are 3 of the same (non-empty) character filling any row, column, or diagonal.
The game also ends if all squares are non-empty.
No more moves can be played if the game is over.
Given an array moves where each element is another array of size 2 corresponding to the row and column of the grid where they mark their respective character in the order in which A and B play.
Return the winner of the game if it exists (A or B), in case the game ends in a draw return "Draw", if there are still movements to play return "Pending".
You can assume that moves is valid (It follows the rules of Tic-Tac-Toe), the grid is initially empty and A will play first.
'''
#I spent 4h doing it, cuz did not know how to implement with helper functions - but eventually i did it!
#feel proud!
from typing import List
class Solution:
def tictactoe(self, moves: List[List[int]]) -> str:
def check_empty(self,grid):
for i in range(3):
for j in range(3):
if grid[i][j] == '':
return False
return True
def check_diag(self,grid):
diag1 = [grid[0][0], grid[1][1], grid[2][2]]
if diag1 == ['X', 'X', 'X'] :
#print('diag1')
print('player a wins ')
return 'A'
if diag1 == ['0', '0', '0']:
#print('diag1')
#print('player b wins')
return 'B'
diag2 = [grid[0][2], grid[1][1], grid[2][0] ]
if diag2 == ['X', 'X', 'X']:
#print('diag2')
print('player a wins')
return 'A'
if diag2 == ['0', '0', '0']:
#print('diag2')
#print(' player b wins')
return 'B'
#print('no diag detected')
return 'c'
def check_rows(self,grid):
for i in range(3):
row = grid[i]
if row == ['X', 'X', 'X']:
#print('row X %d player a win' % i)
return 'A'
elif row == ['0', '0', '0']:
#print('row o %d player b win' % i)
return 'B'
#print('no win row detected')
return 'c'
def check_columns(self,grid):
for i in range(3):
x = 0
y = 0
for j in range(3):
if grid[j][i] == 'X':
x+=1
elif grid[j][i] == '0':
y+=1
if x == 3:
#print('column X win detected')
return 'A'
elif y == 3:
#print('column O win detected')
return 'B'
#print('no win columns')
return 'c'
def check_all(self,grid):
cols = check_columns(self,grid)
rows = check_rows(self,grid)
diags = check_diag(self,grid)
if cols != 'c':
#print('cols not win')
return cols
elif rows != 'c':
#print('rows not win')
return rows
elif diags!= 'c':
#print('diags not win')
return diags
def check_draw_pending(self,grid):
if check_empty(self,grid):
print('Draw')
return 'Draw'
else:
print('Pending')
return 'Pending'
def print_grid(self,grid):
#print('--------------------------------------------')
for i in range(3):
for j in range(3):
print(grid[i][j], end = ' ')
print()
grid = []
for i in range(3):
grid.append([''] * 3)
a_player = True
for move in moves:
x = move[0]
y = move[1]
if grid[x][y] == '':
if a_player:
grid[x][y] = 'X'
a_player = False
if check_all(self,grid) == 'A':
#print('A')
#print_grid(grid)
return 'A'
elif not a_player:
grid[x][y] = '0'
a_player = True
if check_all(self,grid) == 'B':
#print('B')
#print_grid(grid)
return 'B'
#print_grid(grid)
#print('checking draw pending')
#print_grid(grid)
return check_draw_pending(self,grid)
'''
def helper(self, x):
t = [x]*3
print(t)
return t
helper(self,moves[0][0])
'''
| true
|
55e86eedbf7c85aaf247d5e014703390adc2e96d
|
Python
|
sonex02/djangoOwn
|
/user/models.py
|
UTF-8
| 496
| 2.59375
| 3
|
[] |
no_license
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
# Display gender as Chinese characters on the page
sex_choices = (
(0, u'男'),
(1, u'女'),
)
nickname = models.CharField(max_length=30,verbose_name='昵称')
age = models.IntegerField(default=0,verbose_name='年龄')
sex = models.IntegerField(default=0,verbose_name='性别') # 0 = male, 1 = female
class Meta:
ordering = ['username'] # sort order
| true
|
4a8a9c45f23bf39810f077acd62b80c60dea65db
|
Python
|
sandeepkompella/University-of-Washington
|
/Quiz - Regression.py
|
UTF-8
| 1,942
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 21:34:07 2020
@author: sandeepkompella
98102
"""
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
import math
sales = pd.read_csv('home_data.csv')
#sales_98039 = sales.loc[sales['zipcode'] == 98039]
sales_98039 = sales[sales['zipcode'] == 98039]
#print(sales_98039)
print(sales_98039['price'].agg('mean'))
#Ans is 2160606.6
condition_one = sales['sqft_living'] > 2000
condition_two = sales['sqft_living'] <= 4000
sales_lt4000 = sales.loc[condition_one & condition_two]
print(sales_lt4000)
print(len(sales_lt4000)/len(sales))
#Ans - 0.42187572294452413
my_features = sales[['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode']]
target = sales[['price']]
my_features_train, my_features_test, target_train, target_test = train_test_split(my_features,target,test_size=0.2,random_state = 0,)
model = LinearRegression()
model.fit(my_features_train, target_train)
y_pred = model.predict(my_features_test)
rmsd = np.sqrt(mean_squared_error(target_test, y_pred))
print("my_Root Mean Square Error \n", rmsd)
advanced_features = sales[['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode', 'condition', 'grade', 'waterfront', 'view', 'sqft_above', 'sqft_basement', 'yr_built', 'yr_renovated', 'lat', 'long', 'sqft_living15', 'sqft_lot15']]
advanced_features_train, advanced_features_test, target_train, target_test = train_test_split(advanced_features,target,test_size=0.2,random_state = 0)
advanced_model = LinearRegression()
advanced_model.fit(advanced_features_train, target_train)
y_pred1 = advanced_model.predict(advanced_features_test)
rmsd1 = np.sqrt(mean_squared_error(target_test, y_pred1))
print("advanced_Root Mean Square Error \n", rmsd1)
print(rmsd1-rmsd)
| true
|
fe0eea20ed853476b32ec735c5fa2abbc127444b
|
Python
|
acislab/pragma-cloud-spark
|
/first_try.py
|
UTF-8
| 2,973
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
# coding: utf-8
# ### Giving Keras a try, this code is based on the example from the lead Keras developer [here](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html)
# In[1]:
import sys
# Limit to CPU
import os
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
activate_this_file = "/home/mcollins/spark_keras2/bin/activate_this.py"
with open(activate_this_file) as f:
exec(f.read(), {'__file__': activate_this_file})
import h5py
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
print("IMPORTS")
# The images have already been split into subdirectories for training/validation/testing. 70% in train, 20% in validation, 10% in test
# In[2]:
width, height = 256, 256
# Epochs and batch sizes can be adjusted and can make a relatively big difference in the model
# In[3]:
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 280
nb_validation_samples = 80
epochs = 25
batch_size = 16
# In[4]:
if K.image_data_format() == 'channels_first':
input_shape = (3, width, height)
else:
input_shape = (width, height, 3)
# In[5]:
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# In[18]:
train_datagen = ImageDataGenerator(
#shear_range=0.2,
#zoom_range=0.2,
#horizontal_flip=True
)
# In[19]:
validation_datagen = ImageDataGenerator()
# In[20]:
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(width, height),
batch_size=batch_size,
class_mode='binary')
# In[21]:
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir,
target_size=(width, height),
batch_size=batch_size,
class_mode='binary')
# This is where the magic happens. As you can see from the accuracy here, it is far from magic, i.e. this model doesn't work. But it's a start!
# In[22]:
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
# In[23]:
#model.save_weights('first_try.h5')
print(model.to_json())
| true
|
431b86ba9ddf4a11c82f0bcf3ee57431502e309d
|
Python
|
Sriramnat100/Discord_Bot
|
/bot.py
|
UTF-8
| 3,837
| 2.921875
| 3
|
[] |
no_license
|
import os
import json
import discord
import requests
import random
from discord.ext import commands
from discord.ext.commands import Bot
import ast
import youtube_dl
client = commands.Bot(command_prefix = "$")
def get_quote():
#Getting quote from the API
response = requests.get("https://api.kanye.rest/")
json_data = json.loads(response.text)
quote = "Kanye once said:" + " " + json_data['quote']
return(quote)
@client.event
#Creates function that detects when bot is ready
async def on_ready():
print('We have logged in as {0.user}'.format(client))
#command is a piece of code that happens when the user tells the bot to do something
@client.command()
async def speed(ctx):
await ctx.send(f'Speed is {round (client.latency * 1000)}ms')
#amount defaults to 5, so if the user does not pass an amount, 5 messages are cleared
@client.command()
async def clear(ctx, amount = 5):
await ctx.channel.purge(limit=amount + 1)
#echo feature
@client.command()
async def echo(ctx, *,args):
await ctx.send(args)
await ctx.message.delete()
#Kanye quote
@client.command()
async def kanye(ctx):
quote = get_quote()
await ctx.send(quote)
#poll feature
@client.command()
async def poll(ctx, *, args):
no = "❌"
yes = "☑️"
await ctx.message.delete()
user_poll = await ctx.send(args)
await user_poll.add_reaction(yes)
await user_poll.add_reaction(no)
#/////////START OF MUSIC FUNCTION/////////////////
#Joining the VC
@client.command()
async def join(ctx,*,args):
#find the voice channel to join; name=args is the name of the VC the bot must join
voiceChannel = discord.utils.get(ctx.guild.voice_channels, name=args)
await voiceChannel.connect()
#creating a voice client
voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
#Playing the song
@client.command()
async def play(ctx, url : str):
song_there = os.path.isfile("song.mp3")
try:
#Downloading and deleting song from computer after played
if song_there:
os.remove("song.mp3")
except PermissionError:
await ctx.send("Wait for current playing music to end or use stop command")
return
voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
}
#Downloads youtube url
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
for file in os.listdir("./"):
if file.endswith(".mp3"):
os.rename(file, "song.mp3")
voice.play(discord.FFmpegPCMAudio("song.mp3"))
#Leaving function
@client.command()
async def leave(ctx):
voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
#The bot only disconnects, via the leave command, when it is actually connected
if voice.is_connected():
await voice.disconnect()
else:
await ctx.send("The bot is not connected to a vc")
#Defining the pause function
@client.command()
async def pause(ctx):
#Defining when the bot should pause; it will only pause when the pause function is called
voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
if voice.is_playing():
voice.pause()
else:
await ctx.send("The bot is not playing music")
#Defining resume command
@client.command()
async def resume(ctx):
voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
if voice.is_paused():
voice.resume()
else:
await ctx.send("The bot is not paused")
#Telling the bot to stop
@client.command()
async def stop(ctx):
voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
voice.stop()
client.run('token')
| true
|
3195b5b458481e88e510025b25ddc31291d3c03f
|
Python
|
BruceYaphets/ball_in_box
|
/ball_in_box/ballinbox.py
|
UTF-8
| 2,238
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from .validate import validate
__all__ = ['ball_in_box']
def ball_in_box(num_of_circle, blockers):
circles=[]
#Initialize the coordinates and radius of each ball
for tmp in range(num_of_circle):
circles.append([0,0,0])
#Sample candidate centers on a 400x400 grid of points
axis_x=np.linspace(-1+0.000001,1-0.000001,400)
axis_y=np.linspace(-1+0.000001,1-0.000001,400)
maycenters=[(x,y) for x in axis_x
for y in axis_y]
#print('1111',maycenters[0],maycenters[4])
for index in range(num_of_circle):
maxcir=[0,0,0]
for maycenter in maycenters:
x=maycenter[0]
y=maycenter[1]
cir=[x,y,0]
cir=getCurrentSpotMaxCir(cir,index,circles,blockers)
if cir[2]>maxcir[2]:
maxcir=cir
circles[index]=maxcir
index+=1
draw(blockers,circles)
return circles
def getCurrentSpotMaxCir(circle,index,circles,blockers):
min_r=0
if (0==index):
r1=dirCirAndBlocler(circle,blockers)
r2=dirCirAndBounder(circle)
r=min(r1,r2)
circle[2]=r
#print(circle[2])
return circle
else:
r1=dirCirAndBlocler(circle,blockers)
r2=dirCirAndBounder(circle)
for i in range(index):
r3=dirtCirAndCir(circles[i],circle)-circles[i][2]
if min_r==0:
min_r=r3
min_r=min(min_r,r3)
circle[2]=min(r1,r2,min_r)
#print(circle[2])
return circle
def dirtCirAndCir(circle1,circle2):
dir=math.sqrt((circle1[0]-circle2[0])**2+(circle1[1]-circle2[1])**2)
return dir
def dirCirAndBlocler(circle,blockers):
mindir=0
for i in range(len(blockers)):
dir=math.sqrt((circle[0]-blockers[i][0])**2+(circle[1]-blockers[i][1])**2)
if 0==mindir:
mindir=dir
mindir=min(dir,mindir)
return mindir
def dirCirAndBounder(circle):
r1=circle[0]+1
r2=1-circle[0]
r3=1-circle[1]
r4=circle[1]+1
return min(r1,r2,r3,r4)
def draw(blockers,circles):
fig=plt.figure()
ax=fig.gca()
plt.xlim((-1,1))
plt.ylim((-1,1))
for i in blockers:
plt.scatter(i[0],i[1],color='',marker='.',edgecolor='g',s=20)
for i in circles:
if len(i)==1:
continue
circle=plt.Circle((i[0],i[1]),i[2],color='r',fill=False)
ax.add_artist(circle)
plt.show()
| true
|
0cee94d51c260b652ab9cc527d8f254fc20f1323
|
Python
|
devona/codewars
|
/uniq.py
|
UTF-8
| 448
| 4.03125
| 4
|
[] |
no_license
|
'''
Implement a function which behaves like the uniq command in UNIX.
It takes as input a sequence and returns a sequence in which all duplicate elements following each other have been reduced to one instance.
Example:
['a','a','b','b','c','a','b','c'] --> ['a','b','c','a','b','c']
'''
def uniq(seq):
i = 0
while i < len(seq)-1:
if seq[i] == seq[i+1]:
seq.pop(i+1)
else:
i += 1
return seq
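# Editor's note: uniq() mutates its argument in place via pop(); pass a copy,
# e.g. uniq(list(seq)), if the original sequence must be preserved.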
| true
|
469fb3a61300b78184f53655d1456641b0c15868
|
Python
|
hed-standard/hed-specification
|
/tests/test_summarize_testdata.py
|
UTF-8
| 2,860
| 2.703125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
import os
import json
import unittest
class MyTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'json_tests'))
cls.test_files = [os.path.join(test_dir, f) for f in os.listdir(test_dir)
if os.path.isfile(os.path.join(test_dir, f))]
@staticmethod
def get_test_info(test_file, details=True):
indent = " "
with open(test_file, "r") as fp:
test_info = json.load(fp)
out_list = [f"{test_info[0]['error_code']}"]
for info in test_info:
out_list.append(f"\n{indent}{info['description']}")
out_list.append(f"{indent}HED {info['schema']}")
out_list.append(f"{indent}Definitions:")
for defs in info["definitions"]:
out_list.append(f"{indent*2}{defs}")
if "string_tests" in info["tests"]:
out_list = out_list + MyTestCase.get_test_details(info["tests"]["string_tests"], "string_tests", indent)
if "sidecar_tests" in info["tests"]:
out_list = out_list + \
MyTestCase.get_test_details(info["tests"]["sidecar_tests"], "sidecar_tests", indent)
if "event_tests" in info["tests"]:
out_list = out_list + \
MyTestCase.get_test_details(info["tests"]["event_tests"], "event_tests", indent)
return "\n".join(out_list)
@staticmethod
def get_test_details(test_item, title, indent, details=True):
num_fail_tests = len(test_item.get("fails", []))
num_pass_tests = len(test_item.get("passes", []))
detail_list = [f"{indent*2}{title}: fail_tests={num_fail_tests} pass_tests={num_pass_tests}"]
if num_fail_tests > 0:
detail_list.append(f"{indent*3}fail_tests:")
for test in test_item["fails"]:
detail_list.append(f"{indent*4}{test}")
if num_pass_tests > 0:
detail_list.append(f"{indent * 3}pass_tests:")
for test in test_item["passes"]:
detail_list.append(f"{indent * 4}{test}")
return detail_list
def test_summary(self):
for test_file in self.test_files:
out_str = self.get_test_info(test_file)
print(out_str)
self.assertEqual(True, True) # add assertion here
def test_summary_full(self):
for test_file in self.test_files:
print(test_file)
out_str = self.get_test_info(test_file, details=True)
print(out_str + '\n')
self.assertEqual(True, True) # add assertion here
if __name__ == '__main__':
unittest.main()
| true
|
2c8855b1f554f5832a2445b73089804b66486c4c
|
Python
|
linliqun/shujusuan
|
/test_1.py
|
UTF-8
| 492
| 3.234375
| 3
|
[] |
no_license
|
#Given n integers, join them in a row to form the largest possible multi-digit integer.
#e.g. n=3: the 3 integers 13, 312, 343 join into the largest integer 34331213
#e.g. n=4: the 4 integers 7, 13, 4, 246 join into the largest integer 7424613
#code:
n=int(input())
nums=input().split()
max_s=''
def find(num):
global max_s
if len(num)<=0:
return
a=num[0]
for b in num:
if a+b<b+a:
a=b
max_s+=a
num.remove(a)
find(num)
return max_s
print(find(nums))
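# Hedged worked example: for input "3" and "13 312 343", the greedy comparison
# a+b < b+a selects "343" first ("343312" beats "312343"), then "312", then "13",
# printing "34331213" as stated above.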
| true
|
6c48a36b0c6736e9dfce8a468175f1072af46c7e
|
Python
|
zabcdefghijklmnopqrstuvwxy/AI-Study
|
/1.numpy/topic45/topic45.py
|
UTF-8
| 585
| 3.84375
| 4
|
[] |
no_license
|
#numpy.argmax(a, axis=None, out=None)
#Returns the index of the maximum value along the axis.
#Parameters:
#a : array_like
#input array
#axis : int, optional
#By default the index is into the flattened array, otherwise along the specified axis.
#out : array, optional
#If provided, the result is inserted into this array with the appropriate shape and type.
#Returns:
#index_array : ndarray of ints
#Array of indices. It has the same shape as a.shape with the axis dimension removed.
import numpy as np
arr=np.random.random(10)
print(f"random array is {arr}")
arr[arr.argmax()] = 0
print(f"replace the maximum value by 0 is {arr}")
| true
|
74b11b230e856498c91059a4a18228f8e917b81c
|
Python
|
SSyangguang/railroad-detection
|
/railroadDetectionLine.py
|
UTF-8
| 6,866
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
# Draw the railroad track region using linear fitting
# The input is the background image obtained during video preprocessing; after processing, the original image with the track region drawn on it and an image containing only the track region are saved separately
import numpy as np
import cv2
# Parameters used for edge detection
blurKernel = 21 # Gaussian blur kernel size
cannyLowThreshold = 10 # Canny edge detection low threshold
cannyHighThreshold = 130 # Canny edge detection high threshold
# Hough transform parameters
rho = 1 # step size of rho, the distance from the line to the image origin (0,0)
theta = np.pi / 180 # range of theta
threshold = 50 # accumulator value above which a candidate is accepted as a line
min_line_length = 150 # minimum line length; shorter segments are ignored
max_line_gap = 20 # maximum gap between two segments below which they are treated as one line
def roi_mask(img, vertices):
# img is the input image, vertices are the coordinates of the four ROI corner points
# Create an all-black image with the same resolution as the input, used later to draw the track region
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
# Check the number of channels; for multi-channel images add a white mask to every channel
if len(img.shape) > 2:
# number of channels in the image
channel_count = img.shape[2] # assign the image's channel count to channel_count
mask_color = (255,) * channel_count # e.g. a 3-channel color image has channel_count=3, so mask_color=(255, 255, 255)
else:
mask_color = 255 # single-channel grayscale image
cv2.fillPoly(mask, vertices, mask_color) # fill the polygon with white to form the mask
masked_img = cv2.bitwise_and(img, mask) # AND the mask with the original image to keep only the ROI
return masked_img
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, ymin, ymax):
# The function directly outputs the endpoint coordinates of the detected lines (each line is given by two points [x1,y1],[x2,y2])
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lanes(line_img, lines, ymin, ymax)
return line_img
def draw_lanes(img, lines, ymin, ymax):
# Initialize storage for the coordinates of the two rail lines
left_lines, right_lines = [], []
# Use the rail slope to place each line into left_lines or right_lines; the slope threshold needs manual tuning
for line in lines:
for x1, y1, x2, y2 in line:
k = (y2 - y1) / (x2 - x1)
if k > 1:
left_lines.append(line)
else:
right_lines.append(line)
# clean_lines(left_lines, 0.1) # pop left lines that do not satisfy the slope requirement
# clean_lines(right_lines, 0.1) # pop right lines that do not satisfy the slope requirement
# Collect the first point of every line in the left family
left_points = [(x1, y1) for line in left_lines for x1, y1, x2, y2 in line]
# Collect the second point of every line in the left family
left_points = left_points + [(x2, y2) for line in left_lines for x1, y1, x2, y2 in line]
# Collect the first point of every line in the right family
right_points = [(x1, y1) for line in right_lines for x1, y1, x2, y2 in line]
# Collect the second point of every line in the right family
right_points = right_points + [(x2, y2) for line in right_lines for x1, y1, x2, y2 in line]
# Fit the point set to a line expression and compute the two endpoints of the left line in the image
left_vtx = calc_lane_vertices(left_points, ymin, ymax)
# Fit the point set to a line expression and compute the two endpoints of the right line in the image
right_vtx = calc_lane_vertices(right_points, ymin, ymax)
# Initialize the four vertices of the track-region polygon
vtx = []
print(left_vtx[0])
print(left_vtx[1])
print(right_vtx[1])
print(right_vtx[0])
# Append the bottom-left, top-left, top-right and bottom-right coordinates in order
vtx.append(left_vtx[0])
vtx.append(left_vtx[1])
vtx.append(right_vtx[1])
vtx.append(right_vtx[0])
# cv2.fillPoly() expects three-dimensional coordinate arrays, so add one more dimension here
vtx = np.array([vtx])
# Fill the track region with (0, 255, 0) using the vertex coordinates
cv2.fillPoly(img, vtx, (0, 255, 0))
# Pop the lines that do not satisfy the slope requirement
def clean_lines(lines, threshold):
slope = []
for line in lines:
for x1, y1, x2, y2 in line:
k = (y2 - y1) / (x2 - x1)
slope.append(k)
while len(lines) > 0:
# Compute the mean slope, because lines and slope values will be popped below
mean = np.mean(slope)
# Compute the difference between each line's slope and the mean
diff = [abs(s - mean) for s in slope]
# Find the index of the largest difference
idx = np.argmax(diff)
if diff[idx] > threshold: # pop lines whose difference exceeds the threshold
slope.pop(idx) # pop the slope
lines.pop(idx) # pop the line
else:
break
# Fit the point set to a line expression and compute the line's two endpoints in the image
def calc_lane_vertices(point_list, ymin, ymax):
x = [p[0] for p in point_list] # extract x
y = [p[1] for p in point_list] # extract y
fit = np.polyfit(y, x, 1) # fit the points with the first-degree polynomial x=a*y+b; fit is (a,b)
fit_fn = np.poly1d(fit) # build the polynomial object a*y+b
xmin = int(fit_fn(ymin)) # x coordinate of the line at y=ymin (top of the ROI)
xmax = int(fit_fn(ymax)) # x coordinate of the line at y=ymax (bottom of the ROI)
return [(xmin, ymin), (xmax, ymax)]
def processing(img):
# Define the four vertices of the ROI: bottom-left, top-left, top-right and bottom-right
roi_vtx = np.array([[(810, 1080), (810, 490), (1920, 490), (1920, 1080)]])
# ymin and ymax bound the track region later, i.e. the Y coordinates of the ROI's upper and lower edges
ymin = roi_vtx[0][1][1]
ymax = roi_vtx[0][0][1]
# Preprocess the input image: Gaussian blur followed by Canny edge detection
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
blur_gray = cv2.GaussianBlur(gray, (blurKernel, blurKernel), 0, 0)
edges = cv2.Canny(blur_gray, cannyLowThreshold, cannyHighThreshold)
# Zero out all pixels outside the ROI defined by the given vertices, i.e. apply the ROI mask
roi_edges = roi_mask(edges, roi_vtx)
# Detect lines with the Hough transform, filter them, and draw the track region
line_img = hough_lines(roi_edges, rho, theta, threshold, min_line_length, max_line_gap, ymin, ymax)
# Blend the track region with the input image
res_img = cv2.addWeighted(img, 1, line_img, 0.5, 0)
return res_img, line_img
| true
|
5d517ace3832098a813e9b910b1c1c70d3c9809e
|
Python
|
immzz/leetcode_solutions
|
/balanced binary tree.py
|
UTF-8
| 618
| 3.40625
| 3
|
[] |
no_license
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isBalanced(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
return self.do(root,0) >= 0
def do(self,root,depth):
if not root:
return depth
left_depth = self.do(root.left,depth+1)
right_depth = self.do(root.right,depth+1)
if abs(left_depth-right_depth) > 1:
return -1
return max(left_depth,right_depth)
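# Editor's note: do() returns the maximum depth reached below `root`, or -1 once
# an imbalance is found; -1 keeps propagating upward because the depth difference
# it creates at every ancestor also exceeds 1, so isBalanced only checks the sign.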
| true
|
f2443989bfa65edce94d65f953d2b579265351bb
|
Python
|
jmanuel1/dice
|
/dice.py
|
UTF-8
| 4,663
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
import cmd
import random
import logging
from collections import namedtuple
class DiceRollInterface(cmd.Cmd):
intro = "Roll simulated dice like a pro. Type 'help' for help."
def do_stats(self, _):
"""Do stat rolls (4d6-drop 1 six times)."""
d = Die()
for _ in range(6):
print((d(6) * 4).drop(1))
def do_shell(self, roll):
"""Do a general roll, as specified by a string.
The syntax, in EBNF, is:
roll = num of rolls, "d", num of sides, ["-drop", num to drop],
["*", num of times] ;
num of rolls = INTEGER ;
num of sides = INTEGER ;
num to drop = INTEGER ;
num of times = INTEGER ;
For example, the stat rolls would be the command !4d6-drop1*6.
"""
self._roll = roll
rollObject = None
try:
rollObject = self._parse_roll()
self._execute_roll(rollObject)
except DiceException as e:
print(e)
def _parse_num_of_rolls(self):
return self._lex_INTEGER()
def _parse_num_of_sides(self):
return self._lex_INTEGER()
def _parse_num_to_drop(self):
return self._lex_INTEGER()
def _parse_num_of_times(self):
return self._lex_INTEGER()
def _lex_INTEGER(self):
index = 1
while True:
if not self._roll[:index].isdecimal() or index > len(self._roll):
break
index += 1
if index == 1:
self._error()
integer = int(self._roll[:index - 1])
self._consume(index - 1)
return integer
def _consume(self, chars):
self._roll = self._roll[chars:]
def _error(self):
column = self._line_len - len(self._roll) + 2
raise DiceSyntaxError(f"Syntax error at column {column}!")
def _parse_roll(self):
"""Parse the roll string self._roll and execute the roll.
Returns a namedtuple containing properties of the role.
Implementation detail: the parser is a recursive descent parser.
"""
self._line_len = len(self._roll)
num_of_rolls = self._parse_num_of_rolls()
if not self._roll.startswith("d"):
self._error()
self._consume(1)
num_of_sides = self._parse_num_of_sides()
num_to_drop = 0
if self._roll.startswith("-drop"):
self._consume(5)
num_to_drop = self._parse_num_to_drop()
num_of_times = 1
if self._roll.startswith("*"):
self._consume(1)
num_of_times = self._parse_num_of_times()
self._roll_should_be_empty()
return Roll(
num_to_drop=num_to_drop,
num_of_rolls=num_of_rolls,
num_of_sides=num_of_sides,
num_of_times=num_of_times
)
def _execute_roll(self, roll):
# Execute the roll
d = Die()
for _ in range(roll.num_of_times):
completedRoll = (
d(roll.num_of_sides)
* roll.num_of_rolls).drop(roll.num_to_drop)
print(completedRoll)
def _roll_should_be_empty(self):
if self._roll:
self._error()
Roll = namedtuple(
'Roll', ['num_of_times', 'num_of_sides', 'num_of_rolls', 'num_to_drop'])
class Die:
def __init__(self, _original=None, logger=logging.getLogger(__name__)):
self._rolls = []
self._sides = -1
self._logger = logger
if _original is not None:
self._rolls = _original._rolls[:]
self._sides = _original._sides
def __call__(self, sides):
new_die = Die(self)
new_die._sides = sides
new_die._roll()
self._logger.debug(
'A die was rolled: d({}) => {}'.format(sides, new_die._rolls))
return new_die
def __mul__(self, rolls):
new_die = Die(self)
for _ in range(len(self._rolls) * (rolls - 1)):
new_die._roll()
logMessage = 'A die was rolled multiple times: {}*d => {}'.format(
rolls, new_die._rolls)
self._logger.debug(logMessage)
return new_die
def drop(self, num):
new_die = Die(self)
new_die._rolls = sorted(new_die._rolls)[num:]
return new_die
def __str__(self):
return str(sum(self._rolls))
def _roll(self):
self._rolls.append(random.randint(1, self._sides))
class DiceException(Exception):
pass
class DiceSyntaxError(SyntaxError, DiceException):
pass
def main():
random.seed()
interface = DiceRollInterface()
interface.cmdloop()
if __name__ == '__main__':
main()
| true
|
a8397f856fb106ca38110dd81b9db1583285aa7e
|
Python
|
miyagipipi/studying
|
/Tree/验证前序遍历序列二叉搜索树 255.py
|
UTF-8
| 1,591
| 3.859375
| 4
|
[] |
no_license
|
'''
Given an integer array, verify whether it is a valid preorder traversal sequence of a binary search tree.
You may assume that all numbers in the sequence are distinct.
'''
from typing import List
class Solution:
def verifyPreorder(self, preorder: List[int]) -> bool:
stack = []
new_min = float('-inf') # initialize the lower bound
for i in range(len(preorder)):
if preorder[i] < new_min: return False
while stack and preorder[i] > stack[-1]:
new_min = stack.pop()
stack.append(preorder[i])
return True
'''
Preorder traversal of a binary search tree yields values that decrease and then increase;
if that pattern is violated (the sequence decreases where it must not), the array cannot
come from a binary search tree.
We already have the preorder sequence as an array -- preorder -> list.
Traverse the array, pushing each visited element onto an auxiliary stack.
Keep a variable new_min for the current lower bound, initialized to negative infinity.
new_min is the largest element among all elements smaller than the current one;
every element after it must be greater than it.
(If duplicate values were allowed this check would break, but the problem guarantees distinct values.)
When the current value is greater than the top of the stack, the sequence has stopped decreasing:
update new_min to the stack top and pop it.
Repeat until the stack is empty or the top exceeds the current value.
If the current element is ever smaller than new_min, the sequence decreases illegally and the requirement is violated.
'''
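# Hedged worked trace of the Solution above, preorder = [5, 2, 1, 3, 6]:
#   5 -> stack [5]; 2 -> stack [5, 2]; 1 -> stack [5, 2, 1]
#   3 -> pops 1 and 2 (both < 3), new_min = 2, stack [5, 3]
#   6 -> pops 3 and 5, new_min = 5, stack [6]
# No element ever drops below new_min, so the method returns True.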
| true
|
a73d69642c84e46eadff0441b96ef991b39cdcdb
|
Python
|
Rejeected/Vacancy
|
/script/pentagon.py
|
UTF-8
| 503
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
data = open('baza.txt').read().splitlines()
#data = ['_+=+_ssssss_+=+_']
file = open('prof_data.txt', 'w')
file.write('var dict_prof_names = {\n')
count_lines = 0
for line in data:
if line[:5] == '_+=+_' and line[-5:] == '_+=+_':
if count_lines != 0:
file.write("],\n")
count_lines += 1
file.write(" '" + line[5:-5] + "': [")
else:
names = line.split('/')
for name in names:
file.write("'" + name + "', ")
if count_lines != 0:
file.write("]")
file.write('\n}')
| true
|
bd1a1fe6e4c7d7af55e3e88bef3047439023c8a6
|
Python
|
Demons-wx/leetcode_py
|
/leetcode/q20_valid_parenttheses.py
|
UTF-8
| 888
| 4
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
__author__ = "wangxuan"
# Given a string containing just the characters '(', ')', '{', '}', '[' and ']',
# determine if the input string is valid.
#
#
# The brackets must close in the correct order, "()" and "()[]{}" are all valid
# but "(]" and "([)]" are not.
#
# Time: O(n)
class Solution:
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
stack, lookup = [], {"(": ")", "{": "}", "[": "]"}
for parenthese in s:
if parenthese in lookup:
stack.append(parenthese) # if it is a left bracket, push it onto the stack
elif len(stack) == 0 or lookup[stack.pop()] != parenthese: # if it is a right bracket, pop one from the stack and check for a match
return False
return len(stack) == 0
if __name__ == "__main__":
print(Solution().isValid("([])"))
| true
|
2f55e7ceca5eb7c87c0b12dd2f916dc04d9ecaa7
|
Python
|
PavelGordeyev/mastermind
|
/mastermind.py
|
UTF-8
| 3,360
| 4.09375
| 4
|
[] |
no_license
|
#####################################################################
## CS325 Analysis of Algorithms
## Program name: HW6 - Portfolio Assignment (Option 1) - Mastermind
##
## Author: Pavel Gordeyev
## Date: 8/9/20
## Description: Text-based version of the mastermind game. User will
## have 8 turns to guess the color and sequence of 4
## pegs. Peg colors are represented by numbers 1-6.
## Each turn, they will receive feedback on the
## correctness of the guess that was played.
## An "X" means that it is an exact match for color
## and location, while a "O" is only a match for color.
## The results are in a random order.
#####################################################################
import re
from helpers import *
#####################################################################
## Description: Performs the gameplay for the Mastermind game
#####################################################################
def main():
# Display welcome message
welcome = "Welcome to Mastermind!!!\nIn this game, you will attempt to find the correct color and sequence of 4 pegs.\nYou will have 8 guesses to try and guess the solution and win the game!\nEach turn, we will show either a white dot, represented by a O signifying \nthat one of the pegs is the correct color but in the wrong location,\nor a black dot, represented by an X signifying that one of the pegs is in\nthe correct location and is of the correct color. \nThe order of the results, which follow the '|', are in no particular order.\n\n Enter /q to quit at anytime"
print("\n",welcome,"\n\n")
# Create random peg selection
solution = []
makeRandomPegs(solution)
# Initialize dictionary to store counts of colors in solution
colorCounts = dict()
# Initialize an array for the player's turns
guesses = []
# Initialize an array for each guesses results
results = []
# Initialize winning boolean
won = False
# Allow player to make 8 guesses
for turn in range(0,8):
# Validate user input
while(1):
# Ask user for a guess; convert to integer array
guess = input("Enter your guess(4 Values from 1-6 ONLY;ex. 1 2 3 4): ")
if guess == "/q":
print("Ok, see you next time!")
exit()
elif not re.match(r'^[0-9\s]*$',guess.rstrip()): # Matches numbers and spaces only
print("Invalid entry!")
else:
guess = list(map(int,guess.split()))
if len(guess) < 4:
print("\nNot enough values were entered. Please enter 4 values!\n")
else:
if not isValidGuess(guess,1,6):
print("\nOne of the guessed pegs is not a valid value! Please try again!\n")
else:
break
# Add to list of guesses
guesses.append(guess)
# Check if user's guess is completely correct
if not isGuessCorrect(guess,solution):
# Reset color counts
setColorCounts(solution,colorCounts)
# Get the status of how correct the guess was
getGuessStatus(guess,solution,results, colorCounts)
# Print out board with results all turns with white and black pegs
printGameBoard(guesses,results)
else: # Player won the game
print("\nCongrats! You got it right! The solution was: ",guess)
won = True
break
if not won:
# Solution was not found by the user after 8 turns
print("\nYou ran out of turns! Please try again next time!\n")
# Call main function
if __name__ == "__main__":
main()
| true
|
0a5fda59438f0e993073bacaec2f6bd47508767a
|
Python
|
gdassori/fresh_onions
|
/pastebin/proxies_rotator.py
|
UTF-8
| 3,160
| 2.703125
| 3
|
[] |
no_license
|
__author__ = 'guido'
from bs4 import BeautifulSoup
import requests
import random
import time
class DoYourOwnProxyRotator():
def __init__(self, logger):
self.logger = logger
@property
def proxies(self):
self.logger.error('wtf I said do your own')
raise NotImplementedError()
@property
def random_proxy(self):
self.logger.error('hard to learn')
raise NotImplementedError()
def ban(self, proxy):
self.logger.error('...')
raise NotImplementedError()
def is_valid(self, proxy):
raise NotImplementedError()
class SingleProxy(DoYourOwnProxyRotator):
def __init__(self, logger, hostname='127.0.0.1', port='8888'):
self.proxy = '{}:{}'.format(hostname, port)
super(SingleProxy, self).__init__(logger)
@property
def proxies(self):
return [self.proxy]
@property
def random_proxy(self):
return self.proxy
def ban(self, proxy):
self.logger.info('trying to ban single proxy {}, weird'.format(self.proxy))
def is_valid(self, proxy):
return proxy == self.proxy
class USProxyOrg_Fetcher(DoYourOwnProxyRotator):
def __init__(self, logger, expire=1800):
self._proxies = []
self._blacklist = []
self.expire = expire
self.lastupdate = 0
super(USProxyOrg_Fetcher, self).__init__(logger)
def _download_proxies(self):
self.logger.info('Downloading proxies...')
plist = BeautifulSoup(requests.get('http://www.us-proxy.org/').text, "lxml")
rows = plist.find('table').find_all('tr')
pr = []
for row in rows:
proxy = ''
proxy_found = False
for td in row.find_all('td'):
if td.text.count('.') == 3:
proxy += td.text
proxy_found = True
elif proxy_found:
proxy += ':' + td.text
break
if proxy not in self._proxies + self._blacklist:
pr.append(proxy)
self._proxies.extend([proxy for proxy in pr if proxy not in self._proxies])
self.lastupdate = int(time.time())
self.logger.info('New {} proxies saved from www.us-proxy.org, total of {} proxies listed'.format(len(pr),
len(self._proxies)))
@property
def proxies(self):
p = [proxy for proxy in self._proxies if proxy not in self._blacklist]
if not p or not self.lastupdate or int(time.time()) - self.lastupdate > self.expire:
self._download_proxies()
return [proxy for proxy in self._proxies if proxy not in self._blacklist and proxy != '']
return p
@property
def random_proxy(self):
proxies = self.proxies
# take from the firsts, usually more reliable
return proxies[random.randint(0, 2)]
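# Editor's note: this assumes at least three proxies are currently listed;
# with fewer entries random.randint(0, 2) can index past the end of the list.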
def ban(self, proxy):
self._blacklist.append(proxy)
if proxy in self._proxies:
self._proxies.remove(proxy)
def is_valid(self, proxy):
return proxy not in self._blacklist
| true
|
c4f9dd6fee46912749c3095e0585e52a8c1122cd
|
Python
|
shnehna/object_study
|
/object/异常.py
|
UTF-8
| 373
| 3.390625
| 3
|
[] |
no_license
|
try:
num = int(input('Please enter an integer'))
result = 8 / num
print(result)
except ZeroDivisionError:
print("Division by zero error")
except ValueError:
print("Invalid input")
except Exception as e:
print("Unknown error %s" % e)
else:
print("No exception; result is %s" % result)
finally:
print("Executed whether or not an exception occurred")
print("Another one")
| true
|
bed40ada834fe80e6219e6eee963fd19263c1287
|
Python
|
New-generation-hsc/PPractice
|
/week4/vggnet.py
|
UTF-8
| 4,536
| 2.53125
| 3
|
[] |
no_license
|
from __future__ import print_function
import paddle.v2 as paddle
import paddle.fluid as fluid
import random
import shutil
import numpy as np
from datetime import datetime
from PIL import Image
import os
import sys
FIXED_IMAGE_SIZE = (32, 32)
params_dirname = "image_classification.inference.model"
def input_program():
# The image is 32 * 32 * 3 with rgb representation
data_shape = [3, 32, 32] # Channel, H, W
img = fluid.layers.data(name='img', shape=data_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
return img, label
def vgg_bn_drop(input):
def conv_block(ipt, num_filter, groups, dropouts):
return fluid.nets.img_conv_group(
input=ipt,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * groups,
conv_filter_size=3,
conv_act='relu',
conv_with_batchnorm=True,
conv_batchnorm_drop_rate=dropouts,
pool_type='max')
conv1 = conv_block(input, 64, 2, [0.3, 0])
conv2 = conv_block(conv1, 128, 2, [0.4, 0])
conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
fc1 = fluid.layers.fc(input=drop, size=512, act=None)
bn = fluid.layers.batch_norm(input=fc1, act='relu')
drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
fc2 = fluid.layers.fc(input=drop2, size=512, act=None)
predict = fluid.layers.fc(input=fc2, size=60, act='softmax')
return predict
def train_program():
img, label = input_program()
predict = vgg_bn_drop(img)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
top1_accuracy = fluid.layers.accuracy(input=predict, label=label)
top5_accuracy = fluid.layers.accuracy(input=predict, label=label, k=5)
return [avg_cost, top1_accuracy, top5_accuracy]
def optimizer_program():
return fluid.optimizer.Adam(learning_rate=0.001)
def custom_reader_creator(images_path):
# return a reader generator
def reader():
for label in os.listdir(images_path):
path = os.path.join(images_path, label)
for img_name in os.listdir(path):
img_path = os.path.join(path, img_name)
img = load_image(img_path)
yield img, int(label) - 1
return reader
def load_image(img_path):
im = Image.open(img_path)
im = im.resize(FIXED_IMAGE_SIZE, Image.ANTIALIAS)
im = np.array(im).astype(np.float32)
# The storage order of the loaded image is W(width),
# H(height), C(channel). PaddlePaddle requires
# the CHW order, so transpose them.
im = im.transpose((2, 0, 1)) # CHW
im = im / 255.0
return im
# event handler to track training and testing process
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
if event.step % 100 == 0:
print("\nTime: [{}] Pass {}, Batch {}, Cost {}, Acc {}".format
(datetime.now() - start, event.step, event.epoch, event.metrics[0],
event.metrics[1]))
else:
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, fluid.EndEpochEvent):
# Test against with the test dataset to get accuracy.
avg_cost, top1_accuracy, top5_accuracy = trainer.test(
reader=test_reader, feed_order=['img', 'label'])
print('\nTime:[{}] Test with Pass {}, Loss {}, Acc {} Top5 Acc: {}'.format(datetime.now() - start, event.epoch, avg_cost, top1_accuracy, top5_accuracy))
# save parameters
if params_dirname is not None:
trainer.save_params(params_dirname)
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(train_func=train_program, optimizer_func=optimizer_program, place=place)
# Each batch will yield 128 images
BATCH_SIZE = 128
# Reader for training
train_reader = paddle.batch(
paddle.reader.shuffle(custom_reader_creator("/home/vibrant/data/newstoretag/train"), buf_size=500),
batch_size=BATCH_SIZE
)
# Reader for testing
test_reader = paddle.batch(
custom_reader_creator("/home/vibrant/data/newstoretag/test"), batch_size=BATCH_SIZE
)
start = datetime.now()
trainer.train(
reader=train_reader,
num_epochs=100,
event_handler=event_handler,
feed_order=['img', 'label'])
| true
|
71c34fca099dd7ad4aa5040083ae22eafab3feee
|
Python
|
mudsahni/MIT6.006
|
/DocumentDistance/document_distance.py
|
UTF-8
| 1,643
| 3.625
| 4
|
[] |
no_license
|
import sys
import os
import math
import string
def read_file(filename):
try:
f = open(filename, 'r')
return f.read()
except IOError:
print(f"Error opening input file: {filename}.")
sys.exit()
translation_table = str.maketrans(string.punctuation + string.ascii_uppercase,
" " * len(string.punctuation) + string.ascii_lowercase)
def get_words_from_line_list(text):
text = text.translate(translation_table)
return text.split()
def count_frequency(word_list):
D = {}
for word in word_list:
if word in D:
D[word] += 1
else:
D[word] = 1
return D
def get_word_frequencies(filename):
file = read_file(filename)
word_list = get_words_from_line_list(file)
return count_frequency(word_list)
def inner_product(D1, D2):
sum = 0
for key, value in D1.items():
if key in D2:
sum += value * D2[key]
return sum
def vector_angle(D1, D2):
numerator = inner_product(D1, D2)
denominator = math.sqrt(inner_product(D1, D1) * inner_product(D2, D2))
return math.acos(numerator/denominator)
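# The angle between the two word-frequency vectors:
#   theta = arccos( (D1 . D2) / (|D1| * |D2|) )
# Identical documents give 0 radians; documents sharing no words give pi/2.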
def main():
if len(sys.argv) != 3:
print(f"Usage: python document_distance.py filename1 filename2")
else:
filename1 = sys.argv[1]
filename2 = sys.argv[2]
D1 = get_word_frequencies(filename1)
D2 = get_word_frequencies(filename2)
distance = vector_angle(D1, D2)
print(f"The distance between document1 and document2 is {distance} radians.")
if __name__ == "__main__":
main()
| true
|
43cfd5328e5a391bc490c1812ef18b30198e3578
|
Python
|
GeniusV/alphacoders-wallpaper-downloader
|
/downloader_2.py
|
UTF-8
| 4,172
| 2.78125
| 3
|
[] |
no_license
|
import os
import re
import sqlite3
import urllib.request
import shutil
from lxml import etree
from tqdm import tqdm
maxNumber = 999
first = None
count = 0
# open the file to store the result
# the result will be stored at /Users/GeniusV/Desktop/result.txt
def insert(tablename, address):
try:
sql = '''insert into %s VALUES ('%s')
''' % (tablename, address)
# print("Inserting %s..."% tablename)
db.execute(sql)
db.commit()
except sqlite3.OperationalError as e:
if str("no such table: " + tablename) == str(e):
createTable(tablename)
insert(tablename, address)
else:
raise e
def createTable(tablename):
create_table = '''create table %s
(address text UNIQUE )
''' % (tablename)
print("Creating table %s..." % tablename)
db.execute(create_table)
db.commit()
def geturl():
sql = '''select * from animate'''
result = db.execute(sql)
dict = {}
for row in result:
dict[row[0]] = row[1]
return dict
def ifExists(tablename, address):
try:
sql = '''select * from %s where address='%s'
''' % (tablename, address)
result = db.execute(sql).fetchall()
if (len(result)) == 0:
return False
else:
return True
except sqlite3.OperationalError as e:
if str("no such table: " + tablename) == str(e):
createTable(tablename)
ifExists(tablename, address)
def getCount(table_name):
try:
sql = '''select count(*) as count from %s''' % (table_name)
result = db.execute(sql)
ans = result.fetchone()[0]
return ans
except sqlite3.OperationalError as e:
raise e
def get_img_id(address):
m = re.match("https://initiate.alphacoders.com/download/wallpaper/(\d+)/", address)
return m.group(1)
def get_img_count(page):
result = page.xpath(u'/html/body/h1/i')
print(result[0].text)
if __name__ == '__main__':
try:
big_bar = tqdm(total = 1000)
print("Script is running...")
print("Connecting database...")
db = sqlite3.connect("wallpaper.db")
print("Getting animate links...")
urls = geturl()
# update animate alone
# name =
# url =
for name, url in urls.items():
count = 0
os.makedirs('/Users/GeniusV/Desktop/%s/' % name)
path = '/Users/GeniusV/Desktop/%s/%s-result.txt' % (name, name)
print("Creating file %s..." % path)
with open(path, 'w') as file:
# loop for the real urls containing the links
for currentNumber in range(1, maxNumber):
realUrl = url + str(currentNumber)
data = urllib.request.urlopen(realUrl)
html = data.read()
page = etree.HTML(html)
# get all nodes contains the link
p = page.xpath(u"//span[@title='Download Wallpaper!']")
# check if the link has been collected. If false, loop will break, if
# true , store the links.
if first != p[0].get('data-href'):
# get node contain the link
print("Working on " + realUrl)
for arg in p:
# output
link = arg.get('data-href')
num = get_img_id(link)
if not ifExists(name, num):
file.write(link)
file.write('\n')
insert(name, num)
count += 1
# store the first link
first = p[0].get('data-href')
print("%s updates: %d" % (name, count))
else:
break
if count == 0:
shutil.rmtree('/Users/GeniusV/Desktop/%s' % name)
except Exception as e:
raise e
finally:
db.close()
| true
|
b9a5192e6d4a29deebf196b927e6ba372a1a63ae
|
Python
|
tongni1975/gtfspy
|
/gtfspy/routing/util.py
|
UTF-8
| 517
| 3.609375
| 4
|
[
"MIT",
"ODbL-1.0",
"CC-BY-4.0"
] |
permissive
|
import time
def timeit(method):
"""
A Python decorator for printing out the execution time for a function.
Adapted from:
www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods
"""
def timed(*args, **kw):
time_start = time.time()
result = method(*args, **kw)
time_end = time.time()
print('timeit: %r %2.2f sec (%r, %r) ' % (method.__name__, time_end-time_start, str(args)[:20], kw))
return result
return timed
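# A hedged usage sketch (hypothetical function name):
#   @timeit
#   def compute(n):
#       return sum(range(n))
# Calling compute(10**6) then prints a line like:
#   timeit: 'compute' 0.05 sec ('(1000000,)', {})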
| true
|
43e6ebd62209d0ba29e73e9574e432ad576962c9
|
Python
|
adriansr/challenges
|
/codejam/2019/QR/A. Foregone Solution/solve.py
|
UTF-8
| 407
| 3.390625
| 3
|
[] |
no_license
|
def solve(target):
a = ''.join([x if x != '4' else '3' for x in target])
b = ''.join(['0' if x != '4' else '1' for x in target])
while len(b) > 0 and b[0] == '0':
b = b[1:]
if len(b) == 0:
b = '0'
return a, b
T = int(raw_input())
for case_num in range(1, T+1):
target = raw_input()
(a, b) = solve(target)
print 'Case #{0}: {1} {2}'.format(case_num, a, b)
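# Hedged example: target "140" splits into a = "130" and b = "010" -> "10",
# and indeed 130 + 10 = 140 with no digit 4 in either number.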
| true
|
ad666308f2bfa5b0c13c2489019d8511e2e65000
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02534/s467288787.py
|
UTF-8
| 118
| 2.765625
| 3
|
[] |
no_license
|
N = int(input())
ACL_str = []
for i in range(N):
ACL_str.append('ACL')
mojiretu = ''.join(ACL_str)
print(mojiretu)
| true
|
bb4f329f096fc13e426ea8340b9ed788e093fc9d
|
Python
|
saranyab9064/optimal_routing
|
/optimal_routing.py
|
UTF-8
| 19,219
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/python3
from ryu.base import app_manager
from ryu.controller import mac_to_port
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.mac import haddr_to_bin
from ryu.lib.packet import packet
from ryu.lib.packet import arp
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import ipv6
from ryu.lib.packet import ether_types
from ryu.lib.packet import udp
from ryu.lib.packet import tcp
from ryu.lib import mac, ip
from ryu.lib import hub
from ryu.ofproto import inet
from ryu.topology.api import get_switch, get_link, get_host
from ryu.app.wsgi import ControllerBase
from ryu.topology import event, switches
from collections import defaultdict
from operator import itemgetter
from dataclasses import dataclass
import heapq
# to implement the background running process
import threading
import os
import random
import time
#Cisco-style reference bandwidth value
REFERENCE_BW = 10000000
DEFAULT_BW = 10000000
MAX_PATHS = 1
@dataclass
class Paths:
''' Paths container'''
path: list
cost: float
class optimal_algo(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(optimal_algo, self).__init__(*args, **kwargs)
self.mac_to_port = {}
# Values are stored in a dictionary in order to avoid duplication
# Layout: {SwitchId: {Neighbour1: Port_Switches, Neighbour2: Port_Switches}}
self.neigh = defaultdict(dict)
#same idea as above, but keyed as [switch][port]
self.bw = defaultdict(lambda: defaultdict( lambda: DEFAULT_BW))
self.prev_bytes = defaultdict(lambda: defaultdict( lambda: 0))
#Stores EthernetSource: (DPID_of_switch, In_Port)
self.hosts = {}
# To maintain list of switches
self.switches = []
# Map the mac address to the respective MAC:PORT
self.arp_table = {}
# The current path of the network structure is (src, in_port, dst, out_port):Path
self.path_table = {}
# Candidate paths of the network (src, in_port, dst, out_port):Paths
self.paths_table = {}
# Current network optimal_path structure (src, in_port, dst, out_port):Path_with_ports
self.path_with_ports_table = {}
# Create a dictionary to store the switch dpid and dp [SwitchDPID:SwitchDP]
self.datapath_list = {}
#Create a tuple to calculate the path and store the src address, input port and output port (src, in_port, dst, out_port)
self.path_calculation_keeper = []
def find_path_cost(self,path):
''' arg path is a list of all nodes on the route '''
path_cost = []
for i in range(len(path) - 1):
port1 = self.neigh[path[i]][path[i + 1]]
#port2 = self.neigh[path[i + 1]][path[i]]
bandwidth_between_two_nodes = self.bw[path[i]][port1]
#path_cost.append(REFERENCE_BW / bandwidth_between_two_nodes)
path_cost.append(bandwidth_between_two_nodes)
return sum(path_cost)
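# Hedged illustration (hypothetical topology): for path [1, 2, 3] with
# self.bw[1][port_1_to_2] = 4 and self.bw[2][port_2_to_3] = 6,
# find_path_cost returns 4 + 6 = 10.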
def find_paths_and_costs(self, source, destination):
'''
Breadth-First-Search (BFS) style enumeration of all simple paths.
Takes the starting node as source and the ending node as destination.
A queue keeps track of the nodes still to be expanded.
Returns a list of Paths objects.
'''
# if the source and destination are the same, return immediately
if source == destination:
# print("Source is the same as destination ! ")
return [Paths(source,0)]
queue = [(source, [source])]
# the possible_paths list could be replaced by a generator
possible_paths = list()
# while elements remain in the queue
while queue:
# take the next (edge, path) pair off the queue
(edge, path) = queue.pop()
# visit every neighbour not yet on the current path and extend the path
for vertex in set(self.neigh[edge]) - set(path):
# when the neighbour is the destination, record the completed path and its cost
if vertex == destination:
path_to_dst = path + [vertex]
cost_of_path = self.find_path_cost(path_to_dst)
possible_paths.append(Paths(path_to_dst, cost_of_path))
else:
queue.append((vertex, path + [vertex]))
return possible_paths
def find_n_optimal_paths(self, paths, number_of_optimal_paths = MAX_PATHS):
''' arg paths is a list of candidate Paths objects '''
costs = [path.cost for path in paths]
index_of_optimal_path = list(map(costs.index, heapq.nsmallest(number_of_optimal_paths,costs)))
optimal_paths = [paths[op_index] for op_index in index_of_optimal_path]
return optimal_paths
def add_ports_to_paths(self, paths, f_port, l_port):
'''
Add the ports to all switches including hosts
'''
port_path = list()
# bf maps switchDPID -> (Ingress_Port, Egress_Port)
bf = {}
in_port = f_port
for s1, s2 in zip(paths[0].path[:-1], paths[0].path[1:]):
out_port = self.neigh[s1][s2]
bf[s1] = (in_port, out_port)
in_port = self.neigh[s2][s1]
bf[paths[0].path[-1]] = (in_port, l_port)
port_path.append(bf)
return port_path
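    # Worked example under assumed neighbour ports: for path [1, 2, 3] with
    # f_port=1, l_port=2 and neigh = {1: {2: 4}, 2: {1: 4, 3: 5}, 3: {2: 6}},
    # the result is [{1: (1, 4), 2: (4, 5), 3: (6, 2)}].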
def install_paths(self, src, f_port, dest, l_port, ip_src, ip_dst, type, pkt):
        ''' Install the selected path's flow rules on each switch along the route. '''
# self.topology_discover(src, f_port, dst, l_port)
        if (src, f_port, dest, l_port) not in self.path_calculation_keeper:
            self.path_calculation_keeper.append((src, f_port, dest, l_port))
self.topology_discover(src, f_port, dest, l_port)
self.topology_discover(dest, l_port, src, f_port)
for node in self.path_table[(src, f_port, dest, l_port)][0].path:
dp = self.datapath_list[node]
ofp = dp.ofproto
ofp_parser = dp.ofproto_parser
in_port = self.path_with_ports_table[(src, f_port, dest, l_port)][0][node][0]
out_port = self.path_with_ports_table[(src, f_port, dest, l_port)][0][node][1]
actions = [ofp_parser.OFPActionOutput(out_port)]
if type == 'UDP':
nw = pkt.get_protocol(ipv4.ipv4)
l4 = pkt.get_protocol(udp.udp)
match = ofp_parser.OFPMatch(in_port = in_port,
eth_type=ether_types.ETH_TYPE_IP,
ipv4_src=ip_src,
ipv4_dst = ip_dst,
ip_proto=inet.IPPROTO_UDP,
udp_src = l4.src_port,
udp_dst = l4.dst_port)
self.logger.info(f"Installed path in switch: {node} out port: {out_port} in port: {in_port} ")
self.add_flow(dp, 33333, match, actions, 10)
self.logger.info("UDP Flow added ! ")
elif type == 'TCP':
nw = pkt.get_protocol(ipv4.ipv4)
l4 = pkt.get_protocol(tcp.tcp)
match = ofp_parser.OFPMatch(in_port = in_port,
eth_type=ether_types.ETH_TYPE_IP,
ipv4_src=ip_src,
ipv4_dst = ip_dst,
ip_proto=inet.IPPROTO_TCP,
tcp_src = l4.src_port,
tcp_dst = l4.dst_port)
self.logger.info(f"Installed path in switch: {node} out port: {out_port} in port: {in_port} ")
self.add_flow(dp, 44444, match, actions, 10)
self.logger.info("TCP Flow added ! ")
elif type == 'ICMP':
nw = pkt.get_protocol(ipv4.ipv4)
match = ofp_parser.OFPMatch(in_port=in_port,
eth_type=ether_types.ETH_TYPE_IP,
ipv4_src=ip_src,
ipv4_dst = ip_dst,
ip_proto=inet.IPPROTO_ICMP)
self.logger.info(f"Installed path in switch: {node} out port: {out_port} in port: {in_port} ")
self.add_flow(dp, 22222, match, actions, 10)
self.logger.info("ICMP Flow added ! ")
elif type == 'ARP':
match_arp = ofp_parser.OFPMatch(in_port = in_port,
eth_type=ether_types.ETH_TYPE_ARP,
arp_spa=ip_src,
arp_tpa=ip_dst)
self.logger.info(f"Install path in switch: {node} out port: {out_port} in port: {in_port} ")
self.add_flow(dp, 1, match_arp, actions, 10)
self.logger.info("ARP Flow added ! ")
#return self.path_with_ports_table[0][src][1]
return self.path_with_ports_table[(src, f_port, dest, l_port)][0][src][1]
def add_flow(self, datapath, priority, match, actions, idle_timeout, buffer_id = None):
''' Method Provided by the source Ryu library. '''
#To use openflow library
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match, idle_timeout = idle_timeout,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, idle_timeout = idle_timeout, instructions=inst)
datapath.send_msg(mod)
def run_check(self, ofp_parser, dp):
        ''' Every second a background thread polls each switch for port statistics by sending an OFPPortStatsRequest. '''
threading.Timer(1.0, self.run_check, args=(ofp_parser, dp)).start()
req = ofp_parser.OFPPortStatsRequest(dp)
#self.logger.info(f"Port Stats Request has been sent for sw: {dp} !")
dp.send_msg(req)
def topology_discover(self, src, f_port, dest, l_port):
        ''' Compute the optimal path for the given endpoints and assign the in/out ports along it. '''
threading.Timer(1.0, self.topology_discover, args=(src, f_port, dest, l_port)).start()
paths = self.find_paths_and_costs(src, dest)
path = self.find_n_optimal_paths(paths)
path_with_port = self.add_ports_to_paths(path, f_port, l_port)
self.logger.info(f"Possible paths: {paths}")
self.logger.info(f"Optimal Path with port: {path_with_port}")
self.paths_table[(src, f_port, dest, l_port)] = paths
self.path_table[(src, f_port, dest, l_port)] = path
self.path_with_ports_table[(src, f_port, dest, l_port)] = path_with_port
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes", ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
arp_pkt = pkt.get_protocol(arp.arp)
ip_pkt = pkt.get_protocol(ipv4.ipv4)
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
return
dst = eth.dst
src = eth.src
dpid = datapath.id
if src not in self.hosts:
self.hosts[src] = (dpid, in_port)
out_port = ofproto.OFPP_FLOOD
if eth.ethertype == ether_types.ETH_TYPE_IP:
nw = pkt.get_protocol(ipv4.ipv4)
if nw.proto == inet.IPPROTO_UDP:
l4 = pkt.get_protocol(udp.udp)
elif nw.proto == inet.IPPROTO_TCP:
l4 = pkt.get_protocol(tcp.tcp)
if eth.ethertype == ether_types.ETH_TYPE_IP and nw.proto == inet.IPPROTO_UDP:
src_ip = nw.src
dst_ip = nw.dst
self.arp_table[src_ip] = src
h1 = self.hosts[src]
h2 = self.hosts[dst]
self.logger.info(f" IP Proto UDP from: {nw.src} to: {nw.dst}")
out_port = self.install_paths(h1[0], h1[1], h2[0], h2[1], src_ip, dst_ip, 'UDP', pkt)
self.install_paths(h2[0], h2[1], h1[0], h1[1], dst_ip, src_ip, 'UDP', pkt)
elif eth.ethertype == ether_types.ETH_TYPE_IP and nw.proto == inet.IPPROTO_TCP:
src_ip = nw.src
dst_ip = nw.dst
self.arp_table[src_ip] = src
h1 = self.hosts[src]
h2 = self.hosts[dst]
self.logger.info(f" IP Proto TCP from: {nw.src} to: {nw.dst}")
out_port = self.install_paths(h1[0], h1[1], h2[0], h2[1], src_ip, dst_ip, 'TCP', pkt)
self.install_paths(h2[0], h2[1], h1[0], h1[1], dst_ip, src_ip, 'TCP', pkt)
elif eth.ethertype == ether_types.ETH_TYPE_IP and nw.proto == inet.IPPROTO_ICMP:
src_ip = nw.src
dst_ip = nw.dst
self.arp_table[src_ip] = src
h1 = self.hosts[src]
h2 = self.hosts[dst]
self.logger.info(f" IP Proto ICMP from: {nw.src} to: {nw.dst}")
out_port = self.install_paths(h1[0], h1[1], h2[0], h2[1], src_ip, dst_ip, 'ICMP', pkt)
self.install_paths(h2[0], h2[1], h1[0], h1[1], dst_ip, src_ip, 'ICMP', pkt)
elif eth.ethertype == ether_types.ETH_TYPE_ARP:
src_ip = arp_pkt.src_ip
dst_ip = arp_pkt.dst_ip
if arp_pkt.opcode == arp.ARP_REPLY:
self.arp_table[src_ip] = src
h1 = self.hosts[src]
h2 = self.hosts[dst]
self.logger.info(f" ARP Reply from: {src_ip} to: {dst_ip} H1: {h1} H2: {h2}")
out_port = self.install_paths(h1[0], h1[1], h2[0], h2[1], src_ip, dst_ip, 'ARP', pkt)
self.install_paths(h2[0], h2[1], h1[0], h1[1], dst_ip, src_ip, 'ARP', pkt)
elif arp_pkt.opcode == arp.ARP_REQUEST:
if dst_ip in self.arp_table:
self.arp_table[src_ip] = src
dst_mac = self.arp_table[dst_ip]
h1 = self.hosts[src]
h2 = self.hosts[dst_mac]
self.logger.info(f" ARP Reply from: {src_ip} to: {dst_ip} H1: {h1} H2: {h2}")
out_port = self.install_paths(h1[0], h1[1], h2[0], h2[1], src_ip, dst_ip, 'ARP', pkt)
self.install_paths(h2[0], h2[1], h1[0], h1[1], dst_ip, src_ip, 'ARP', pkt)
actions = [parser.OFPActionOutput(out_port)]
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def _switch_features_handler(self, ev):
'''
        Install the table-miss flow entry so that packets without a matching
        flow are sent to the controller.
        Method provided by the Ryu library examples.
'''
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions, 10)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
'''Reply to the OFPPortStatsRequest, visible beneath'''
switch_dpid = ev.msg.datapath.id
for p in ev.msg.body:
            # per-port transmit rate in Mbit/s (delta of tx_bytes over the ~1 s polling interval)
self.bw[switch_dpid][p.port_no] = (p.tx_bytes - self.prev_bytes[switch_dpid][p.port_no])*8.0/1000000
self.prev_bytes[switch_dpid][p.port_no] = p.tx_bytes
#self.logger.info(f"Switch: {switch_dpid} Port: {p.port_no} Tx bytes: {p.tx_bytes} Bw: {self.bw[switch_dpid][p.port_no]}")
@set_ev_cls(event.EventSwitchEnter)
def switch_enter_handler(self, ev):
switch_dp = ev.switch.dp
switch_dpid = switch_dp.id
ofp_parser = switch_dp.ofproto_parser
self.logger.info(f"Switch has been plugged in PID: {switch_dpid}")
if switch_dpid not in self.switches:
self.datapath_list[switch_dpid] = switch_dp
self.switches.append(switch_dpid)
            # poll the switch port counters in the background every second
self.run_check(ofp_parser, switch_dp)
@set_ev_cls(event.EventSwitchLeave, MAIN_DISPATCHER)
def switch_leave_handler(self, ev):
switch = ev.switch.dp.id
if switch in self.switches:
try:
self.switches.remove(switch)
del self.datapath_list[switch]
del self.neigh[switch]
except KeyError:
self.logger.info(f"Switch has been already pulged off PID{switch}!")
@set_ev_cls(event.EventLinkAdd, MAIN_DISPATCHER)
def link_add_handler(self, ev):
self.neigh[ev.link.src.dpid][ev.link.dst.dpid] = ev.link.src.port_no
self.neigh[ev.link.dst.dpid][ev.link.src.dpid] = ev.link.dst.port_no
self.logger.info(f"Link between switches has been established, SW1 DPID: {ev.link.src.dpid}:{ev.link.dst.port_no} SW2 DPID: {ev.link.dst.dpid}:{ev.link.dst.port_no}")
@set_ev_cls(event.EventLinkDelete, MAIN_DISPATCHER)
def link_delete_handler(self, ev):
try:
del self.neigh[ev.link.src.dpid][ev.link.dst.dpid]
del self.neigh[ev.link.dst.dpid][ev.link.src.dpid]
except KeyError:
self.logger.info("Link has been already pluged off!")
pass
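# To try this controller (assuming Ryu is installed), run it with topology
# discovery enabled, e.g.:
#   ryu-manager --observe-links optimal_algor_app.py
# The file name above is an assumption; substitute this module's actual path.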
| true
|
05e4739e03668252b4dd32f415993fc49d6808a5
|
Python
|
coding-2/GuessingGame
|
/Main.py
|
UTF-8
| 400
| 2.859375
| 3
|
[] |
no_license
|
from Graphics import Graphics
graphics = Graphics()
answer = ["zealous", "history", "moist", "flag", "geyser", "squish", "quotation", "", "oblique", "ink", "dogs", "pancake", "fox", "dragon", "turtle", "stripes"]
def function():
    graphics.startup()
    #The Graphics go here#
previous_guesses = []
def guessedBefore(letter):
    # Return True if the letter was guessed before; remember it otherwise.
    if letter in previous_guesses:
        return True
    previous_guesses.append(letter)
    return False
while True:
    guess = graphics.guess()
    guessedBefore(guess)
| true
|
14ddd9002b6cedde726beced947236cf255660ce
|
Python
|
idcmp/pyd
|
/pyd/tools/toolbox.py
|
UTF-8
| 2,954
| 3.15625
| 3
|
[] |
no_license
|
"""
SDK ontop of the API
"""
from datetime import date
from pyd.api import diaryreader as reader
from pyd.api import diarywriter as writer
from pyd.api import diarymodel as model
from pyd.api import naming
from pyd.api import carryforward as cf
from pyd.api.diarymodel import MAXIMUM_HOLIDAY_WEEKS
def find_todos_in_file(filename):
dr = reader.DiaryReader()
week = dr.read_file(filename)
return model.find_todos_in_week(week)
def find_yesterday():
"""Find the last day in the diary which is not today;
this will go back up to MAXIMUM_HOLIDAY_WEEKS if needed."""
current_diary = naming.current_name()
cf.perform_carryforward()
ensure_current_header_exists(current_diary)
days_to_go_back = 0
yesterday = None
while yesterday is None:
days_to_go_back += 1
yesterday = _try_yesterday(days_to_go_back)
if days_to_go_back == MAXIMUM_HOLIDAY_WEEKS * 7:
return None
return yesterday
def _try_yesterday(days_to_go_back):
"""Private. Get the week for the passed in days_to_go_back. If it's the current week, find the day logged
prior to the current day. If there is a gap (ie, today is Thursday and there's an entry for Tuesday but
not Wednesday - then 'yesterday' is Tuesday.
If it's not the current week, then return the last day for the found week (or None if there are no entries for that
week).
"""
yesterday_diaryname = naming.relative_name(days_ago=days_to_go_back)
week = reader.DiaryReader().read_file(yesterday_diaryname)
if yesterday_diaryname == naming.current_name():
if len(week.days()) == 0 or is_today(week.days()[0]):
return None
# Pair up N and N+1 in tuples, iterate through each tuple and if N+1 matches
# today, then return N.
for yesterday, today in zip(week.days(), week.days()[1:]):
if is_today(today):
return yesterday
size = len(week.days())
if not size:
return None
return week.days()[size - 1]
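# Sketch of the pairing trick used above, with assumed day labels:
#   days = ['Mon', 'Tue', 'Thu']            # 'Wed' is missing (a gap)
#   list(zip(days, days[1:]))               # [('Mon', 'Tue'), ('Tue', 'Thu')]
# If today is 'Thu', the pair ('Tue', 'Thu') matches and 'Tue' is "yesterday".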
def ensure_current_header_exists(filename):
"""Ensures the day header for the current day exists in filename.
This will read/rewrite the file to add one.
"""
week = reader.DiaryReader().read_file(filename)
# if we have more than one for whatever reason, that's fine.
    if not any(day.my_day == date.today() for day in week.days()):
day = model.Day(date.today())
week.entries.append(day)
writer.DiaryWriter().write_file(filename, week)
def read_and_rewrite(filename):
"""Converts a diary file to a model and back again.
Used after a manual edit to the file occurs to perform any needed synchronization.
"""
week = reader.DiaryReader().read_file(filename)
writer.DiaryWriter().write_file(filename, week)
def is_today(day):
"""Is the passed in Day object "today" ?"""
return model.Day(date.today()) == day
| true
|
d637a6ce0277854e816eac6ecd064f17bde5f5df
|
Python
|
heikeadel/slot_filling_system
|
/modul_output.py
|
UTF-8
| 856
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#####################################
### CIS SLOT FILLING SYSTEM ####
### 2014-2015 ####
### Author: Heike Adel ####
#####################################
from __future__ import unicode_literals
import codecs, sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
import copy
class Output():
def setResults(self,myResults):
self.myResults = copy.deepcopy(myResults)
def writeResults(self, resultFile):
self.logger.info("writing results to file " + resultFile)
out = open(resultFile, 'a')
for r in self.myResults:
out.write(r + "\n")
out.close()
def __init__(self, loggerMain):
self.myResults = []
self.logger = loggerMain.getChild(__name__)
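if __name__ == '__main__':
    # Minimal usage sketch (logger name and result strings are assumptions):
    import logging
    logging.basicConfig(level=logging.INFO)
    out = Output(logging.getLogger('slotfilling'))
    out.setResults(['per:age\tSUBJ\t42', 'org:founded\tSUBJ\t1999'])
    out.writeResults('results.txt')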
| true
|
365e70fc0fae5ef38ec818652b0b5fcd3fd81dbe
|
Python
|
ganlubbq/communication-simulation
|
/wireless channel/rayleigh channel.py
|
UTF-8
| 2,344
| 3.046875
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
import simulation.multipath
# reference: http://matplotlib.org/users/mathtext.html
t = [0]*10000
a0 = [0j]*len(t)
a1 = [0j]*len(t)
#simulated time span: 0-250 ms
for i in range(len(t)):
t[i] = i/len(t)*0.25
#first, look at the envelope (amplitude) of this model
for j in range(3):
    if j==0:
        fd = 10
        wm = 2 * np.pi * 10    # wm = 2*pi*10, i.e. a maximum Doppler frequency of 10 Hz
    elif j==1:
        fd = 50
        wm = 2 * np.pi * 50    # wm = 2*pi*50, i.e. a maximum Doppler frequency of 50 Hz
    else:
        fd = 100
        wm = 2 * np.pi * 100   # wm = 2*pi*100, i.e. a maximum Doppler frequency of 100 Hz
for i in range(len(t)):
a0[i] = simulation.multipath.rayleigh(wm,t[i],2)[0]# path 1
a0[i] = abs(a0[i])
a0[i] = 10*np.log(a0[i])
a1[i] = simulation.multipath.rayleigh(wm,t[i],2)[1]# path 2
a1[i] = abs(a1[i])
a1[i] = 10*np.log(a1[i])
plt.figure(r'$\omega_m=2*{0}\pi\/\/\/(path1)$'.format(fd))
plt.title(r'$\omega_m=2\cdot{0}\pi(rad/s)\/\/\/or\/\/\/fd={0}(Hz)\/\/\/(path1)$'.format(fd))
plt.xlabel('time (sec)')
plt.ylabel('Rayleigh envelop in dB')
plt.plot(t,a0)
plt.xlim(0, 0.25)
plt.figure(r'$\omega_m = 2*{0}\pi\/\/\/(path2)$'.format(fd))
plt.title(r'$\omega_m = 2\cdot{0}\pi(rad/s)\/\/\/or\/\/\/fd={0}(Hz)\/\/\/(path2)$'.format(fd))
plt.xlabel('time (sec)')
plt.ylabel('Rayleigh envelop in dB')
plt.plot(t,a1)
plt.xlim(0,0.25)
#next, look at the statistical properties of this model
t = [0]*100000
a0 = [0]*len(t)
a0_real = [0]*len(t)
a0_image = [0]*len(t)
a0_abs = [0]*len(t)
#take sample points for observation (the code below uses 0-100 s)
#you will find that only with 0-100 s of samples does the distribution really approach a Gaussian
#with just 0-10 s the resulting histogram looks quite poor
for i in range(len(t)):
t[i] = i/len(t)*100
for i in range(len(t)):
    a0[i] = simulation.multipath.rayleigh(wm,t[i],1)[0]  # amplitude of path 1 (wm keeps its last loop value, 2*pi*100)
a0_real[i] = a0[i].real
a0_image[i] = a0[i].imag
plt.figure('statistical properties')
n,bins,c = plt.hist(a0_real,100,density=True,label='pdf of channel fading real part')
y = [0]*len(bins)
std = 1/np.sqrt(2)
mu = 0
for i in range(len(y)):
y[i] = 1/(np.sqrt(2*np.pi)*std)*np.exp(-((bins[i]-mu)**2)/(2*std**2))
plt.plot(bins,y,label='gaussian distribution (mean = 0, variance = 1/2)')
plt.ylabel('pdf')
plt.legend()
plt.show()
| true
|
a27288de4d58bb1f2a3c6a7876b2ca48abd3a01c
|
Python
|
arianafm/Python
|
/Intermedio/Clase1/EjemploRaise2.py
|
UTF-8
| 310
| 3.828125
| 4
|
[] |
no_license
|
#raise: explicitly triggers a Python exception.
while True:
    mejorCurso = input("Enter the best Proteco course: ")
    mejorCursoConMinusculas = mejorCurso.lower()
    if mejorCursoConMinusculas != "python am sala a":
        raise ValueError
    else:
        print("Congratulations, Python AM sala A is the best course.")
        break
| true
|
8f0629eb3dfd32f5b366d258252361f29226c8b2
|
Python
|
BodleyAlton/scrape-cricinfo
|
/app/genalgo.py
|
UTF-8
| 5,309
| 2.6875
| 3
|
[] |
no_license
|
import random
from app.dtModel import model_dt
#--------Calculate fitness of a player----
def plfitness(p):
fitness=0
    #calculate the fitness of a player and append it to the player record
ptype=p[1] # player Type
print("ptype"+str(ptype))
stats=p[2] # Player Statistics(based on player type)
bowStl=p[3][0]
batStyl=p[3][1]
# Calculate fitness for batsman
if "batsman" in ptype:
fitness+=0.4
batave=stats[0]
if batave < 20:
fitness+=0.2
elif batave>=20 and batave <=25:
fitness+=0.5
elif batave>=26 and batave <=30:
fitness+=0.7
elif batave>=31 and batave <=40:
fitness+=0.9
elif batave > 40:
fitness+=1
# Calculate fitness for wicketkeeper
if "wicketkeeper" in ptype:
wkts=stats[-1]
if wkts < 5:
fitness+=0.1
elif 5<= wkts<=10:
fitness+=0.2
elif 11<= wkts<=20:
fitness+=0.5
elif 21<= wkts<=40:
fitness+=0.6
elif 41<= wkts<=55:
fitness+=0.7
elif 56<= wkts<=70:
fitness+=0.8
elif 71<= wkts<=100:
fitness+=0.9
elif 101<= wkts<=110:
fitness+=1
elif 111<= wkts<=120:
fitness+=1.2
elif 121<= wkts<=130:
fitness+=1.5
elif 131<= wkts<=145:
fitness+=1.7
elif 146<= wkts<=160:
fitness+=1.8
        elif wkts > 160:
fitness+=2
# Calculate fitness for bowlerr
if "bowler" in ptype:
wkts=stats[-1]
bave=stats[0]
if wkts < 5 or bave<15:
fitness+=0.1
elif 5<= wkts<=10 or 15<bave<=20:
fitness+=0.2
elif 11<= wkts<=20 or 21<=bave<=25:
fitness+=0.5
elif 21<= wkts<=40 or 26<=bave<=28:
fitness+=0.6
elif 41<= wkts<=55 or 29<=bave<=31:
fitness+=0.7
elif 56<= wkts<=70 or 32<=bave<=35:
fitness+=0.8
elif 71<= wkts<=100 or 36<=bave<=38:
fitness+=0.9
elif 101<= wkts<=110 or 39<=bave<=40:
fitness+=1
elif 111<= wkts<=120 or 41<=bave<=43:
fitness+=1.2
elif 121<= wkts<=130 or 44<=bave<=46:
fitness+=1.5
elif 131<= wkts<=145 or 47<=bave<=49:
fitness+=1.7
elif 146<= wkts<=160 or 50<=bave<=52:
fitness+=1.8
        elif wkts > 160 or bave >= 53:
fitness+=2
# Calculate fitness for an allrounder
if "allrounder" in ptype:
fitness+=0.4
batave=stats[0]
wkts=stats[-1]
bave=stats[1]
if batave < 20:
fitness+=0.2
elif batave>=20 and batave <=25:
fitness+=0.5
elif batave>=26 and batave <=30:
fitness+=0.7
elif batave>=31 and batave <=40:
fitness+=0.9
elif batave > 40:
fitness+=1
if wkts < 5 or bave<15:
fitness+=0.1
elif 5<= wkts<=10 or 15<bave<=20:
fitness+=0.2
elif 11<= wkts<=20 or 21<=bave<=25:
fitness+=0.5
elif 21<= wkts<=40 or 26<=bave<=28:
fitness+=0.6
elif 41<= wkts<=55 or 29<=bave<=31:
fitness+=0.7
elif 56<= wkts<=70 or 32<=bave<=35:
fitness+=0.8
elif 71<= wkts<=100 or 36<=bave<=38:
fitness+=0.9
elif 101<= wkts<=110 or 39<=bave<=40:
fitness+=1
elif 111<= wkts<=120 or 41<=bave<=43:
fitness+=1.2
elif 121<= wkts<=130 or 44<=bave<=46:
fitness+=1.5
elif 131<= wkts<=145 or 47<=bave<=49:
fitness+=1.7
elif 146<= wkts<=160 or 50<=bave<=52:
fitness+=1.8
        elif wkts > 160 or bave >= 53:
fitness+=2
p.append(fitness)
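# Illustrative player layout inferred from the indexing above (values are
# hypothetical): p[0]=name, p[1]=type list, p[2]=stats,
# p[3]=[bowling style, batting style]; plfitness() appends fitness as p[4].
#   p = ['J. Doe', ['batsman'], [42.5], ['right-arm fast', 'right-hand bat']]
#   plfitness(p)   # p[4] is now 0.4 + 1 = 1.4 (batting average > 40)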
#------Calculate team fitness (sum of each players fitness)-----
def tmfitness(t):
tot=0 #running total of fitness of each player
#Add each players fitness to tot
for p in t:
tot+=p[4]
    # call the DT model implementation; the function should return a value which is added to tot
tot+= model_dt(t) #add points for composition
    # Apply penalty for composition
t.append(tot) #append team fitness to team
#-----Crossover----
# @ Input: 2 Teams
# @Output: 1 Team....Crossover of teams(T1 X T2)
def crossover(T1,T2):
cpt=random.randint(1,10) #Selects a random cutpoint in range(1,10)
#Splice list at cpt
T1R=T1[cpt:]
T1L=T1[:cpt]
T2R=T2[cpt:]
    T2L = T2[:cpt]
    # Perform crossover only if it will not introduce a duplicate player
    for i in T2R:
        if dups(T1L, i):
            return True
        else:
            T1L.append(i)
    return T1L  # Crossover product
#-----Mutator----
#@Input: 1 Team
#@Output: 1 team with random change (change a player in the team)
def mutate(Team,plyr):
pt=random.randint(0,10) #Select random player in team to be replaced
if plyr in Team: #Assert Player to be added is not already on team
return True
del Team[pt] #remove randomly selected player
Team.append(plyr) # Add new player to team
#----Determine whether a player is already on a team
def dups(T,pl):
for p in T:
if pl[0] in p:
return True
return False
| true
|
3b60ba16a66455272178c5fc7135bda179d4fd86
|
Python
|
jchristman/adventofcode
|
/06/win.py
|
UTF-8
| 2,879
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
# ----------------------------------------------------------------------------------------------------------------------
# Advent of Code - Day 6
#
# The Python One-Liner challenge:
#
# Rules:
#
# - Reading input into a variable from another file or with an assignment is acceptable and does not count against
# your total for lines.
# - Your solution must take the form of 'print INSERT_CODE_HERE'
# - Formatting your print with a format string is acceptable so long as you are only substituting ONE value (multiple
# calculations are not allowed)
# - No global variables (outside of the input variable)
#
# ----------------------------------------------------------------------------------------------------------------------
import re
_input = [(re.findall('(toggle|off|on)', line)[0], tuple(map(tuple, map(lambda x: map(int, x.split(',')), re.findall('([0-9]+,[0-9]+)', line))))) for line in open('input', 'r').readlines()]
# ----------------------------------------------------------------------------------------------------------------------
# Part 1 (find how many lights are lit)
#
# This problem was a bit trickier than those previous. The strategy used here was run through all possible coordinates,
# generate a list of commands for each coordinate, then reduce the commands to a value based on a lambda function. If
# toggle, XOR the value at the coordinate with 1 to toggle state; if on, or the value with one to turn it on; if off,
# and the value with 0 to turn it off.
#
# The reduce function applies the lambda to a list in sequential pairs to reduce an iterator to a single value. For
# example, reduce(lambda x, y: x+y, [1,2,3,4]) becomes (((1 + 2) + 3) + 4). We can apply a similar strategy to a list
# like reduce(lambda_func, [0, 'on', 'toggle', 'off']) becomes (((0 | 1) ^ 1) & 0).
# ----------------------------------------------------------------------------------------------------------------------
print sum(reduce(lambda value, cmd: value ^ 1 if cmd == 'toggle' else value | 1 if cmd == 'on' else value & 0, [0] + [cmd for cmd,coords in _input if coords[0][0] <= i <= coords[1][0] and coords[0][1] <= j <= coords[1][1]]) for i in xrange(1000) for j in xrange(1000))
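# Sanity sketch of the reduce pattern above (Python 2, toy command list assumed):
#   reduce(lambda v, c: v ^ 1 if c == 'toggle' else v | 1 if c == 'on' else v & 0,
#          [0, 'on', 'toggle', 'toggle', 'off'])   # -> 0 (on, flipped twice, off)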
# ----------------------------------------------------------------------------------------------------------------------
# Part 2 (find the total brightness of the lights)
#
# The strategy here is the exact same as part 1, with different rules for each light.
# ----------------------------------------------------------------------------------------------------------------------
print sum(reduce(lambda value, cmd: max(value + 2 if cmd == 'toggle' else value + 1 if cmd == 'on' else value - 1, 0), [0] + [cmd for cmd,coords in _input if coords[0][0] <= i <= coords[1][0] and coords[0][1] <= j <= coords[1][1]]) for i in xrange(1000) for j in xrange(1000))
| true
|
2612ac5d77226a4a1259f8f36ea8aeb9b666459f
|
Python
|
gullabi/long-audio-aligner
|
/utils/segment.py
|
UTF-8
| 10,899
| 2.84375
| 3
|
[] |
no_license
|
import os
import subprocess
import logging
from math import floor
from utils.beam import Beam
class Segmenter(object):
def __init__(self, alignment, silence = 0.099, t_min = 2, t_max = 10):
# TODO assert alignment object has target_speaker and punctuation
self.alignment = alignment
self.silence = silence
self.t_min = t_min # TODO for now useless
self.t_max = t_max
self.beam_width = 10
self.get_target_blocks()
self.segments = []
self.best_segments = []
def get_target_blocks(self):
        '''
        Collect all contiguous blocks of alignment tokens that belong to
        the target speaker.
        '''
target_found = False
search_beginning = True
block_tuples = []
start = -1
end = 0
for i, token in enumerate(self.alignment):
if i > end:
if search_beginning:
if token.get('target_speaker'):
start = i
search_beginning = False
else:
if not token.get('target_speaker'):
end = i-1
block_tuples.append((start, end))
search_beginning = True
# if end is not found for the last block, it means
# target speaker block is the last block and
# needs to be added to the block_tuples
if not block_tuples:
if start != -1:
# there is a single block that ends with the
# target speaker
block_tuples.append((start,i))
else:
msg = 'no target speaker block was found'
logging.error(msg)
raise ValueError(msg)
else:
# for multiple blocks ending with the target speaker
if end == block_tuples[-1][1] and\
start != block_tuples[-1][0]:
block_tuples.append((start,i))
self.alignment_blocks = []
for start, end in block_tuples:
self.alignment_blocks.append(self.alignment[start:end+1])
def get_segments(self):
segments = []
for block in self.alignment_blocks:
segments += self.get_block_segments(block)
# check if all segments are shorter than t_max
for segment in segments:
if (segment['end'] - segment['start']) > self.t_max:
#logging.warning('long segment found, cutting it\n%s'\
# %segment['words'])
shorter_segments = self.shorten_segment(segment)
                # shortening the segment might fail
if shorter_segments:
for shorter_segment in shorter_segments:
self.segments.append(shorter_segment)
#logging.warning('* resulting in: %s'\
# %shorter_segment['words'])
else:
self.segments.append(segment)
else:
self.segments.append(segment)
def get_block_segments(self, block):
'''
calculates the minimum length segments based on adjacent tokens
with timestamps and silences between them
'''
        # the block should start and end with a token that has a start (end) time
indicies = [i for i, token in enumerate(block) if token.get('start')]
start_index, end_index = indicies[0], indicies[-1]+1
cropped_block = block[start_index:end_index]
unit_segments = join_block(cropped_block, self.silence)
return unit_segments
def shorten_segment(self, segment):
'''
takes a segment longer than t_max and divides it
until all the parts are smaller than t_max
'''
found = False
tokens = []
for token in self.alignment:
if token.get('start') == segment['start']:
found = True
if found:
tokens.append(token)
if token.get('end') == segment['end']:
break
if not found:
msg = "the segment to be shortened not found in alignment "\
"tokens"
logging.error(msg)
raise ValueError(msg)
# get silences
silences = []
for i, token in enumerate(tokens):
if i > 0:
token_before = tokens[i-1]
if token_before.get('end') and token.get('start'):
silences.append(token['start']-token_before['end'])
else:
silences.append(0)
int_set = set(silences)
if 0 in int_set:
int_set.remove(0)
silence_values = list(int_set)
silence_values.sort(reverse=True)
# get cut indicies starting from the longest silence
cut_index = []
for val in silence_values:
for i, silence in enumerate(silences):
if silence == val:
cut_index.append(i)
# cut the segment starting from the longest silence interval
final_cut_index = []
new_segments = []
for index in cut_index:
final_cut_index.append(index)
new_segments = join_tokens(tokens, final_cut_index)
max_duration = max([nsegment['end']-nsegment['start']\
for nsegment in new_segments])
if max_duration <= self.t_max:
break
# cut_index and hence new_segments could end up empty
if new_segments:
# optimize the new segments since there could be many single token ones
sh_beam = Beam(5, 0.4*self.t_min, 0.72*self.t_max)
for segment in new_segments:
sh_beam.add(segment)
return sh_beam.sequences[0]
else:
return []
def optimize(self):
beam = Beam(self.beam_width, self.t_min, self.t_max)
for segment in self.segments:
beam.add(segment)
# sequences are ordered according to the score
# and the first element has the best score
self.best_segments = beam.sequences[0]
def segment_audio(self, audio_file, base_path='tmp'):
base_name = '.'.join(os.path.basename(audio_file).split('.')[:-1])
path = os.path.join(base_path, base_name[0], base_name[1])
if not os.path.isdir(path):
os.makedirs(path)
for segment in self.best_segments:
self.segment_cue(audio_file, segment, path)
if (float(segment['end']) - float(segment['start'])) > 15.:
msg = '%s longer than 15 s'%segment['segment_path']
logging.info(msg)
@staticmethod
def segment_cue(audio, cue, base_path):
audio_tool = 'ffmpeg'
seek = floor(cue['start'])
start = cue['start'] - seek
end = cue['end']
duration = end - cue['start']
basename = '.'.join(os.path.basename(audio).split('.')[:-1])
cue['segment'] = '_'.join([basename, str(cue['start']), str(cue['end'])])
cue['segment_path'] = os.path.join(base_path, cue['segment'])+'.wav'
args = [audio_tool, '-y', '-hide_banner', '-loglevel', 'panic',\
'-ss', str(seek), '-i', audio, '-ss', str(start), \
'-t', str(duration), '-ac', '1', '-ar', '16000', \
cue['segment_path']]
if os.path.isfile(cue['segment_path']):
logging.debug("%s already exists skipping"%cue['segment'])
else:
subprocess.call(args)
if not os.path.isfile(cue['segment_path']):
msg = "File not created from ffmpeg operation %s"\
%cue['segment_path']
logging.error(msg)
raise IOError(msg)
def join_block(cropped_block, silence):
unit_segments = []
segment = {'words': '', 'original_words': '',
'start': cropped_block[0]['start']}
for i, element in enumerate(cropped_block):
if i < len(cropped_block)-1:
segment['words'] += element['word'] + ' '
segment['original_words'] += element['original_word'] + ' '
next_element = cropped_block[i+1]
if element.get('end') and next_element.get('start'):
if (float(next_element['start'])-float(element['end'])) >= \
silence:
segment['end'] = element['end']
segment['words'] = segment['words'].strip()
segment['original_words'] = segment['original_words'].strip()
if element.get('punctuation'):
segment['punctuation'] = element['punctuation']
unit_segments.append(segment)
# new segment
segment = {'words': '', 'original_words': '',
'start':next_element['start']}
else:
segment['words'] += element['word']
segment['original_words'] += element['original_word']
segment['end'] = element['end']
unit_segments.append(segment)
return unit_segments
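# Worked sketch of join_block with assumed tokens and silence=0.099:
#   tokens = [{'word': 'a', 'original_word': 'A', 'start': 0.0, 'end': 0.5},
#             {'word': 'b', 'original_word': 'B', 'start': 0.7, 'end': 1.0}]
#   join_block(tokens, 0.099)
# yields two segments, split at the 0.2 s pause:
#   [{'words': 'a', 'start': 0.0, 'end': 0.5, ...},
#    {'words': 'b', 'start': 0.7, 'end': 1.0, ...}]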
def join_tokens(tokens, indicies):
'''
takes cut indicies and joins tokens accordingly
cut index of 0 means, there will be two segments tokens[:1], tokens[1:]
cut indicies of 0 and 4 means tokens[:1], tokens[1:5], tokens[5:]
'''
segments = []
indicies.sort()
for i, index in enumerate(indicies):
if i == 0:
segments.append(add_tokens(tokens[:index+1]))
if len(indicies) == 1:
# we need to end the last segment manually
segments.append(add_tokens(tokens[index+1:]))
else:
last_index = indicies[i-1]+1
segments.append(add_tokens(tokens[last_index:index+1]))
if i == len(indicies)-1:
segments.append(add_tokens(tokens[index+1:]))
return segments
def add_tokens(tokens):
segment = {'words': '', 'original_words': '',
'start': tokens[0]['start']}
for i, token in enumerate(tokens):
if i < len(tokens)-1:
segment['words'] += token['word'] + ' '
segment['original_words'] += token['original_word'] + ' '
else:
segment['words'] += token['word']
segment['original_words'] += token['original_word']
segment['end'] = token['end']
if token.get('punctuation'):
segment['punctuation'] = token['punctuation']
return segment
def log_subprocess_output(pipe):
for line in iter(pipe.readline, b''): # b'\n'-separated lines
logging.debug('subprocess stderr: %r', line)
| true
|
0d7492bee0695692470693fc6ebcf11a324f9978
|
Python
|
Jkwnlee/python
|
/pymatgen_practice/v1_dielectic.py
|
UTF-8
| 3,517
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import pymatgen as pmg
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# need to check how to treat tensor of dielectric:
# Hashin, Z., & Shtrikman, S. Physical Review, 130(1), 129–133. (1963).
# Conductivity of Polycrystals.
# doi:10.1103/physrev.130.129
df1 = pd.read_json('Petousis2017_Scientific_Data.json')
# data from https://datadryad.org/stash/dataset/doi:10.5061/dryad.ph81h
# Petousis, I., Mrdjenovich, D., Ballouz, E. et al. Sci Data 4, 160134 (2017).
# High-throughput screening of inorganic compounds for the discovery of novel dielectric and optical materials.
# https://doi.org/10.1038/sdata.2016.134
# no data from Nature Communications volume 5, Article number: 4845 (2014)
df2 = pd.read_json('Jingyu2020_Scientific_Data.json')
# data from https://archive.materialscloud.org/2020.0010/v1
# Qu, J., Zagaceta, D., Zhang, W. et al. Sci Data 7, 81 (2020).
# High dielectric ternary oxides from crystal structure prediction and high-throughput screening.
# https://doi.org/10.1038/s41597-020-0418-6
df2['band_gap'] = 0
df2['poly_total'] = 0
for i in range(df2.shape[0]):
# User comment:
# (1) To plot result like Jingyu2020, getting only poly e (without adding electronics)
# (2) The df2.e_poly[i][0] is same to (df2.e_total[i][0][0][0] + df2.e_total[i][0][1][1]+df2.e_total[i][0][2][2])/3
# (3) Maybe.. it comes from summation of ionic and electronic term
df2['poly_total'].iloc[i] = df2.e_poly[i][0] #\
#+ (df2.e_electronic[0][0][0][0] + df2.e_electronic[0][0][1][1]+df2.e_electronic[0][0][2][2])/3
df2['band_gap'].iloc[i] = df2.meta[i]['bandgap'][0][0]
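# Quick sanity check of note (2) above for a single assumed row (index 0):
#   trace_avg = (df2.e_total[0][0][0][0] + df2.e_total[0][0][1][1]
#                + df2.e_total[0][0][2][2]) / 3
#   assert abs(trace_avg - df2.e_poly[0][0]) < 1e-6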
font_size, scale = 16, 8
cols , rows = 1,1
# msize = 100
fig, ax = plt.subplots(ncols =cols, nrows=rows, figsize=(cols*scale*1.5,rows*scale))
plt.rcParams['font.size'] = font_size
## Text and line trend study
x= np.array([1e-1, 1e1])
x_shift = 1.4
y_shift = 0.5
for c1 in [2,4,8,16]:
ax.plot(x,(c1/x)**2,'g', '--', linewidth=0.3 )
if (c1/x[0])**2 < 1e3:
ax.text(x[0]*x_shift, (c1/x[0])**2* y_shift, 'c$_{1}=%i$' %c1, color = 'g')
else:
ax.text(c1/(1e3)**0.5*x_shift, 1e3* y_shift, 'c$_{1}=%i$' %c1, color = 'g')
x_shift = 1
y_shift = 0.80
for c2 in [10,20,40,80, 160]:
ax.plot(x,(c2/x),'r', '--', linewidth=0.3 )
if (c2/x[0]) < 1e3:
ax.text(x[0]*x_shift, (c2/x[0])* y_shift, 'c$_{2}=%i$' %c2, color = 'r')
else:
ax.text(c2/1e3*x_shift, 1e3* y_shift, 'c$_{2}=%i$' %c2, color = 'r')
plt1 = sns.scatterplot(data = df1 , x='band_gap', y='poly_total', color = 'black', size=1,
ax = ax, legend= False)#
plt2 = sns.scatterplot(data = df2 , x='band_gap', y='poly_total', color = 'red' , size=1,
ax = ax, legend= False)#, label='Jingyu2020')
plt1l = ax.plot(0,0, 'o', c='black',label='Petousis2017')
plt2l = ax.plot(0,0, 'o', c='red', label='Jingyu2020')
ax.set(xscale = 'log', yscale = 'log',
xlim = [1e-1, 1e1] , ylim=[1e0,1e3],
ylabel = '$\\varepsilon_{\\rm poly}$ (arb.)',
xlabel = '$E_{\\rm g}@GGA$ (eV)',
title = '$c_1 = E_{\\rm g}\cdot \\sqrt{\\varepsilon_{\\rm poly}}$ \
and $c_2 = E_{\\rm g}\cdot \\varepsilon_{\\rm poly}$ ')
ax.grid(which='major',axis='both', color='grey',linestyle='-', linewidth=0.5)
ax.grid(which='minor',axis='both', color='red',linestyle='--', linewidth=0.1)
ax.legend() ## Need to Update
plt.savefig('fig_data_reproduce.png', dpi = 500)
| true
|
77061bf51b7f503221c420f0a34e621a34509aac
|
Python
|
aristotlepenguin/endless-war
|
/ew/cmd/item/itemutils.py
|
UTF-8
| 9,729
| 3.140625
| 3
|
[] |
no_license
|
import sys
from ew.backend import core as bknd_core
from ew.backend import item as bknd_item
from ew.backend.item import EwItem
from ew.static import cfg as ewcfg
from ew.utils import core as ewutils
from ew.utils.combat import EwUser
from ew.static.weapons import weapon_list
"""
Drop item into current district.
"""
def item_drop(
id_item = None,
other_poi = None
):
try:
item_data = EwItem(id_item=id_item)
user_data = EwUser(id_user=item_data.id_owner, id_server=item_data.id_server)
if other_poi == None:
dest = user_data.poi
else:
dest = other_poi
if item_data.item_type == ewcfg.it_cosmetic:
item_data.item_props["adorned"] = "false"
item_data.persist()
bknd_item.give_item(id_user=dest, id_server=item_data.id_server, id_item=item_data.id_item)
except Exception as e:
ewutils.logMsg("Failed to drop item {}: {}.".format(id_item, e))
def get_fish_collection(id_item, id_server):
item = EwItem(id_item=id_item)
# Checks whether the collection is in an apartment "decorate" inventory or the player's inventory
if 'decorate' not in str(item.id_owner):
return 'It\'s a large aquarium, built for whole schools of fish. You can\'t see what\'s inside because you\'re nearly killing yourself carrying it.'
# Create string for the "id_user" corresponding to the collection.
id_item_col = "{}collection".format(id_item)
fish_inv = bknd_item.inventory(id_server=id_server, id_user=id_item_col)
response = ""
if len(fish_inv) == 0:
return "Look at all these- wait, you don't have any fish in here."
elif len(fish_inv) == 1:
response += "There's just one fish in here. It's feeling very loved."
elif len(fish_inv) < 6:
response += "It's pretty spacious in here!"
elif len(fish_inv) < 43:
response += "Look at all these fish!"
else:
response += "This thing is packed!"
response += " There's "
fish_arr = []
for fish in fish_inv:
fish_item = EwItem(fish.get('id_item'))
length = fish_item.item_props.get('length')
        # If the fish doesn't have a length, fall back to the midpoint of its size range
if length is None:
length = float((ewcfg.fish_size_range.get(fish_item.item_props.get('size'))[0] + ewcfg.fish_size_range.get(fish_item.item_props.get('size'))[1]) / 2)
fish_item.item_props['length'] = length
fish_item.persist()
# Add the fish's name and length to a list
fish_arr.append("a {} ({} in)".format(fish.get('name'), length))
# Create a response, formatting a nice-looking sentence from the fish name/length list.
response += "{}{}".format(ewutils.formatNiceList(names=fish_arr), ".")
return response
def get_scalp_collection(id_item, id_server):
item = EwItem(id_item=id_item)
# Checks whether the collection is in an apartment "decorate" inventory or the player's inventory
if 'decorate' not in str(item.id_owner):
return 'It\'s a scalp board, detailing the people you\'ve eliminated. Somehow, show and tell in public seems like a bad idea.'
# Create string for the "id_user" corresponding to the collection.
id_item_col = "{}collection".format(id_item)
scalp_inv = bknd_item.inventory(id_server=id_server, id_user=id_item_col)
response = "You take a gander at all these marks.\n __**SHIT LIST**__:"
if len(scalp_inv) == 0:
return "Soon. This board will fill someday."
for scalp in scalp_inv:
scalp_item = EwItem(scalp.get('id_item'))
# Get the name and kill description from the scalp's item_props
victim_name = scalp_item.item_props.get('cosmetic_name').replace('\'s scalp', '').capitalize()
victim_death = scalp_item.item_props.get('cosmetic_desc').replace('A scalp.', '')
for weapon in weapon_list:
# If the scalp's kill description matches a weapon, replace that with the weapon's kill descriptor (ie. "Gunned down")
if weapon.str_scalp == victim_death:
victim_death = "{}{}".format(weapon.str_killdescriptor.capitalize(), '.')
break
response += "\n~~{}~~ *{}*".format(victim_name, victim_death)
return response
def get_soul_collection(id_item, id_server):
item = EwItem(id_item=id_item)
# Checks whether the collection is in an apartment "decorate" inventory or the player's inventory
if 'decorate' not in str(item.id_owner):
return 'It\'s a soul cylinder. You can\'t really tell whose soul is whose. You\'ve been carrying this thing around and all the souls are jostled and queasy.'
# Create string for the "id_user" corresponding to the collection.
id_item_col = "{}collection".format(id_item)
soul_inv = bknd_item.inventory(id_server=id_server, id_user=id_item_col)
if len(soul_inv) == 0:
return "No souls. Just ask anyone."
response = "You look into the cylinder to check how the souls are doing.\n\n"
for soul in soul_inv:
# Get the user_data from the soul's corresponding user
soul_item = EwItem(id_item=soul.get('id_item'))
soul_user = EwUser(id_server=id_server, id_user=soul_item.item_props.get('user_id'))
# If the user doesn't have a defined race, set it to humanoid
if soul_user.race is None or soul_user.race == '':
soul_user.race = ewcfg.race_humanoid #do not persist this!
# Change flavor text based on the soul's race
soul_text = ewcfg.defined_races.get(soul_user.race).get('soul_behavior')
soul_name = soul_item.item_props.get('cosmetic_name')
response += "{} {}\n".format(soul_name, soul_text)
return response
def get_weapon_collection(id_item, id_server):
item = EwItem(id_item=id_item)
# Checks whether the collection is in an apartment "decorate" inventory or the player's inventory
if 'decorate' not in str(item.id_owner):
return "It's a weapon rack. You can't admire its splendor while it's on your back, though."
# Create string for the "id_user" corresponding to the collection.
id_item_col = "{}collection".format(id_item)
weapon_inv = bknd_item.inventory(id_server=id_server, id_user=id_item_col)
if len(weapon_inv) == 0:
return "There are no weapons in here. Arms are meant to be used, not preserved."
response = "You take a look at the archive of your violent history...\n\n"
for wep in weapon_inv:
# Get the weapon's stats, based on its props
weapon_item = EwItem(id_item=wep.get('id_item'))
kills = weapon_item.item_props.get('totalkills', 0)
backfires = weapon_item.item_props.get('totalsuicides', 0)
name = weapon_item.item_props.get('weapon_name')
# If the weapon doesn't have a unique name, give it a generic descriptor
if name is None or name == '':
name = 'Generic {}'.format(weapon_item.item_props.get('weapon_type'))
response += "{}: {} KILLS{}\n".format(name, kills, ", {} BACKFIRES".format(backfires) if backfires > 0 else "")
return response
def get_crop_collection(id_item, id_server):
item = EwItem(id_item=id_item)
# Checks whether the collection is in an apartment "decorate" inventory or the player's inventory
if 'decorate' not in str(item.id_owner):
return "It's a greenhouse, built small enough to carry around (uncomfortably). Your unwieldy grip means you can't sus out what plants are inside."
# Create string for the "id_user" corresponding to the collection.
id_item_col = "{}collection".format(id_item)
crop_inv = bknd_item.inventory(id_server=id_server, id_user=id_item_col)
if len(crop_inv) == 0:
return "There's no crops inside this portable greenhouse. Maybe they're potted instead?"
response = "You glance at the Greenhouse. Inside, there's "
crop_arr = []
for crop in crop_inv:
crop_icon = ewcfg.emote_blank
# Get crop's type and name
crop_item = EwItem(crop.get('id_item'))
crop_type = crop_item.item_props.get('id_food')
crop_name = crop_item.item_props.get('food_name')
# If the crop isn't None, get its matching emote
if crop_type is not None:
crop_icon = ewcfg.crop_icon_map.get(crop_type)
# Add crop to a list with its emote
crop_arr.append("a {} {}".format(crop_name, crop_icon))
# Create a response, formatting a nice-looking sentence from the crop name/emote list
response += "{}{}".format(ewutils.formatNiceList(names=crop_arr), ".")
return response
def get_general_collection(id_item, id_server):
item = EwItem(id_item=id_item)
# Checks whether the collection is in an apartment "decorate" inventory or the player's inventory
if 'decorate' not in str(item.id_owner):
return "It's a multi-item display case. Best viewed when placed."
# Create string for the "id_user" corresponding to the collection.
id_item_col = "{}collection".format(id_item)
item_inv = bknd_item.inventory(id_server=id_server, id_user=id_item_col)
if len(item_inv) == 0:
return "There's nothing in here at the moment."
response = "You examine your preserved collection. Inside is "
item_arr = []
for gen_item in item_inv:
# Add item to a list with its ID
item_arr.append("a {} ({})".format(gen_item.get('name'), gen_item.get('id_item')))
# Create a response, formatting a nice-looking sentence from the item name/ID list
response += "{}{}".format(ewutils.formatNiceList(names=item_arr), ".")
return response
| true
|
544224a2a1ac775182c89fb1dc4fea67eca83290
|
Python
|
Shuaiyicao/leetcode-python
|
/149.py
|
UTF-8
| 1,356
| 3.21875
| 3
|
[] |
no_license
|
# Definition for a point
# class Point:
# def __init__(self, a=0, b=0):
# self.x = a
# self.y = b
class Solution:
# @param points, a list of Points
# @return an integer
def gao(self, a, b):
if a.x == b.x:
return 1<<30
return (1.0 * b.y - a.y) / (b.x - a.x)
def mx(self, lists):
n = len(lists)
if n == 0:
return 0
lists.sort()
i = 0
res = 1
while i + 1 < n:
if abs(lists[i] - lists[i+1]) < 1e-4:
tmp = 1
while i + 1 < n and abs(lists[i] - lists[i+1]) < 1e-4:
i += 1
tmp += 1
i += 1
res = max(res, tmp)
else:
i += 1
return res
def maxPoints(self, points):
if len(points) < 3:
return len(points)
n = len(points)
res = 2
for i in xrange(n):
lists = []
same = 1
for j in xrange(i + 1, n):
if points[j].x == points[i].x and points[j].y == points[i].y:
same += 1
else:
lists.append(self.gao(points[i], points[j]))
res = max(res, self.mx(lists) + same)
return res
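# Illustrative run (Python 2; Point instances assumed as in the header stub):
#   pts = [Point(0, 0), Point(1, 1), Point(2, 2), Point(3, 1)]
#   Solution().maxPoints(pts)   # -> 3, for the collinear points on y = x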
| true
|
ec9709d75b748e96208f8e739e393a29d4e4b31a
|
Python
|
gilsonaureliano/Python-aulas
|
/python_aulas/desaf104_def_notas_dicionario.py
|
UTF-8
| 863
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
def notas(*n, sit=False):
    """
    Grade report dictionary.
    :param n: the students' grades
    :param sit: if True, also include the final status
    :return: dict with total, highest ('maior'), lowest ('menor'),
             average ('media') and, optionally, status ('situação')
    """
    dic = {}
    dic['total'] = len(n)
    mai = 0
    men = 0
    total = 0
    for c, v in enumerate(n):
        if v > mai:
            mai = v
        if c == 0:
            men = v
        elif v < men:
            men = v
        total += v
    med = total / len(n)
    dic['maior'] = mai
    dic['menor'] = men
    dic['media'] = med
    if sit:
        if med <= 5:
            dic['situação'] = 'POOR'
        elif 5 < med < 7:
            dic['situação'] = 'FAIR'
        else:
            dic['situação'] = 'GOOD'
    return dic
# main program
resp = notas(6, 7, 5, 9, 9, 9, sit=True)
print(resp)
help(notas)
| true
|
31f6ba18e1b7fd7deccc0fd547c8d6d330ae81dc
|
Python
|
N0tH3r0/Python-Estrutura-Sequencial
|
/Ex 08.py
|
UTF-8
| 337
| 4.5625
| 5
|
[] |
no_license
|
#Make a Program that asks how much you earn per hour and the number of hours worked in the month. Calculate and show your total salary in that month.
fhour = float(input("Enter your hourly rate: "))
fmonth = float(input("Enter the number of hours worked in the month: "))
res = fhour * fmonth
print("Your salary for the month was:", res)
| true
|
a6fe065223066146f257e74019c57740685572e5
|
Python
|
ariane-lozachmeur/phylogenetictree
|
/benchmark.py
|
UTF-8
| 1,497
| 2.734375
| 3
|
[] |
no_license
|
from ete3 import Tree
import pandas as pd
from Bio import Phylo
# File to benchmark our methods against MUSCLE and MAFFT using the Robinson-Foulds metric.
# It creates a benchmark_results.csv file in the "results" directory and prints all of the
# trees, which can then be saved for further analysis.
prots = ['LRRD1','TRAF6','KCNB2']
methods = ['single_tree','average_tree','centroid_tree','muscle','mafft','centroidmsa_tree']
def main(prots, methods):
results = pd.DataFrame( )
for p in prots:
try:
t1 = Tree(open('results/'+p+'_uniprot.dnd').read())
except:
print('WARNING: There is no uniprot file for the protein %s' %p)
continue
print(p,'uniprot')
# Phylo.draw(Phylo.read('results/'+p+'_uniprot.dnd', "newick"))
scores = []
for m in methods:
print(p,m)
try:
t2 = Tree(open('results/'+p+'_'+m+'.dnd').read())
except:
print('WARNING: There is no %s file for the protein %s' %(m,p))
continue
# Phylo.draw(Phylo.read('results/'+p+'_'+m+'.dnd', "newick"))
rf, max_rf, common_leaves, parts_t1, parts_t2, i, j = t1.robinson_foulds(t2, unrooted_trees=True)
scores.append(float(rf)/float(max_rf))
results = results.append(pd.Series(scores,name=p))
results.columns = methods
results.to_csv('results/benchmark_results.csv')
if __name__ == '__main__':
main(prots, methods)
| true
|
aa1be29d6d69b36711e0c0223345852d1d1772b7
|
Python
|
ben-kolber/openCV_on_raspi
|
/plan_path.py
|
UTF-8
| 3,954
| 2.8125
| 3
|
[] |
no_license
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
def show(string, image):
cv2.imshow(string, image)
cv2.waitKey()
img = cv2.imread(
"/Users/benjaminkolber/Desktop/Personal Programming /open_cv/pics/sample_path.png")
# get image grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# apply gaussian blur
gaussian_blur = cv2.GaussianBlur(gray, (5, 5), 0)
# binary conversion using a fixed threshold
thresh = 127
im_bw_2 = cv2.threshold(gaussian_blur, thresh, 255, cv2.THRESH_BINARY)[1]
# inverse of the binary image
inverse = cv2.bitwise_not(im_bw_2)
# sobel Line Extraction
sobelx = cv2.Sobel(inverse, cv2.CV_64F, 1, 0, ksize=5)
sobely = cv2.Sobel(inverse, cv2.CV_64F, 0, 1, ksize=5)
sobelxy = cv2.add(sobelx, sobely)
# Laplacian Line Extraction
laplacian = cv2.Laplacian(inverse, cv2.CV_64F, 0)
# XY coordinates
right_line = []
left_line = []
i = 0
CUTOFF = 130 # cut off top 1/3 of image pixel 322 / 922
MAX_ROWS = j = 921
MAX_COLUMNS = k = 1226
# base case -> find start of line.
L_found = False
R_found = False
for i in range(MAX_COLUMNS):
if(not L_found): # first left point
i += 1
if(laplacian[j][i] > 0 and not L_found):
L_found = True
if(not R_found): # first right point
k -= 1
if(laplacian[j][k] > 0 and not R_found):
R_found = True
if (k == 0 or i == MAX_COLUMNS):
j -= 1
if(L_found and R_found):
break
left_line.append([i, j]) # left border coordinates
right_line.append([k, j]) # right border coordinates
right_range = []
left_range = []
# find left line trajectory
LEFT = i
RIGHT = k # first point where left line was found to start
iteration = 6
optimal_path = []
while (j > CUTOFF):
found_right = False
found_left = False
if (laplacian[j][RIGHT] <= 0): # Right line does not continue straight
# search close vicinity
RIGHT -= (int)(iteration / 2)
for i in range(iteration): # search close vicinity
if (laplacian[j][RIGHT] <= 0):
RIGHT += 1
else:
found_right = True
else:
found_right = True
if(laplacian[j][LEFT] <= 0): # Left line does not continue straight
LEFT -= (int)(iteration / 2)
for i in range(iteration): # search close vicinity
if(laplacian[j][LEFT] <= 0):
LEFT += 1
else:
found_left = True
else:
found_left = True
# check if a certain point was not found
if (not found_left or not found_right):
j -= 1 # move up a row
LEFT -= (int)(iteration / 2)
RIGHT -= (int)(iteration / 2)
else:
if ((RIGHT - LEFT) > 40 and j % 15 == 0): # check if pixels belong to same line more or less
optimal_path.append([((RIGHT + LEFT)/2), j])
right_line.append([RIGHT, j])
left_line.append([LEFT, j])
right_range.append([((RIGHT + LEFT)/2 + RIGHT) / 2, j])
left_range.append([((RIGHT + LEFT)/2 + LEFT) / 2, j])
j -= 1
else:
right_line.append([RIGHT, j])
left_line.append([LEFT, j])
right_range.append([((RIGHT + LEFT)/2 + RIGHT) / 2, j])
left_range.append([((RIGHT + LEFT)/2 + LEFT) / 2, j])
j -= 1
figure(num=None, figsize=(12, 12), facecolor='w', edgecolor='k')
# plt.plot(optimal_X, optimal_Y, marker='v', color='r')
plt.plot(*zip(*right_line), marker='.', color='k', ls='')
plt.plot(*zip(*left_line), marker='.', color='k', ls='')
plt.plot(*zip(*right_range), marker='.', color='r', ls='')
plt.plot(*zip(*left_range), marker='.', color='r', ls='')
plt.plot(*zip(*optimal_path), marker='^', color='b', ls='')
plt.gca().invert_yaxis()
# plt.gca().invert_xaxis()
show('gray', gray)
show('binary', inverse)
show('laplacian', laplacian)
plt.show()
| true
|
4549c92416a7bf97f82537f87bf96633b77700a6
|
Python
|
jaredrokowski/school_code
|
/Python/heatmap.py
|
UTF-8
| 1,087
| 2.515625
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
def heatMapForDATA( DATA, rowlabels, collabels, fignum, userowlabels=True, usecollabels=False ) :
## Need corners of quads for color map
X = [ [] for i in range(0,len(collabels)+1) ]
Y = [ [] for j in range(0,len(collabels)+1) ]
for j in range(0,len(collabels)+1) :
for i in range(0,len(rowlabels)+1) :
X[j].append(j)
Y[j].append(i)
Z = [ [] for i in range(0,len(collabels)) ]
for j in range(0,len(collabels)):
for k in range(0,len(rowlabels)) :
Z[j].append(DATA[k][j])
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
fig = plt.figure(fignum)
ax = fig.add_subplot(111)
cmap = plt.get_cmap('hot')
cm = ax.pcolormesh(X, Y, Z, cmap=cmap )
ax.set_ylim([0.,len(rowlabels)])
ax.set_xlim([0.,len(collabels)])
if usecollabels :
xticks = [ 0.5 + j for j in range(0,len(collabels)) ]
ax.set_xticks(xticks)
ax.set_xticklabels(collabels)
if userowlabels :
yticks = [ 0.5 + j for j in range(0,len(rowlabels)) ]
ax.set_yticks(yticks)
ax.set_yticklabels(rowlabels)
plt.colorbar(cm, ax=ax)
plt.draw()
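if __name__ == '__main__':
    # Minimal usage sketch (all data illustrative):
    DATA = [[1, 2, 3], [4, 5, 6]]  # 2 rows x 3 columns
    heatMapForDATA(DATA, ['r0', 'r1'], ['c0', 'c1', 'c2'], fignum=1,
                   usecollabels=True)
    plt.show()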
| true
|
ad94471530a1e6860273ee679f70fb161857c975
|
Python
|
briandleahy/optimal-equiareal-map
|
/optimalmap.py
|
UTF-8
| 8,136
| 3.546875
| 4
|
[] |
no_license
|
# TODO
# The problem here is that the metric diverges like 1/sin(2theta) at the
# poles (so a 1/x singularity), which is _not integrable_. So while
# you don't get infinities, you do get something which diverges as you
# add more points. This is why you get weirdness with the maps as the degree
# goes higher.
# The solution is simple: optimize the metric on the punctured sphere,
# with a certain range away from the poles. Doing something like +- 5 deg
# latitude gets you out of the regions of interest everywhere (within
# antarctic mainland and within the arctic ocean.) -- northernmost land
# is 83d40m north.
# An alternative option is to use an alternative equiareal projection
# such as a Mollweide or Sanson projection and do the quadrature over
# that.
# To do this, you should create a general projection class which takes
# (theta, phi) and maps to (x, y), and calculates a metric at those
# points.
import itertools
import numpy as np
polyval2d = np.polynomial.polynomial.polyval2d # shorter alias
class ChainTransform(object):
def __init__(self, transforms):
self.transforms = transforms
def evaluate(self, x, y):
for transform in self.transforms:
x, y = transform.evaluate(x, y)
return x, y
class CoordinateTransform(object):
def __init__(self, degree=(5, 5)):
"""
Represents a coordinate transformation as a polynomial.
Since we don't care about overall shifts (x -> x + a) we start
at linear order in the polynomial by keeping the zeroth term
zero.
"""
self.degree = degree
shp = [2] + (np.array(degree)+1).tolist()
self._coeffs = np.zeros(shp, dtype='float') # 2 for x,y
self._mask = self._create_mask_for_parameters()
# Then we want to start at a reasonable param value (X=x etc)
self._coeffs[0, 1, 0] = 1
self._coeffs[1, 0, 1] = 1
def update(self, params):
self._coeffs[self._mask] = params.copy()
@property
def params(self):
return self._coeffs[self._mask].copy()
def evaluate(self, x, y):
"""New coordinates as a function of the old"""
return [polyval2d(x, y, c) for c in self._coeffs]
def evaluate_derivative(self, x_new='X', x_old='x'):
"""returns the matrix that, when polyval'd, gives dX/dx where
X is the new coordinate and x the old
Parameters
----------
x_new : {"X", "Y"}
x_old : {"x", "y"}
"""
coef_ind = {'X': 0, 'Y': 1}[x_new.upper()]
aij_ind = {'x': 0, 'y': 1}[x_old.lower()]
shp = {'x': (-1, 1), 'y': (1, -1)}[x_old.lower()]
aij = self._coeffs[coef_ind]
t = np.arange(aij.shape[aij_ind]).reshape(shp)
return np.roll(aij * t, -1, axis=aij_ind)
def _create_mask_for_parameters(self):
mask = np.ones(self._coeffs.shape, dtype='bool')
# 1. mask out the DC terms
mask[:, 0, 0] = False
# 2. mask out odd cross terms, so that P_x(x, y) is even in y
# and P_y(x, y) is even in x
for index_x in range(1, mask.shape[1], 2):
mask[1, index_x, :] = False
for index_y in range(1, mask.shape[2], 2):
mask[0, :, index_y] = False
return mask
class LambertToSansonTransform(object):
def evaluate(self, x, y):
phi = x
theta = np.arcsin(y)
return phi * np.cos(theta), theta
class LambertCylindricalQuadrature(object):
def __init__(self, nxpts=30):
self.nxpts = nxpts
self.nypts = int(nxpts / np.pi) + 1
self._setup_pts()
def _setup_pts(self):
# x runs from -pi, pi; y from -1, 1. So we need to adjust px, wx:
px, wx = generate_leggauss_pts_and_wts(-np.pi, np.pi, npts=self.nxpts)
py, wy = generate_leggauss_pts_and_wts(-1, 1, npts=self.nypts)
xp, yp = np.meshgrid(px, py, indexing='ij')
self._xypts = np.array([[x, y] for x, y in zip(xp.flat, yp.flat)])
self._xywts = np.outer(wx, wy).ravel()
self._xywts_sqrt = np.sqrt(self._xywts)
def integrate(self, func):
fxy = func(self._xypts)
return np.sum(fxy * self._xywts)
def integrate_as_sumofsquares(self, func):
"""Returns the integral as a set of pts such that \int f(x)^2 =
sum (ans)^2 where ans is the output of this function"""
fxy = func(self._xypts)
return np.ravel(fxy * self._xywts_sqrt)
@property
def pts(self):
return self._xypts.copy()
@property
def wts(self):
return self._xywts.copy()
@property
def sqrt_wts(self):
return self._xywts_sqrt.copy()
class LambertProjection(object):
def __init__(self, xypts):
"""xypts = [N, 2] = (x, y) in lambert projection = (phi, cos(theta))"""
self.metric = np.zeros([xypts.shape[0], 2, 2])
sin2theta = 1 - xypts[:, 1]**2
self.metric[:, 0, 0] = 1.0 / sin2theta
self.metric[:, 1, 1] = sin2theta
        # -- we don't need to regularize sin^2(theta) with a +eps b/c
        # the legendre points aren't selected at y=+-1
class SansonProjection(object):
def __init__(self, xypts):
"""metric is stored in order longitude-like, latitude-like"""
phi = xypts[:, 0]
theta = np.arcsin(xypts[:, 1])
phi_sin_theta = phi * np.sin(theta)
self.metric = np.zeros([xypts.shape[0], 2, 2])
self.metric[:, 0, 0] = 1 + phi_sin_theta**2
self.metric[:, 1, 0] = phi_sin_theta
self.metric[:, 0, 1] = phi_sin_theta
self.metric[:, 1, 1] = 1
class MetricCostEvaluator(object):
def __init__(
self,
projection_name='lambert',
nquadpts=30,
degree=(5, 5),
area_penalty=1.):
self.quadobj = LambertCylindricalQuadrature(nxpts=nquadpts)
self.projection = self._make_projection(projection_name)
self.transform = CoordinateTransform(degree=degree)
self.area_penalty = area_penalty
def call(self, params):
"""Returns a vector whose sum-of-squares is a cost"""
self.update(params)
dg, da = self.calculate_metric_residuals()
return np.hstack([dg, self.area_penalty * da])
def update_area_penalty(self, new_penalty):
self.area_penalty = new_penalty
@property
def params(self):
return self.transform.params
def update(self, params):
self.transform.update(params)
def calculate_metric_residuals(self):
new_metric = self._calculate_metric()
deviation_from_isometry = new_metric - np.eye(2).reshape(1, 2, 2)
deviation_from_equiareal = np.linalg.det(new_metric) - 1.
deviation_from_isometry *= self.quadobj.sqrt_wts.reshape(-1, 1, 1)
deviation_from_equiareal *= self.quadobj.sqrt_wts
return deviation_from_isometry.ravel(), deviation_from_equiareal
def _calculate_metric(self):
old_metric = self.projection.metric
# 2. The transformation matrix dX/dx
xy = self.quadobj.pts
dXdx = np.zeros([xy.shape[0], 2, 2], dtype='float')
for a, b in itertools.product([0, 1], [0, 1]):
aij = self.transform.evaluate_derivative(
x_new='XY'[a], x_old='xy'[b])
dXdx[:, a, b] = polyval2d(xy[:, 0], xy[:, 1], aij)
# 3. The new metric
new_metric = np.einsum('...ij,...ik,...jl', old_metric, dXdx, dXdx)
return new_metric
def _make_projection(self, projection_name):
projection_name = projection_name.lower()
if 'lambert' == projection_name:
cls = LambertProjection
elif 'sanson' == projection_name:
cls = SansonProjection
else:
raise ValueError(f"Invalid projection: {projection_name}")
return cls(self.quadobj.pts)
def l2av(x):
return np.mean(x*x)
def generate_leggauss_pts_and_wts(lower, upper, npts=30):
pts, wts = np.polynomial.legendre.leggauss(npts)
pts += 1
pts *= 0.5 * (upper - lower)
pts += lower
wts *= 0.5 * (upper - lower)
return pts, wts
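
# A minimal usage sketch, assuming scipy is available; everything else
# is defined above. It fits the polynomial coordinate transform by
# least-squares on the metric residuals from MetricCostEvaluator.call.
if __name__ == '__main__':
    from scipy.optimize import least_squares

    evaluator = MetricCostEvaluator(
        projection_name='lambert', nquadpts=30, degree=(5, 5),
        area_penalty=1.0)
    # start from the identity transform (X=x, Y=y) stored in params
    result = least_squares(evaluator.call, evaluator.params)
    evaluator.update(result.x)
    dg, da = evaluator.calculate_metric_residuals()
    print('mean-square isometry residual:', l2av(dg))
    print('mean-square area residual:', l2av(da))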
| true
|
cc2478fd0c1788ce14a5b7ef7b1b071ecd5f4fcc
|
Python
|
mushahiroyuki/beginning-python
|
/Chapter09/0916-generator-comprehension.py
|
UTF-8
| 975
| 3.515625
| 4
|
[] |
no_license
|
#@@range_begin(list1) # <- ignore this line; it marks a range quoted in the book text.
# file name: Chapter09/0916-generator-comprehension.py
#@@range_end(list1) # <- ignore this line; it marks a range quoted in the book text.
#@@range_begin(list2) # <- ignore this line; it marks a range quoted in the book text.
g = ((i + 2) ** 2 for i in range(2, 27))
print(next(g)) #<- 16
print(next(g)) #<- 25
print(next(g)) #<- 36
print(next(g)) #<- 49
print(next(g)) #<- 64
#@@range_end(list2) # <- ignore this line; it marks a range quoted in the book text.
print("-----")
#@@range_begin(list3) # <- ignore this line; it marks a range quoted in the book text.
print(sum(i ** 2 for i in range(10))) #<- 285 = 1+4+9+16+25+36+49+64+81
#@@range_end(list3) # <- ignore this line; it marks a range quoted in the book text.
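
# A small added sketch: a generator expression is single-use -- once
# exhausted, iterating it again yields nothing.
g2 = (i * i for i in range(3))
print(list(g2))  #<- [0, 1, 4]
print(list(g2))  #<- [] (already exhausted)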
| true
|
3018fb23d501984e292a2207d490310ccc005974
|
Python
|
MoriMorou/GB_Python
|
/Methods of collecting and processing data from the Internet/Lesson_6_Selenium For Python. Parsim dynamic and private data/selenium_example.py
|
UTF-8
| 1,678
| 2.65625
| 3
|
[] |
no_license
|
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("https://mail.ru")
# print the full HTML source of the page
print(driver.page_source)
# sanity-check the page title
assert "Mail" in driver.title
driver.close()
# cartridges = []
#
#
# for item in range(1, 10):
# elem = driver.find_element_by_name("ctl00$MainContent$cartridgefinder$uxStep1Manufacturer")
# all_options = elem.find_elements_by_tag_name("option")
# brand = all_options[1].get_attribute("value")
# all_options[1].click()
# elem = driver.find_element_by_name("ctl00$MainContent$cartridgefinder$uxStep2machineType")
# option = elem.find_elements_by_tag_name("option")[1]
# family = option.get_attribute("value")
# option.click()
# elem = driver.find_element_by_name("ctl00$MainContent$cartridgefinder$uxStep3MachineName")
# option = elem.find_elements_by_tag_name("option")[item]
# printer = option.get_attribute("value")
# option.click()
# option = driver.find_elements_by_id("MainContent_cartridgefinder_Button1")[0]
# option.click()
# names = driver.find_elements_by_xpath("//a[contains(@id, 'MainContent_SearchResults_uxSearchResults_GridView_uxProductLinkTitle')]")
# for name in names:
# if ']' in name.text:
# cartridge_code = name.text.split()[-3]
# else:
# cartridge_code = name.text.split()[-1]
# cartridges.append((brand, family, printer, name.text, cartridge_code))
# driver.get("https://www.officerange.com/cartridge-finder")
# driver.close()
#
# for cartridge in cartridges:
# print(cartridge)
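#
# A sketch of an explicit wait, in the same commented-out style as the
# example above; the element id "mailbox" is illustrative only.
#
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
#
# driver = webdriver.Chrome()
# driver.get("https://mail.ru")
# element = WebDriverWait(driver, 10).until(
#     EC.presence_of_element_located((By.ID, "mailbox")))
# driver.close()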
| true
|
efa8116e5f20bd5f77d0e57edc3b4441ca34156a
|
Python
|
yesmider/TradingViewer
|
/bad.py
|
UTF-8
| 722
| 3.234375
| 3
|
[] |
no_license
|
from random import *
import os
your_lucky_number = []
your_lucky_number.append(randint(1,50000))
run_time = 0
bad_luck_checker = 1
item_num = 0
total = 0
while item_num < 10:
if randint(1,50000) in your_lucky_number:
print('got in '+str(run_time))
total += run_time
run_time = 0
your_lucky_number = [randint(1,50000)]
item_num +=1
print(item_num)
else:
bad_luck_checker = 1
if item_num < 4:
while bad_luck_checker:
x = randint(1,50000)
if x not in your_lucky_number:
your_lucky_number.append(x)
bad_luck_checker = 0
run_time += 1
print('got 10 item in '+str(total))
os.system('pause')
| true
|
58a713308555a9e588a06ed87bed6d13e54b2b57
|
Python
|
osamudio/python-scripts
|
/fibonacci_2.py
|
UTF-8
| 303
| 4.03125
| 4
|
[] |
no_license
|
def fib(n: int) -> None:
"""
prints the Fibonacci sequence of the first n numbers
"""
a, b = 0, 1
for _ in range(n):
print(a, end=' ')
a, b = b, a+b
print()
return
if __name__ == "__main__":
n = int(input("please, enter a number: "))
fib(n)
| true
|
f355d42dd52cdd4438a45986285fe1905e4e8c64
|
Python
|
jhgdike/leetCode
|
/leetcode_python/901-1000/999.py
|
UTF-8
| 1,589
| 3.109375
| 3
|
[] |
no_license
|
class Solution(object):
def numRookCaptures(self, board):
"""
:type board: List[List[str]]
:rtype: int
"""
res = 0
m, n = self.find_r(board)
for i in range(m - 1, -1, -1):
if board[i][n] == 'B':
break
if board[i][n] == 'p':
res += 1
break
for i in range(m + 1, len(board)):
if board[i][n] == 'B':
break
if board[i][n] == 'p':
res += 1
break
for i in range(n - 1, -1, -1):
if board[m][i] == 'B':
break
if board[m][i] == 'p':
res += 1
break
for i in range(n + 1, len(board[0])):
if board[m][i] == 'B':
break
if board[m][i] == 'p':
res += 1
break
return res
def find_r(self, board):
m = n = 0
while m < len(board):
n = 0
while n < len(board[0]):
if board[m][n] == 'R':
return m, n
n += 1
m += 1
board = [[".", ".", ".", ".", ".", ".", ".", "."], [".", ".", ".", "p", ".", ".", ".", "."],
[".", ".", ".", "R", ".", ".", ".", "p"], [".", ".", ".", ".", ".", ".", ".", "."],
[".", ".", ".", ".", ".", ".", ".", "."], [".", ".", ".", "p", ".", ".", ".", "."],
[".", ".", ".", ".", ".", ".", ".", "."], [".", ".", ".", ".", ".", ".", ".", "."]]
print(Solution().find_r(board))
| true
|
0b988079500d6536abbe012425ef622e2a027329
|
Python
|
stanislavmarochok/TensorflowImageClassification
|
/main.py
|
UTF-8
| 17,237
| 2.625
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import sklearn
import sklearn.preprocessing as ppc
import random
import pickle
import json
import os
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, LeakyReLU
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.python.keras.models import load_model
from tensorflow.keras.callbacks import TensorBoard
DATADIR = r"d:\Study\Ing\1 semester\student\I-SUNS\Zadanie 5\files\data\images"
IMG_SIZE = 50
CONV_NETWORK_MODEL_SAVED_DIR = "conv_network_model_saved"
CONV_NETWORK_MODEL_NAME = "conv_network_model.h5"
CONV_NETWORK_FIT_HISTORY = "conv_network_git_history"
# change this variable to a category you want to classify images
# possible values are "gender", "masterCategory", "usage", "season"
CLASSIFICATION_CATEGORY = "masterCategory"
# *****************************************************************************
# This function prints unique values from *column of *df (DataFrame)
# *****************************************************************************
def print_unique_values_for_column(df, column):
unique_values = df[column].unique().tolist()
print("{: >15} {: 4} ".format(column, len(unique_values)), unique_values)
# *****************************************************************************
# This function is used for printing unique values from DataFrame
# *****************************************************************************
def print_unique_values(df):
print_unique_values_for_column(df, 'gender')
print_unique_values_for_column(df, 'masterCategory')
print_unique_values_for_column(df, 'season')
print_unique_values_for_column(df, 'usage')
# ******************************************************************************
# This function saves datasets X and y to files X.pickle and y.pickle
# With the help of library pickle
# ******************************************************************************
def save_dataset(X, y):
pickle_out = open("X.pickle", "wb")
pickle.dump(X, pickle_out)
pickle_out.close()
pickle_out = open("y.pickle", "wb")
pickle.dump(y, pickle_out)
pickle_out.close()
# ******************************************************************************
# This function saves datasets X and y to files X.pickle and y.pickle
# With the help of library pickle
# ******************************************************************************
def save_model(model, history):
# create sub folder if not exists
if not os.path.exists(CONV_NETWORK_MODEL_SAVED_DIR):
os.makedirs(CONV_NETWORK_MODEL_SAVED_DIR)
# save model
model.save(os.path.join(CONV_NETWORK_MODEL_SAVED_DIR, CONV_NETWORK_MODEL_NAME))
# save fit history as json dictionary
with open(os.path.join(CONV_NETWORK_MODEL_SAVED_DIR, CONV_NETWORK_FIT_HISTORY), mode='w') as f:
json.dump(history, f)
# ******************************************************************************
# This function prints a plot with a history of Convolution Network
# Safely copy-pasted from
# https://www.datacamp.com/community/tutorials/convolutional-neural-networks-python
# ******************************************************************************
def show_plot(history):
accuracy = history['accuracy']
val_accuracy = history['val_accuracy']
loss = history['loss']
val_loss = history['val_loss']
epochs = range(len(accuracy))
plt.plot(epochs, accuracy, 'bo', label='Training accuracy')
plt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# ******************************************************************************
# This function visualizes filters of a model of a Convolution Network
# Carefully copy-pasted from
# https://machinelearningmastery.com/how-to-visualize-filters-and-feature-maps-in-convolutional-neural-networks/
# ******************************************************************************
def visualize_filters(model):
filters, biases = model.layers[4].get_weights()
f_min, f_max = filters.min(), filters.max()
filters = (filters - f_min) / (f_max - f_min)
n_filters, ix = 10, 1
for i in range(n_filters):
f = filters[:, :, :, i]
for j in range(3):
ax = plt.subplot(n_filters, 3, ix)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(f[:, :, j], cmap='gray')
ix += 1
plt.show()
# ******************************************************************************
# This function is used to test predictions of a trained neural network
# on my own image (not from dataset)
# ******************************************************************************
def test_conv_network_on_image(image_path):
if not os.path.exists(image_path):
print("Image", image_path, "not exists")
return
feature = []
try:
img_array = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(img_array, cmap="gray")
plt.show()
feature.append(img_array)
except Exception as e:
print("Some error occurred while preparing an image")
return
feature = np.array(feature).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# normalizing
feature = np.array(feature / 255.0)
# image is prepared, now lets test our network
model, history = get_convolutional_network_model()
predictions = model.predict(feature)
predicted_labels = np.argmax(predictions, axis=1)
# get a label encoder to get a text value of predicted value
df = get_dataframe(False)
le = ppc.LabelEncoder()
le.fit(df[CLASSIFICATION_CATEGORY])
# getting all unique values for our special classification category from the dataset
# converting them to text, numbers, and one-hot representation
unique_values_text = np.unique(df[CLASSIFICATION_CATEGORY]).tolist()
unique_values_numbers = le.transform(unique_values_text)
unique_values_numbers_one_hot = to_categorical(unique_values_numbers)
# printing map of unique values, just because I can lol why not
for i in range(len(unique_values_text)):
print("{: 3} {: <10} ".format(unique_values_numbers[i], unique_values_text[i]),
unique_values_numbers_one_hot[i])
print(
"Predicted category of image",
image_path,
"for category",
CLASSIFICATION_CATEGORY,
"is",
        le.inverse_transform(predicted_labels)[0])
# *******************************************************
# This function shows an image from array of obtained images
# This is just a check to control that we have written images to array
# in a correct form
# *******************************************************
def show_image_from_array(image):
plt.imshow(image, cmap="gray")
plt.show()
# ******************************************************************************
# This function creates a model of Convolution Network and returns its object
# ******************************************************************************
def get_convolutional_network_model(features_train=None, labels_train=None):
# if trained model and its history are already saved - load them
if os.path.isfile(os.path.join(CONV_NETWORK_MODEL_SAVED_DIR, CONV_NETWORK_MODEL_NAME)) \
and os.path.isfile(os.path.join(CONV_NETWORK_MODEL_SAVED_DIR, CONV_NETWORK_FIT_HISTORY)):
model = load_model(os.path.join(CONV_NETWORK_MODEL_SAVED_DIR, CONV_NETWORK_MODEL_NAME))
with open(os.path.join(CONV_NETWORK_MODEL_SAVED_DIR, CONV_NETWORK_FIT_HISTORY), "rb") as f:
history = keras.callbacks.History()
history.history = json.load(f)
# else create new ones
else:
# -----------------------------------------------------------------------------------
# the first structure of the network
# ***********************************************************************************
# model = Sequential()
#
# model.add(Conv2D(256, (3, 3), input_shape=features_train.shape[1:]))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
#
# model.add(Conv2D(256, (3, 3)))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
#
# model.add(Flatten())
# model.add(Dense(64))
#
# model.add(Dense(int(labels_train.shape[1])))
# model.add(Activation('softmax'))
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------
# the second (better) structure of the network
# **********************************************************************************************************
model = Sequential()
model.add(Conv2D(
32,
kernel_size=(3, 3),
activation='linear',
padding='same',
input_shape=features_train.shape[1:]))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='linear', padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), activation='linear', padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(128, activation='linear'))
model.add(LeakyReLU(alpha=0.1))
model.add(Dropout(0.3))
model.add(Dense(int(labels_train.shape[1]), activation='softmax'))
# -------------------------------------------------------------------------------------------------
model.compile(
loss="categorical_crossentropy",
optimizer=keras.optimizers.Adam(learning_rate=0.01),
metrics=['accuracy'])
history = model.fit(
features_train,
labels_train,
batch_size=64,
validation_split=0.2,
epochs=25,
callbacks=[TensorBoard(log_dir='logs/{}'.format("conv-network-{}".format(int(time.time()))))],
verbose=1)
save_model(model, history.history)
return model, history
# **************************************************************
# This function reads images from DATADIR directory
# **************************************************************
def obtain_images(df):
training_data = []
def create_training_data():
path = DATADIR
for img in os.listdir(path):
# uncomment this condition if you want to limit your data
# if len(training_data) > 2000:
# break
try:
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
img_id = os.path.splitext(img)[0]
row = df.loc[df['id'] == img_id]
# if there is no record for this image - skip this looser ha ha lol
if row.shape[0] == 0:
continue
# if you would like to train the model for image classification by
# another parameter (for example, masterCategory, usage, season) - just change the value of
# CLASSIFICATION_CATEGORY to whatever you want and it will work without any problems
# * of course classification category must be in the given dataset
row = row[CLASSIFICATION_CATEGORY].values[0]
training_data.append([new_array, row])
print(len(training_data))
except Exception as e:
pass
create_training_data()
random.shuffle(training_data)
features_list = []
labels_list = []
for features, label in training_data:
features_list.append(features)
labels_list.append(label)
features_list = np.array(features_list).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
save_dataset(features_list, labels_list)
return features_list, labels_list
# ********************************************************************************************
# This function creates a dataframe from csv file style.csv and returns it
# ********************************************************************************************
def get_dataframe(encode: bool = True):
def encode_data(df):
le = ppc.LabelEncoder()
for column in df.columns:
if column != 'id':
df[column] = le.fit_transform(df[column])
columns = [
'id',
'gender',
'masterCategory',
'subCategory',
'articleType',
'baseColour',
'season',
'year',
'usage',
'productDisplayName'
]
trash = [
'productDisplayName',
'year',
'baseColour',
'subCategory',
'articleType'
]
data = pd.read_csv('styles.csv', header=None, names=columns)
data.drop(trash, inplace=True, axis=1)
data.dropna(inplace=True)
data.drop_duplicates()
data = data.iloc[1:]
# TODO: pandas.get_dummies()
if encode:
encode_data(data)
data.apply(pd.to_numeric)
return data
# ********************************************************************************************
# This function prepares data for training, reads styles.csv and obtains images
# If started, continues about 7 minutes
# ********************************************************************************************
def prepare_data():
data = get_dataframe()
return obtain_images(data)
# ***********************************************
# This function trains a Convolution Network
# ***********************************************
def conv_net(X, y):
# normalizing values so they are in range 0 - 1
X = np.array(X / 255.0)
# OneHot encoding, it is necessary
y = to_categorical(np.array(y))
features_train, features_test, labels_train, labels_test = train_test_split(X, y, test_size=0.33, random_state=3)
model, history = get_convolutional_network_model(X, y)
model.summary()
evaluation = model.evaluate(features_test, labels_test, verbose=0)
print('Test loss:', evaluation[0])
print('Test accuracy:', evaluation[1])
predictions = model.predict(features_test)
predicted_labels = np.argmax(predictions, axis=1)
labels = np.argmax(labels_test, axis=1)
report = sklearn.metrics.classification_report(labels, predicted_labels)
print("classification_report:\n", report)
confusion_matrix = sklearn.metrics.confusion_matrix(y_true=labels, y_pred=predicted_labels)
print("confusion_matrix:\n", confusion_matrix)
visualize_filters(model)
show_plot(history.history)
# ****************************************************************************************************
# This function returns existing datasets X and y if they exist or creates new ones if not
# ****************************************************************************************************
def get_X_y():
if os.path.isfile("X.pickle") and os.path.isfile("y.pickle"):
with open("X.pickle", "rb") as f:
features = pickle.load(f)
with open("y.pickle", "rb") as f:
labels = pickle.load(open("y.pickle", "rb"))
else:
features, labels = prepare_data()
return features, labels
# ****************************************************************************************************
# This function removes saved data, such as "X.pickle", "y.pickle",
# directory CONV_NETWORK_MODEL_SAVED_DIR with saved model and its training history
# Call this function if you want to clear data and begin training from the very beginning
# ****************************************************************************************************
def remove_saved_data():
import shutil
if os.path.isfile("X.pickle") and os.path.isfile("y.pickle"):
os.remove("X.pickle")
os.remove("y.pickle")
if os.path.exists(CONV_NETWORK_MODEL_SAVED_DIR):
shutil.rmtree(CONV_NETWORK_MODEL_SAVED_DIR)
if __name__ == '__main__':
# uncomment next line if you want to remove saved data
# # # # # # remove_saved_data()
X, y = get_X_y()
conv_net(X, y)
test_conv_network_on_image('teniska.png')
| true
|
e75fdf5eef06abc353ed1d26a358619cec7d9c07
|
Python
|
henneyhong/Python_git
|
/python문제풀이/prac0528.py
|
UTF-8
| 339
| 2.515625
| 3
|
[] |
no_license
|
import os
import random
from datetime import datetime
import time
for num_a in range(1, 11):
value = num_a*100000
value = str(value)
print(value)
timestamp = time.mktime(datetime.today().timetuple())
s = str(timestamp)
print(s)
with open(r"log\count_log.txt", 'w', encoding="utf8") as f:
    f.write(value)
    f.write(s)
| true
|
92ea3931796b15bad2418a93484b0994f52ea6d2
|
Python
|
gilperopiola/autocursor-py
|
/autocursor.py
|
UTF-8
| 1,254
| 3.046875
| 3
|
[] |
no_license
|
import pyautogui
import time
import random
size = pyautogui.size()
width = size[0]
height = size[1]
random_nuance_min = 0.0025
random_nuance_max = 0.479
random_nuance = random.uniform(random_nuance_min, random_nuance_max)
# move to following and click and go to middle of screen
pyautogui.moveTo(1085, 255, duration = 1 + random_nuance)
random_nuance = random.uniform(random_nuance_min, random_nuance_max)
time.sleep(0.1 + random_nuance / 2)
random_nuance = random.uniform(random_nuance_min, random_nuance_max)
pyautogui.click(1085, 255)
time.sleep(0.6 + random_nuance / 2)
random_nuance = random.uniform(random_nuance_min, random_nuance_max)
pyautogui.moveTo(width/2, height/2, duration = 0.5 + random_nuance)
random_nuance = random.uniform(random_nuance_min, random_nuance_max)
time.sleep(0.15 + random_nuance / 2)
# start scrolling
total_pages_to_fetch = 10 # 10 = 150 accounts~, 100 = 1500~
scrolls_per_fetch = 12 + random.randint(0, 3)
scroll_force = -40 + random.randint(-3, 3)
for i in range(total_pages_to_fetch):
for i in range(scrolls_per_fetch):
pyautogui.scroll(scroll_force)
random_nuance = random.uniform(random_nuance_min, random_nuance_max)
time.sleep(0.03 + random_nuance / 2)
time.sleep(1.5 + random_nuance)
| true
|
f5fc2ff0969a299b577e096cc909de2fa80a6a0b
|
Python
|
Darlley/Python
|
/Curso em Video/ex034.py
|
UTF-8
| 170
| 3.65625
| 4
|
[] |
no_license
|
s = float(input("What is the employee's salary? R$"))
if s <= 1250:
    a = s + (s * 15 / 100)
else:
    a = s + (s * 10 / 100)
print('The new salary is R$', a)
| true
|
5917b61cf19f38afe845bf18da2165d035c8f8bf
|
Python
|
Giulianos/rl-stairs
|
/Training.py
|
UTF-8
| 2,511
| 3.078125
| 3
|
[] |
no_license
|
import MapLoader
import numpy as np
from World import World, Action
from Tile import Tile
from Policy import GreedyPolicy
from Option import Option, PrimitiveOption
from QLearning import learn_episodic
from State import State
def training_base(name, options, map_file):
print('Learning {}...'.format(name))
training_map = MapLoader.open_file(map_file)
# setup parameters
max_episodes = 10000
max_steps = 15
alpha = lambda t: 0.5 * np.exp(-1*t/max_episodes)
epsilon = lambda t: 0.3 * np.exp(-10*t/max_episodes)
# reward function
def reward_func(env):
if env.goal_achieved():
return 100
elif env.max_steps_surpassed() or env.goal_surpassed():
return -100
else:
return -10
# world generator
def world_gen(episode):
w = World(training_map)
w.set_max_steps(max_steps)
return w
if len(options) == 0:
options = [
PrimitiveOption(Action.NOTHING),
PrimitiveOption(Action.WALK),
PrimitiveOption(Action.JUMP),
]
policy = GreedyPolicy(options)
# learn the policy
learn_episodic(
world_gen=world_gen,
policy=policy,
reward_func=reward_func,
max_episodes=max_episodes,
alpha=alpha,
epsilon=epsilon,
discount_rate=0.95,
)
# print q table
print(policy)
policy.epsilon = 0
return policy
def jump_slab():
return training_base('jump slab', [], 'training_maps/jump_slab.map')
brick_ahead_floor = State((0,0), [[Tile.EMPTY, Tile.BRICK], [Tile.EMPTY, Tile.EMPTY]])
empty_ahead_floor = State((0,0), [[Tile.EMPTY, Tile.EMPTY], [Tile.EMPTY, Tile.EMPTY]])
def falling():
# learn to climb slabs
jump_slab_policy = jump_slab()
# learn to fall and climb slabs
jump_slab_option = Option(
'CLIMB_SLAB',
jump_slab_policy,
lambda s: s == brick_ahead_floor,
lambda s: s == empty_ahead_floor,
)
    print('CLIMB_SLAB will start when state is {}'.format(brick_ahead_floor))
    print('CLIMB_SLAB will end when state is {}'.format(empty_ahead_floor))
return training_base(
'falling',
[
PrimitiveOption(Action.NOTHING),
PrimitiveOption(Action.WALK),
PrimitiveOption(Action.JUMP),
jump_slab_option,
],
'training_maps/falling.map'
)
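
# A minimal run sketch, assuming the training maps referenced above
# exist on disk.
if __name__ == '__main__':
    trained_policy = falling()
    print('final policy:')
    print(trained_policy)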
| true
|
30d65933f5fa30c3214c4a7b4f0e3e0631e5911e
|
Python
|
DragonDodo/cipher
|
/grids2.py
|
UTF-8
| 1,559
| 3.46875
| 3
|
[] |
no_license
|
# playfair grid generator from crib
class Grid:
def __init__(self):
self.positions = {}
self.letters = {}
def getPositionOf(self, letter):
if letter in self.positions:
return self.positions[letter]
else:
return None # raise?
def getLetterAt(self, position):
if position in self.letters:
return self.letters[position]
else:
return "?"
def addLetterAt(self, letter, position):
if letter in self.positions:
del self.positions[letter]
if position in self.letters:
del self.letters[position]
self.letters[position] = letter # sanity check on bounds?
self.positions[letter] = position
def decryptPair(self, p):
p1, p2 = p
if p1 in self.positions:
x1, y1 = self.positions[p1]
else:
return "??"
if p2 in self.positions:
x2, y2 = self.positions[p2]
else:
return "??"
if x1 == x2:
return self.getLetterAt((x1,(y1+1)%5)) + self.getLetterAt ((x2,(y2+1)%5))
elif y1 == y2:
return self.getLetterAt(((x1+1)%5, y1)) + self.getLetterAt (((x2+1)%5, y2))
else:
return self.getLetterAt( (x2, y1)) + self.getLetterAt((x1, y2))
    def decrypt(self, s):
        s = s.replace(" ", "")
        out = ""
        while s:
            out += self.decryptPair(s[:2])
            s = s[2:]
        return out
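
# A small usage sketch: fill a 5x5 grid from a 25-letter alphabet
# (I/J merged) and decrypt a short pair sequence; the ciphertext is
# illustrative only.
if __name__ == '__main__':
    grid = Grid()
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"  # no J (merged with I)
    for index, letter in enumerate(alphabet):
        grid.addLetterAt(letter, (index % 5, index // 5))
    print(grid.decrypt("BC DE"))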
| true
|
9391d8bc0ae92744352d49c836afdc1c186c7860
|
Python
|
yunnuoyang/joinx-flask-day01
|
/com/joinx/04/SocketClient.py
|
UTF-8
| 274
| 3.125
| 3
|
[] |
no_license
|
import socket          # import the socket module
s = socket.socket()    # create a socket object
host = socket.gethostname()  # get the local host name
port = 12345           # set the port number
s.connect((host, port))
letter = s.recv(1024)
print(letter.decode())  # decode the received bytes
s.close()
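# A matching server sketch (kept commented out, since this file is the
# client), so the client above has something to connect to:
#
# import socket
# server = socket.socket()
# server.bind((socket.gethostname(), 12345))
# server.listen(1)
# conn, addr = server.accept()
# conn.send("hello from the server".encode())
# conn.close()
# server.close()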
| true
|
91e73376cf7f3641fd0319d4652484e1a5cf82d6
|
Python
|
haru-256/cgan.tf
|
/dcgan.py
|
UTF-8
| 14,648
| 3
| 3
|
[] |
no_license
|
import tensorflow as tf
import numpy as np
class DCGAN(object):
"""DCGAN
Parameters
----------------------
path: Path object
Filewriterを作る場所を示すpath
"""
def __init__(self,
n_hidden=100,
bottom_width=4,
ch=128,
num_data=128,
wscale=0.02,
path=None):
with tf.Graph().as_default():
            tf.set_random_seed(20170311)  # fix the random seed
with tf.variable_scope("model"):
with tf.variable_scope("input"):
self.noise = tf.placeholder(
tf.float32, [None, 100], name="z")
self.image = tf.placeholder(
tf.float32, [None, 28, 28, 1], name="x")
self.labels = tf.placeholder(tf.float32, shape=[None, 10])
self.is_training = tf.placeholder(
tf.bool, [], name="is_training")
# generate fake image
self.fake_image = self._generator(
self.noise,
self.is_training,
self.labels,
bottom_width=bottom_width,
ch=ch,
wscale=wscale)
# define real loss and fake loss
d_real = self._discriminator(self.image, self.is_training,
self.labels, num_data, wscale)
d_fake = self._discriminator(
self.fake_image,
self.is_training,
self.labels,
num_data,
wscale,
reuse=True)
# define generator loss and discriminator loss respectively
self.loss_d, self.loss_g = self.losses(d_real, d_fake)
# make train_op
self.train_op = self.make_train_op(self.loss_d, self.loss_g)
# self.d_optim, self.g_optim = self.make_train_op(self.loss_d, self.loss_g)
# tensorboard
# self.run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
# self.run_metadata = tf.RunMetadata()
tf.summary.scalar("loss_d", self.loss_d)
tf.summary.scalar("loss_g", self.loss_g)
tf.summary.image('image', self.fake_image, 10)
self.summary = tf.summary.merge_all()
# initialize
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
# make Filewriter
self.writer = tf.summary.FileWriter(
str(path), graph=self.sess.graph)
def dis_one_hot(self, labels, num_data):
"""make one-hot vector
Parametors
-----------------
labels: placeholder(tf.int32)
label data (one hot vector)
num_data: int32
number of datas
Return
----------------
one hot vector for discriminator.
shape is (N, 28, 28, C)
"""
one_hot_labels = tf.reshape(labels, [-1, 1, 1, 10])
mask = tf.ones((num_data, 28, 28, 10))
return tf.multiply(mask, one_hot_labels)
def _generator(self, inputs, is_training, labels, wscale, bottom_width,
ch):
"""build generator
Parameters
---------------------
inputs: placeholder(shape=(n_batch, n_dim=100), tf.float32)
input data.
is_training: placeholder(shape=(1), tf.bool)
training flag.
labels: placeholder(shape=(n_batch, n_class), tf.float32)
label data(one hot vector)
wscale: float
initializer's stddev
bottom_width: int
Width when converting the output of the first layer
to the 4-dimensional tensor
ch: int
Channel when converting the output of the first layer
to the 4-dimensional tensor
Return
---------------------
fake_image: Tensor(shape=(n_batch, 1, 28, 28), tf.float32)
range is -1 ~ 1
"""
# define initializer
# mean=0.0, stddev=wscale
init = tf.initializers.random_normal(stddev=wscale)
# weight decay
regularizer = tf.contrib.layers.l2_regularizer(scale=0.0001)
with tf.variable_scope("generator", reuse=None):
# concat
outputs = tf.concat([inputs, labels], axis=1)
# FC-1
outputs = tf.layers.dense(
inputs=outputs,
units=1024,
kernel_initializer=init,
kernel_regularizer=regularizer,
name="dense1")
# BN-1
outputs = tf.layers.batch_normalization(
inputs=outputs, training=is_training)
# Activation-1
outputs = tf.nn.relu(outputs)
# FC-2
outputs = tf.layers.dense(
inputs=outputs,
units=bottom_width * bottom_width * ch,
kernel_initializer=init,
kernel_regularizer=regularizer,
name="dence2")
# BN-2
outputs = tf.layers.batch_normalization(
inputs=outputs, training=is_training)
# Activation-2
outputs = tf.nn.relu(outputs)
# reshape NHWC
outputs = tf.reshape(outputs, [-1, bottom_width, bottom_width, ch])
# Deconv-3
outputs = tf.layers.conv2d_transpose(
inputs=outputs,
filters=ch // 2,
kernel_size=4,
strides=1,
padding="valid",
kernel_initializer=init,
kernel_regularizer=regularizer,
name="deconv3") # (7, 7)
# BN-3
outputs = tf.layers.batch_normalization(
inputs=outputs, training=is_training)
# Activation-3
outputs = tf.nn.relu(outputs)
# Deconv-4
outputs = tf.layers.conv2d_transpose(
inputs=outputs,
filters=ch // 4,
kernel_size=3,
strides=2,
padding="same",
kernel_initializer=init,
kernel_regularizer=regularizer,
name="deconv4") # (14, 14)
# BN-4
outputs = tf.layers.batch_normalization(
inputs=outputs, training=is_training)
# Activation-4
outputs = tf.nn.relu(outputs)
# Deconv-5
fake_image = tf.layers.conv2d_transpose(
inputs=outputs,
filters=1,
kernel_size=4,
strides=2,
padding="same",
activation=tf.nn.tanh,
kernel_initializer=init,
kernel_regularizer=regularizer,
name="deconv5") # (28, 28)
return fake_image
def _discriminator(self,
inputs,
is_training,
labels,
num_data,
wscale,
reuse=None):
"""build discriminator
Parameters
---------------------
inputs: placeholder(shape=(n_batch, 28, 28, 1), tf.float32)
input data.
is_training: placeholder(shape=(1), tf.bool)
training flag.
labels: placeholder(shape=(n_batch, 10))
labels data (one hot vector)
num_data
wscale: float
initializer's stddev
reuse: boolean
this parameter is used to tf.variable_scope()
Return
---------------------
logits: Tensor(shape=(n_batch, 1))
output data not passing through tf.nn.sigmoid()
"""
with tf.variable_scope("discriminator", reuse=reuse):
# define initializer
# mean=0.0, stddev=wscale
init = tf.initializers.random_normal(stddev=wscale)
# weight decay
regularizer = tf.contrib.layers.l2_regularizer(scale=0.0001)
# one_hot
dis_labels = self.dis_one_hot(labels, num_data)
# concat
outputs = tf.concat([inputs, dis_labels], axis=-1)
# C-1
outputs = tf.layers.conv2d(
inputs=outputs,
filters=64,
kernel_size=5,
strides=2,
padding="same",
activation=tf.nn.leaky_relu,
kernel_initializer=init,
kernel_regularizer=regularizer,
name="conv1")
# C-2
outputs = tf.layers.conv2d(
inputs=outputs,
filters=32,
kernel_size=3,
strides=2,
padding="same",
kernel_initializer=init,
kernel_regularizer=regularizer,
name="conv2")
# BN-2
outputs = tf.layers.batch_normalization(
inputs=outputs, training=is_training, scale=False)
# Activation-2
outputs = tf.nn.leaky_relu(outputs)
# C-3
outputs = tf.layers.conv2d(
inputs=outputs,
filters=16,
kernel_size=3,
strides=1,
padding="same",
kernel_initializer=init,
kernel_regularizer=regularizer,
name="conv3")
# BN-3
outputs = tf.layers.batch_normalization(
inputs=outputs, training=is_training, scale=False)
# Activation-3
outputs = tf.nn.leaky_relu(outputs)
# Flatten
outputs = tf.layers.flatten(outputs)
# FC-4
logits = tf.layers.dense(
inputs=outputs,
units=1,
kernel_initializer=init,
kernel_regularizer=regularizer)
return logits
    def losses(self, dis_real, dis_fake):
"""define loss function
Parameters
-------------------
dis_real: Tensor(shape=(num_batch, 1))
logits of real image
dis_fake: Tensor(shape=(num_batch, 1))
logits of fake(generate) image
Returns
-----------------
loss_d: Tensor(scalar)
discriminator loss value to minimize
loss_g; Tensor(salar)
generator loss value to minimize
"""
# convert labels into one-hot labels
# one_hot = tf.one_hot(label, 10)
# define loss function
with tf.name_scope("losses"):
with tf.name_scope("dis_loss"):
with tf.name_scope("weight_decay"):
reg_collection = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES,
scope='model/discriminator')
weight_decay = tf.reduce_sum(reg_collection)
"""
loss_d_real = tf.losses.sigmoid_cross_entropy(
tf.ones_like(dis_real), dis_real)
loss_d_fake = tf.losses.sigmoid_cross_entropy(
tf.zeros_like(dis_fake), dis_fake)
"""
loss_d_real = tf.reduce_mean(tf.nn.softplus(-dis_real))
loss_d_fake = tf.reduce_mean(tf.nn.softplus(dis_fake))
loss_d = (loss_d_real + loss_d_fake) / 2 + weight_decay
with tf.name_scope("gen_loss"):
with tf.name_scope("weight_decay"):
reg_collection = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES,
scope='model/generator')
weight_decay = tf.reduce_sum(reg_collection)
loss_g = tf.reduce_mean(tf.nn.softplus(-dis_fake))
"""
loss_g = tf.losses.sigmoid_cross_entropy(
tf.ones_like(dis_fake), dis_fake)
"""
loss_g = loss_g + weight_decay
return loss_d, loss_g
def make_train_op(self, loss_d, loss_g):
"""make train_step
Parameters
------------------
loss_d: Tensor(scalar)
discriminator loss value to minimize
loss_g; Tensor(salar)
generator loss value to minimize
Return
------------------
train_step: train_op
If you execute this op learning progresses
"""
with tf.name_scope("optimizer"):
# extract trainable variables from generator and discriminator
vars_g = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='model/generator')
vars_d = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='model/discriminator')
# print("vars_g", vars_g)
# print("vars_d", vars_d)
# It is necessary to update BN average_mean, etc ...
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
d_optim = tf.train.AdamOptimizer(
learning_rate=0.0001, beta1=0.5).minimize(
loss_d, var_list=vars_d)
g_optim = tf.train.AdamOptimizer(
learning_rate=0.0002, beta1=0.5).minimize(
loss_g, var_list=vars_g)
with tf.control_dependencies([g_optim, d_optim]):
train_op = tf.no_op(name='train')
return train_op
# return d_optim, g_optim
if __name__ == "__main__":
import pathlib
import numpy as np
# import matplotlib as mpl
    # mpl.use('Agg')  # for running headless over ssh
# import mnist data
# (x_train, label), _ = tf.keras.datasets.mnist.load_data()
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
is_training = tf.placeholder(tf.bool, [])
    # initialize the log directory
path = "dcgan_logs"
abs_path = pathlib.Path(path).resolve()
if abs_path.exists():
try:
abs_path.rmdir()
except OSError:
import shutil
shutil.rmtree(abs_path)
finally:
print("Init Dir!!!")
    # mnist.train.next_batch uses numpy's seed internally, so fix it here...
np.random.seed(20170311)
gan = DCGAN(path=path)
| true
|
f11545190f11de3907b739117fc277c2da314491
|
Python
|
unimonkiez/socket-project
|
/common/response.py
|
UTF-8
| 668
| 2.90625
| 3
|
[] |
no_license
|
from enum import Enum as _Enum
class ResponseTypes(_Enum):
accept = 1
reject = 2
class Response:
def __init__(self, resType: ResponseTypes, data: dict):
self.type = resType
self.data = data
def toDict(self):
return {
"type": self.type.value,
"data": self.data
}
@classmethod
def fromDict(cls, someDict):
return Response(ResponseTypes(someDict["type"]), someDict["data"])
def handle(self, res, rej):
if (self.type == ResponseTypes.accept):
res(self.data)
elif (self.type == ResponseTypes.reject):
rej(self.data)
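
# A small usage sketch showing the round trip through toDict/fromDict
# and handle:
if __name__ == '__main__':
    res = Response(ResponseTypes.accept, {"user": "alice"})
    wire = res.toDict()  # e.g. what would be serialized onto a socket
    received = Response.fromDict(wire)
    received.handle(
        lambda data: print("accepted:", data),
        lambda data: print("rejected:", data))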
| true
|
1c425c4f7a802345981aab3af2a9e00939b1fb00
|
Python
|
cianoflynn/codingchallenges
|
/codewars.com/Persistent Bugger.py
|
UTF-8
| 896
| 4.15625
| 4
|
[] |
no_license
|
# DATE: 12/03/19
# URL: https://www.codewars.com/kata/persistent-bugger/python
'''
Write a function, persistence, that takes in a positive parameter num and returns its multiplicative persistence, which is the number of times you must multiply the digits in num until you reach a single digit.
For example:
persistence(39) => 3 # Because 3*9 = 27, 2*7 = 14, 1*4=4
# and 4 has only one digit.
persistence(999) => 4 # Because 9*9*9 = 729, 7*2*9 = 126,
# 1*2*6 = 12, and finally 1*2 = 2.
persistence(4) => 0 # Because 4 is already a one-digit number.
'''
from functools import reduce
def persistence(n):
    count = 0
    while n >= 10:
        # convert the number into a list of its digits
        lista = [int(d) for d in str(n)]
        n = reduce(lambda x, y: x * y, lista)
        count += 1
    return count
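
# Quick checks using the examples from the kata description above:
print(persistence(39))   # expected 3
print(persistence(999))  # expected 4
print(persistence(4))    # expected 0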
| true
|
bbc42965bf6182bf05ad5db883e154766c1977ec
|
Python
|
shrutikamokashi/AmazonOA-4
|
/AmazonOA/splitarray1.py
|
UTF-8
| 359
| 3.3125
| 3
|
[] |
no_license
|
import math
def permutationsequence(n,k):
nums = [i for i in range(1,n+1)]
ans = ''
k -= 1
for i in range(1,n+1):
n -= 1
index,k = divmod(k, math.factorial(n))
ans += str(nums[index])
nums.remove(nums[index])
return ans
n= 3
k=3
print(permutationsequence(n,k))
| true
|
3843b8bbdb35458a9baf38cfc7587e728e5d3211
|
Python
|
santoshdkolur/face_blur
|
/face_blur_selectedFaces.py
|
UTF-8
| 2,877
| 3.109375
| 3
|
[] |
no_license
|
import cv2
import face_recognition as fr
from time import sleep
import numpy as np
import progressbar
def main():
    #Purpose : To blur out faces in a video stream to maintain privacy. This function can be reused in other projects.
#Author : Santosh D Kolur
count=0 #count frame number
copy_flocation=[] #To maintain a copy of face_locations later.
t=0 #To store time later
video=input("Enter the name of the video file: Ex- Endgame.mp4 : ")
image=fr.load_image_file(input("Enter the name of the file containing the face to be blurred including the extension.Ex: robert.jpg\n(If the image has more than one face both of them will be blurred out in the video)"))
face_locations=fr.face_locations(image)
img_face_encodings=fr.face_encodings(image,face_locations)
#fourcc = cv2.VideoWriter_fourcc(*'XVID')
cap=cv2.VideoCapture(video) #Video capture through webcam. This could be changed to any video stream or a video file.
ret,frame=cap.read()
out = cv2.VideoWriter("Output2.avi", cv2.VideoWriter_fourcc(*"XVID"), cap.get(cv2.CAP_PROP_FPS),(640,480))
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
bar = progressbar.ProgressBar(maxval=length,widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
bar.start()
while cap.isOpened():
ret,frame=cap.read() #Read frame from webcam
if ret == True:
count=count+1
bar.update(count+1)
frame1=frame[:, :, ::-1] #Converting BGR to RGB for face_recognition to work.
face_locations=fr.face_locations(frame1)
face_encodings=fr.face_encodings(frame,face_locations)
for j,face_encoding in enumerate(face_encodings):
result=fr.compare_faces(img_face_encodings,face_encoding)
if( 1 in result ): # If face found in the frame
f=face_locations[j]
top, right, bottom, left = f[0],f[1],f[2],f[3]
copy_flocation=face_locations
                    face = frame[top:bottom, left:right]
                    face = cv2.medianBlur(face, 35)
                    face = cv2.GaussianBlur(face, (35, 5), 100)
frame[top:bottom, left:right]=face
frame=cv2.resize(frame,(640,480))
out.write(np.array(frame).astype('uint8'))
#if(count==100):
#break
if cv2.waitKey(1) & 0xFF == ord('q'): #Press Q to close the window
break
if count == length:
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
bar.finish()
main()
| true
|
1f8e563092977abdb0def33e9909316905e5a8b1
|
Python
|
dragino/aws-iot-core-lorawan
|
/transform_binary_payload/src-payload-decoders/python/tabs_objectlocator.py
|
UTF-8
| 5,628
| 2.609375
| 3
|
[
"MIT-0"
] |
permissive
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Payload as described in Reference Manual (TBOL100)
# Sensors status
# Bit[0] Bit[1] Bit[2] Bit [3] Bit [4] Bit[7:5]
# 1 - button trigger event, 0 - no button trigger event 1 - moving mode, 0 - stationary mode (TBD)
# RFU
# 1 - no GNSS fix, 0 - GNSS fixed
# 1 - GNSS error, 0 - GNSS OK RFU
# Battery level
# Bits [3:0] unsigned value ν, range 1 – 14; battery voltage in V = (25 + ν) ÷ 10.
# Bits [7:4] RFU
# Temperature as measured by on-board NTC
# Bits [6:0] unsigned value τ, range 0 – 127; temperature in °C = τ - 32.
# Bit [7] RFU
# Latitude as last reported by GNSS receiver
# Bits [27:0] signed value φ, range -90,000,000 – 90,000,000; WGS84 latitude in ° = φ ÷ 1,000,000.
# *Note: little-endian format. Bits [31:28] RFU
# Longitude and position accuracy estimate as last reported by GNSS receiver
# Bits [28:0] signed value λ, range -179,999,999 – 180,000,000; WGS84 longitude in ° = λ÷ 1,000,000.
# Bits [31:29] unsigned value α, range 0-7;
# position accuracy estimate in m = 2^(α+2) (max).
# The value 7 represents an accuracy estimate of worse than 256m.
import base64
import json
DEBUG_OUTPUT = False
def dict_from_payload(base64_input: str, fport: int = None):
""" Decodes a base64-encoded binary payload into JSON.
Parameters
----------
base64_input : str
Base64-encoded binary payload
fport: int
FPort as provided in the metadata. Please note the fport is optional and can have value "None", if not provided by the LNS or invoking function.
If fport is None and binary decoder can not proceed because of that, it should should raise an exception.
Returns
-------
JSON object with key/value pairs of decoded attributes
"""
decoded = base64.b64decode(base64_input)
if DEBUG_OUTPUT:
print(f"Input: {decoded.hex().upper()}")
# Byte 1
status_flag_button_triggered = decoded[0] & 0b10000000 != 0
status_flag_moving_mode = decoded[0] & 0b01000000 != 0
status_flag_gnss_fix = decoded[0] & 0b00010000 == 0
status_flag_gnss_error = decoded[0] & 0b00001000 != 0
# Byte 2
battery = (25 + (decoded[1] & 0b00001111))/10
    # Byte 3
    temp = (decoded[2] & 0b01111111) - 32
# Bytes 4-7
lat = decoded[3] | decoded[4] << 8 | decoded[5] << 16 | decoded[6] << 24
lat = lat / 1000000
# Bytes 8-11
long = decoded[7] | decoded[8] << 8 | decoded[9] << 16 | (
decoded[10] & 0b00001111) << 24
long = long / 1000000
position_accuracy = (decoded[10] & 0b11110000) >> 4
# Output
result = {
"status_flag_button_triggered": status_flag_button_triggered,
"status_flag_moving_mode": status_flag_moving_mode,
"status_flag_gnss_fix": status_flag_gnss_fix,
"status_flag_gnss_error": status_flag_gnss_error,
"battery_value": battery,
"temp": temp,
"lat": lat,
"long": long,
"position_accuracy": position_accuracy
}
if DEBUG_OUTPUT:
print(f"Output: {json.dumps(result,indent=2)}")
return result
# Tests
if __name__ == "__main__":
test_definition = [
{
"input_encoding": "base64",
"input_value": "Ae48SPbhAgRupmA=",
"output": {
"status_flag_button_triggered": False,
"status_flag_moving_mode": False,
"status_flag_gnss_fix": True,
"status_flag_gnss_error": False,
"battery_value": 3.9,
"temp": 28,
"lat": 48.36308,
"long": 10.90714,
"position_accuracy": 6
}
}
]
for testcase in test_definition:
base64_input = None
if testcase.get("input_encoding") == "base64":
base64_input = testcase.get("input_value")
elif testcase.get("input_encoding") == "hex":
base64_input = base64.b64encode(
bytearray.fromhex(testcase.get("input_value"))).decode("utf-8")
output = dict_from_payload(base64_input)
for key in testcase.get("output"):
if testcase.get("output").get(key) != output.get(key):
raise Exception(
f'Assertion failed for input {testcase.get("input_value")}, key {key}, expected {testcase.get("output").get(key)}, got {output.get(key)}')
else:
print(
                f'"{testcase.get("input_value")}" : Successful test for key "{key}", value "{testcase.get("output").get(key)}"')
| true
|
862dfc25fa9018249cd6b1e72a9167f2875ee6ec
|
Python
|
bbrady5/SeniorDesignProject
|
/tcp_client_1.py
|
UTF-8
| 1,013
| 2.96875
| 3
|
[] |
no_license
|
# TCP Client Example
#
# This example shows how to send and receive TCP traffic with the WiFi shield.
import network, usocket
# AP info
SSID='Villanova Senior Design' # Network SSID
KEY='merakipassword' # Network key
# Init wlan module and connect to network
print("Trying to connect... (may take a while)...")
wlan = network.WINC()
print(" here 1.00")
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)
print(" here 1.0")
# We should have a valid IP now via DHCP
print(wlan.ifconfig())
print(" here 1.1")
# Get addr info via DNS
addr = usocket.getaddrinfo("www.google.com", 80)[0][4]
print(" here 1.2")
# Create a new socket and connect to addr
client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
client.connect(addr)
print(" here 2")
# Set timeout to 1s
client.settimeout(1.0)
print(" here 3")
# Send HTTP request and recv response
client.send("GET / HTTP/1.0\r\n\r\n")
print(client.recv(1024))
print(" here 4")
# Close socket
client.close()
print(" here 5")
| true
|
2de57c77248c6ec47b8dee300786e8f0d7fd367d
|
Python
|
kluangkhflemingc/geom73
|
/CountEverything.py
|
UTF-8
| 3,611
| 3.171875
| 3
|
[] |
no_license
|
# File: CountEverything.py
# Date: January 28 2021
# Authors: Kristine Luangkhot & Jennifer Debono
# Script to inventory data in a user specified folder
# Inventory to include:
# Count and report the total number of files and type - Excel files and shapefiles only
# Count and report the total number of rows or features for each file type
# List and report the fields of point shapefiles
import os
import arcpy
# Assumes that the input data and Python script are located in the same workspace (folder)
# Hard code the workspace path if input data is located elsewhere
# requests program user enter in the folder to be the current workspace
print()
print("This script was made for Spatial Properties Inc. to inventory data in a user-specified folder.")
print()
cwd = os.getcwd()
inFolder = input("Enter the name of the input folder (ensure proper case and spelling): ")
arcpy.env.workspace = cwd + "\\" + inFolder
arcpy.env.overwriteOutput = True
print("Current workspace or folder: {0}".format(arcpy.env.workspace))
# Create a list of all shapefiles
allShapefiles = arcpy.ListFeatureClasses()
# Excel files can have the extension .xlsx or .xls
# Create separate lists of Excel files with extension .xlsx and .xls
excelfilesXLSX = arcpy.ListFiles("*.xlsx")
excelfilesXLS = arcpy.ListFiles("*.xls")
# Concatenate the lists of Excel files to create one list
allExcelFiles = excelfilesXLSX + excelfilesXLS
print()
print("The folder {0} contains: {1} shapefiles and {2} Excel files.".format(inFolder, len(allShapefiles), len(allExcelFiles)))
print()
print()
print("Number of rows in each file is as follows:")
print("*********************************************************************************************************")
# Find the longest string name (file name) in the list of shapefiles for formatting purposes
maxName = len(max(allShapefiles, key=len))
print("{0: <{1}}\t{2: <{1}}".format("Shapefile Name", maxName, "Number of Features"))
# GetCount tool used to determine total number of rows for the shapefiles
# found in the shapefile list
# List and display the number of rows for each shapefile
for shapefile in allShapefiles:
numberFeatures = arcpy.management.GetCount(shapefile)
print("{0: <{1}}\t{2: <{1}}".format(shapefile, maxName, str(numberFeatures)))
print()
# Find the longest string name (file name) in the list of Excel files for formatting purposes
maxExcel = len(max(allExcelFiles, key=len))
print("{0: <{1}}\t{2: <{1}}".format("Excel File Name", maxExcel, "Number of Rows"))
for excelFile in allExcelFiles:
# Get Count tool only works with .csv, .dbf or .txt tables
# Convert all Excel files to .dbf tables to count and display the number of rows
newTable = arcpy.conversion.ExcelToTable(excelFile, excelFile)
numberRows = arcpy.management.GetCount(newTable)
print("{0: <{1}}\t{2: <{1}}".format(excelFile, maxExcel, str(numberRows)))
# New tables were not asked to be created, so new .dbf files are immediately deleted
arcpy.management.Delete(newTable)
# Use a Describe object to determine whether the shapefile contains point features (geometry shape type = point)
# List and report the field names for each point shapefile
# Checks every shapefile in the input folder
print()
print()
print("Existing point shapefiles and fields:")
print("*********************************************************************************************************")
for shapefile in allShapefiles:
desc = arcpy.Describe(shapefile)
print()
if desc.shapeType == "Point":
print(shapefile)
for field in desc.fields:
print(field.name)
| true
|
004c445b430a8e451cd57d1f904ba3c298aefa8e
|
Python
|
mjohnston89/AdventOfCode
|
/2016/Day 03/c2.py
|
UTF-8
| 855
| 3.6875
| 4
|
[] |
no_license
|
print('Advent of Code - Day 3, Challenge 2')
from itertools import islice
def isValid(sides):
return max(sides) < (sum(sides) - max(sides))
validCount = 0
# Open source file and parse each triangle
with open('input.txt') as file:
while True:
temps = [[],[],[]]
# read 3 lines of input
lineBlock = list(islice(file,3))
if not lineBlock: break
for i in range(3):
            # for each line, parse into a list of 3 ints
            # (wrap map in list() so it can be indexed below)
            lineBlock[i] = list(map(int, lineBlock[i].strip().split()))
for j in range(3):
# reorganise into groups of 3 by index
temps[j].append(lineBlock[i][j])
if (len(temps[0])==3):
for k in range(3):
# when each contains 3 sides check if valid
check = isValid(temps[k])
if check: validCount += 1
print('Number of valid triangles is: ' + str(validCount))
| true
|
a994e1715a365f16f4049d16010a33bb9936fe1d
|
Python
|
xiangbaloud/okgo_py
|
/fibo.py
|
UTF-8
| 168
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/python
def fib(n):
list_a = [0, 1]
for i in range(n):
x = list_a[-1] + list_a[-2]
list_a.append(x)
return list_a
print(fib(17))
| true
|
72b208196631a7a0917a32c770877251d3f4d1c3
|
Python
|
arohan-agate/Python-Data-Analysis-Projects
|
/Python for Data Visualization: Matplotlib & Seaborn/Mini Challenge 3.py
|
UTF-8
| 310
| 3.359375
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt

values = [20, 20, 20, 20, 20]
colors = ['g', 'r', 'y', 'b', 'm']
labels = ['AAPL', 'GOOG', 'T', 'TSLA', 'AMZN']
explode = [0, 0.2, 0, 0, 0.2]
# Use matplotlib to plot a pie chart
plt.figure(figsize = (10, 10))
plt.pie(values, colors = colors, labels = labels, explode = explode)
plt.title('STOCK PORTFOLIO')
plt.show()
| true
|
0db751a566ca53bd133371ad21511b8cd62ee130
|
Python
|
prakash959946/basic_python
|
/Gateway_problems/08_postive_or_Negative.py
|
UTF-8
| 293
| 4.34375
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Positive or Negative
"""
num = float(input("Enter any numeric value: "))
if (num < 0):
print('{0} is a Negative number'.format(num))
elif (num > 0):
print('{0} is a positive number'.format(num))
else:
print("You have entered Zero")
| true
|
e641de2d4dbe041e8817d02291d2c6a861f2c76e
|
Python
|
biocore/microsetta-public-api
|
/microsetta_public_api/models/_taxonomy.py
|
UTF-8
| 18,829
| 2.65625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
from collections import namedtuple, OrderedDict, Counter
from typing import Iterable, Dict, Optional, List
from abc import abstractmethod
import skbio
import biom
import numpy as np
import pandas as pd
import scipy.sparse as ss
from bp import parse_newick
from microsetta_public_api.exceptions import (DisjointError, UnknownID,
SubsetError)
from microsetta_public_api.utils import DataTable
from ._base import ModelBase
_gt_named = namedtuple('GroupTaxonomy', ['name', 'taxonomy', 'features',
'feature_values',
'feature_variances'])
class GroupTaxonomy(_gt_named):
"""Minimal information to characterize taxonomic detail of a group
A group may represent the taxonomic detail of a single sample. Or, a group
may represent the summarized taxonomic detail of many samples (such as
all fecal samples).
Attributes
----------
name : str
The name of the group (e.g., 'sample-foo')
taxonomy : str
A newick string of the taxonomy represented
features : list of str
The names of the features represented by the group
feature_values : list of float
Values associated with the features (e.g., relative abundance, rank,
etc)
feature_variances : list of float
Values associated with the variance of the features
Notes
-----
A feature need not be a tip of the taxonomy. However, caution is advised
over possible nesting interpretation if representing sum-to-one data at
multiple levels.
"""
__slots__ = ()
def __init__(self, *args, name=None, taxonomy=None, features=None,
feature_values=None, feature_variances=None):
if args:
raise NotImplementedError("%s only supports kwargs" %
str(self.__class__))
for k in features:
# a _very_ lightweight check to avoid expense of full newick parse.
# this is not a perfect sanity test
if k not in taxonomy:
raise UnknownID("%s is not in the taxonomy." % k)
if (features and (feature_values is None)) or len(features) != len(
feature_values):
raise ValueError("features and feature_values have a length "
"mismatch")
if feature_variances is not None and len(features) != len(
feature_variances):
raise ValueError("features and feature_variances have a length "
"mismatch")
super().__init__()
def to_dict(self) -> Dict:
return self._asdict()
def __str__(self) -> str:
return str(self.to_dict())
class Taxonomy(ModelBase):
"""Represent the full taxonomy and facilitate table oriented retrieval"""
def __init__(self, table: biom.Table, features: pd.DataFrame,
variances: biom.Table = None,
formatter: Optional['Formatter'] = None,
rank_level: int = 1
):
"""Establish the taxonomy data
Parameters
----------
table : biom.Table
Relative abundance data per sample or collapsed into higher order
entries (e.g., abx in the past year)
features : pd.DataFrame
DataFrame relating an observation to a Taxon
variances : biom.Table, optional
Variation information about a taxon within a label.
rank_level : int
The taxonomic level (depth) to compute ranks over. Level 0 is
domain, level 1 is phylum, etc.
"""
self._table = table.norm(inplace=False)
self._group_id_lookup = set(self._table.ids())
self._feature_id_lookup = set(self._table.ids(axis='observation'))
self._feature_order = self._table.ids(axis='observation')
self._features = features
if variances is None:
empty = ss.csr_matrix((len(self._table.ids(axis='observation')),
len(self._table.ids())), dtype=float)
self._variances = biom.Table(empty,
self._table.ids(axis='observation'),
self._table.ids())
else:
self._variances = variances
if set(self._variances.ids()) != set(self._table.ids()):
raise DisjointError("Table and variances are disjoint")
if set(self._variances.ids(axis='observation')) != \
set(self._table.ids(axis='observation')):
raise DisjointError("Table and variances are disjoint")
if not self._feature_id_lookup.issubset(set(self._features.index)):
raise SubsetError("Table features are not a subset of the "
"taxonomy information")
self._ranked, self._ranked_order = self._rankdata(rank_level)
self._features = self._features.loc[self._feature_order]
self._variances = self._variances.sort_order(self._feature_order,
axis='observation')
if formatter is None:
formatter: Formatter = GreengenesFormatter()
self._formatter = formatter
# initialize taxonomy tree
tree_data = ((i, lineage.split('; '))
for i, lineage in self._features['Taxon'].items())
self.taxonomy_tree = skbio.TreeNode.from_taxonomy(tree_data)
self._index_taxa_prevalence()
for node in self.taxonomy_tree.traverse():
node.length = 1
self.bp_tree = parse_newick(str(self.taxonomy_tree))
feature_taxons = self._features
self._formatted_taxa_names = {i: self._formatter.dict_format(lineage)
for i, lineage in
feature_taxons['Taxon'].items()}
def _rankdata(self, rank_level) -> (pd.DataFrame, pd.Series):
# it seems QIIME regressed and no longer produces stable taxonomy
# strings. Yay.
index = {}
for idx, v in self._features['Taxon'].items():
parts = v.split(';')
if len(parts) <= rank_level:
continue
else:
index[idx] = parts[rank_level].split('__')[-1].strip()
def collapse(i, m):
return index.get(i, 'Non-specific')
base = self._table.collapse(collapse, axis='observation', norm=False)
# 16S mitochondria reads report as g__human
keep = {v for v in base.ids(axis='observation')
if 'human' not in v.lower()}
keep -= {None, "", 'Non-specific', 'g__'}
base.filter(keep, axis='observation')
# reduce to the top observed taxa
median_order = self._rankdata_order(base)
base.filter(set(median_order.index), axis='observation')
base.rankdata(inplace=True)
# convert to a melted dataframe
base_df = base.to_dataframe(dense=True)
base_df.index.name = 'Taxon'
base_df_melted = base_df.reset_index().melt(id_vars=['Taxon'],
value_name='Rank')
base_df_melted = base_df_melted[base_df_melted['Rank'] > 0]
base_df_melted.rename(columns={'variable': 'Sample ID'}, inplace=True)
return base_df_melted, median_order
def _rankdata_order(self, table, top_n=50) -> pd.Series:
# rank by median
medians = []
for v in table.iter_data(axis='observation', dense=False):
medians.append(np.median(v.data))
medians = pd.Series(medians, index=table.ids(axis='observation'))
ordered = medians.sort_values(ascending=False).head(top_n)
ordered.loc[:] = np.arange(0, len(ordered), dtype=int)
return ordered
def _index_taxa_prevalence(self):
"""Cache the number of samples each taxon was observed in"""
features = self._table.ids(axis='observation')
n_samples = len(self._table.ids())
table_pa = self._table.pa(inplace=False)
# how many samples a feature was observed in
sample_counts = pd.Series(table_pa.sum('observation'),
index=features)
self.feature_uniques = sample_counts == 1
self.feature_prevalence = (sample_counts / n_samples)
def rare_unique(self, id_, rare_threshold=0.1):
"""Obtain the rare and unique features for an ID
Parameters
----------
id_ : str
The identifier to obtain rare/unique information for
rare_threshold : float
The threshold to consider a feature rare. Defaults to 0.1,
which is the historical rare value from the AGP
Raises
------
UnknownID
If the requested sample is not present
Returns
-------
dict
{'rare': {feature: prevalence}, 'unique': [feature, ]}
"""
if id_ not in self._group_id_lookup:
raise UnknownID('%s does not exist' % id_)
sample_data = self._table.data(id_, dense=False)
# self.feature_prevalence and self.feature_uniques are derived from
# self._table so the ordering of features is consistent
sample_prevalences = self.feature_prevalence.iloc[sample_data.indices]
sample_uniques = self.feature_uniques.iloc[sample_data.indices]
rare_at_threshold = sample_prevalences < rare_threshold
if rare_at_threshold.sum() == 0:
rares = None
else:
rares = sample_prevalences[rare_at_threshold].to_dict()
if sample_uniques.sum() == 0:
uniques = None
else:
uniques = list(sample_uniques[sample_uniques].index)
return {'rare': rares, 'unique': uniques}
def ranks_sample(self, sample_size: int) -> pd.DataFrame:
"""Randomly sample, without replacement, from ._ranked
Parameters
----------
sample_size : int
The number of elements to obtain. If value is greater than the
total number of entries in .ranked, all entries of .ranked will
be returned. If the value is less than zero, no values will be
returned
Returns
-------
pd.DataFrame
The subset of .ranked
"""
if sample_size < 0:
sample_size = 0
n_rows = len(self._ranked)
return self._ranked.sample(min(sample_size, n_rows), replace=False)
def ranks_specific(self, sample_id: str) -> pd.DataFrame:
"""Obtain the taxonomy rank information for a specific sample
Parameters
----------
sample_id : str
The sample identifier to obtain ranks for
Raises
------
UnknownID
If the requested sample is not present
Returns
-------
pd.DataFrame
The subset of .ranked for the sample
"""
subset = self._ranked[self._ranked['Sample ID'] == sample_id]
if len(subset) == 0:
raise UnknownID("%s not found" % sample_id)
else:
return subset.copy()
def ranks_order(self, taxa: Iterable[str] = None) -> List:
"""Obtain the rank order of the requested taxa names
Parameters
----------
taxa : Iterable[str], optional
The taxa to request ordering for. If not specified, return the
order of all contained taxa
Raises
------
UnknownID
If a requested taxa is not ranked or otherwise unknown
Returns
-------
list
The order of the taxa, where index 0 corresponds to the highest
ranked taxon, index 1 the next highest, etc
"""
if taxa is None:
taxa = set(self._ranked_order.index)
else:
taxa = set(taxa)
known = set(self._ranked_order.index)
unk = taxa - known
if len(unk) > 0:
raise UnknownID("One or more names are not in the top "
"ranks: %s" % ",".join(unk))
return [t for t in self._ranked_order.index if t in taxa]
def _get_sample_ids(self) -> np.ndarray:
return self._table.ids()
def _get_feature_ids(self) -> np.ndarray:
return self._table.ids(axis='observation')
def get_group_raw(self, ids: Iterable[str] = None, name: str = None):
"""Get raw values for a set of IDs"""
# NOTE: not sure if needed for Taxonomy
raise NotImplementedError
def get_group(self, ids: Iterable[str], name: str = None) -> GroupTaxonomy:
"""Get taxonomic detail for a given group
Parameters
----------
ids : list of str
The identifiers of a group to obtain
name : str
The name of the set of group. It must be provided if multiple
IDs are requested.
Raises
------
UnknownID
If an identifier is not present in the data.
ValueError
If a name is not specified when asking for multiple IDs
Returns
-------
GroupTaxonomy
Taxonomic detail associated with the ID
"""
for i in ids:
if i not in self._group_id_lookup:
raise UnknownID('%s does not exist' % i)
if len(ids) > 1:
if name is None:
raise ValueError("Name not specified.")
table = self._table.filter(set(ids), inplace=False).remove_empty()
features = table.ids(axis='observation')
feature_values = table.sum('observation')
feature_values /= feature_values.sum()
feature_variances = [0.] * len(feature_values)
else:
id_ = ids[0]
name = id_
# get data, pull feature ids out. Zeros are not an issue here as
# if it were zero, that means the feature isn't present
group_vec = self._table.data(id_, dense=False)
features = self._feature_order[group_vec.indices]
feature_values = group_vec.data
# handle variances, which may have zeros
feature_variances = self._variances.data(id_,
dense=True)
feature_variances = feature_variances[group_vec.indices]
# construct the group specific taxonomy
taxonomy = self._taxonomy_tree_from_features(features)
return GroupTaxonomy(name=name,
taxonomy=str(taxonomy).strip(),
features=list(features),
feature_values=list(feature_values),
feature_variances=list(feature_variances),
)
def _taxonomy_tree_from_features(self, features):
"""Construct a skbio.TreeNode based on the provided features"""
feature_taxons = self._features.loc[features]
tree_data = ((i, [taxon.lstrip() for taxon in lineage.split(';')])
for i, lineage in feature_taxons['Taxon'].items())
return skbio.TreeNode.from_taxonomy(tree_data)
def get_counts(self, level, samples=None) -> dict:
"""Obtain the number of unique maximal specificity features
Parameters
----------
level : str
The level to obtain feature counts for.
samples : str or iterable of str, optional
The samples to collect data for. If not provided, counts are
derived from all samples.
Returns
-------
dict
The {taxon: count} observed. The count corresponds to the greatest
taxonomic specificity in the data. As an example, if counting at
the phylum level, and FOO had 2 classified genera and a classified
family without named genera, the count returned would be {FOO: 3}.
The count is the number of features corresponding to the given
taxon that are present in any of the given samples.
"""
if samples is None:
table = self._table
elif isinstance(samples, str):
table = self._table.filter({samples, },
inplace=False).remove_empty()
else:
table = self._table.filter(set(samples),
inplace=False).remove_empty()
feature_taxons = self._features.loc[table.ids(axis='observation')]
ftn = self._formatted_taxa_names
observed = Counter([ftn[i].get(level, 'Unidentified')
for i in feature_taxons.index])
return observed
def presence_data_table(self, ids: Iterable[str]) -> DataTable:
table = self._table.filter(set(ids), inplace=False).remove_empty()
features = table.ids(axis='observation')
entries = list()
for vec, sample_id, _ in table.iter(dense=False):
for feature_idx, val in zip(vec.indices, vec.data):
entry = {
'sampleId': sample_id,
'relativeAbundance': val,
**self._formatted_taxa_names[features[feature_idx]],
}
entries.append(entry)
sample_data = pd.DataFrame(
entries,
# this enforces the column order
columns=['sampleId'] + self._formatter.labels + [
'relativeAbundance'],
# need the .astype('object') in case a
# column is completely empty (filled with
# Nan, default dtype is numeric,
# which cannot be replaced with None.
# Need None because it is valid for JSON,
# but NaN is not.
).astype('object')
sample_data[pd.isna(sample_data)] = None
return DataTable.from_dataframe(sample_data)
class Formatter:
labels: List
@classmethod
@abstractmethod
def dict_format(cls, taxonomy_string):
raise NotImplementedError()
class GreengenesFormatter(Formatter):
_map = OrderedDict(k__='Kingdom', p__='Phylum', c__='Class',
o__='Order', f__='Family', g__='Genus', s__='Species')
labels = list(_map.values())
@classmethod
def dict_format(cls, taxonomy_string: str):
ranks = [rank_.strip() for rank_ in taxonomy_string.split(';')]
formatted = OrderedDict()
for rank in ranks:
name = rank[:3]
if name in cls._map:
formatted[cls._map[name]] = rank[3:]
return formatted
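# Illustrative sketch (not part of the original module): how the formatter splits
# a Greengenes-style lineage string into named ranks.
# GreengenesFormatter.dict_format('k__Bacteria; p__Firmicutes; c__Clostridia')
# -> OrderedDict([('Kingdom', 'Bacteria'), ('Phylum', 'Firmicutes'), ('Class', 'Clostridia')])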
| true
|
305aea8c305b2873300541d5ca05be04daa9ac17
|
Python
|
Shailendre/simplilearn-python-training
|
/section1 - basic python/lesson6.4.py
|
UTF-8
| 615
| 3.828125
| 4
|
[] |
no_license
|
# exception handling
# equal to try and catch
# syntax similar to java try and catch
"""
try {}
catch ( <exception class1> e1 ) {}
catch ( <exception class2> e2) {}
"""
'''
try:
print ("6" + 5)
except Exception as e:
# raise => java 'throw e'
# raise
print (str(e)) # str(exception): => e.getMessage()
'''
# Exception handling
# multiple catches/except
# if none of them is matched with the error type
# the general exception handler is used
try:
print (5 + x)
except TypeError as t:
print (str(t))
except NameError as n:
print (str(n))
except Exception as e:
print (str(e))
| true
|
eb285a36ffe1449a632f4cce1fba3cde05390755
|
Python
|
barry800414/master_thesis
|
/errorAnalysis/ErrorAnalysis.py
|
UTF-8
| 10,770
| 2.859375
| 3
|
[] |
no_license
|
import sys
import math
import pickle
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix, accuracy_score
from misc import *
# print Coefficients in classifier
# clf: classifier
# volc: volc -> index (dict) for each column (feature)
# classMap: class-> text (dict)
def printCoef(clf, volcDict, classMap, sort=False, reverse=True, outfile=sys.stdout):
supportCoef = [MultinomialNB, LogisticRegression, LinearSVC]
if type(clf) in supportCoef:
if clf.coef_.shape[0] == 1:
print('Binary classification')
__printBinaryCoeff(clf, volcDict, classMap, sort, reverse, outfile)
else:
__printMultiClassCoeff(clf, volcDict, classMap, sort, reverse, outfile)
else:
return
def __printMultiClassCoeff(clf, volcDict, classMap, sort=False, reverse=True, outfile=sys.stdout):
print('Coefficients:', file=outfile)
coef = clf.coef_
cNum = coef.shape[0] # class number
cList = clf.classes_
fNum = coef.shape[1] # feature number
print('featureNum:', fNum)
print('main volc size:', getMainVolcSize(volcDict))
# for each class, sort the vector
cValues = list()
for ci in range(0, cNum):
values = [(i, v) for i, v in enumerate(coef[ci])]
if sort:
values.sort(key=lambda x:x[1], reverse=reverse)
else:
values = [(i, v) for i, v in enumerate(coef[ci])]
cValues.append(values)
for ci in range(0, cNum):
print('Class %s' % classMap[cList[ci]], end=',,', file=outfile)
print('', file=outfile)
for ri in range(0, fNum):
for ci in range(0, cNum):
(wIndex, value) = cValues[ci][ri]
print('(%d/%s)' % (wIndex, getWord(volcDict, wIndex)), value, sep=',', end=',', file=outfile)
print('', file=outfile)
def __printBinaryCoeff(clf, volcDict, classMap, sort=False, reverse=True, outfile=sys.stdout):
print('Coefficients:', file=outfile)
coef = clf.coef_
cNum = coef.shape[0] # class number
cList = clf.classes_
fNum = coef.shape[1] # feature number
print('featureNum:', fNum)
print('main volc size:', getMainVolcSize(volcDict))
# for each class, sort the vector
cValues = list()
values = [(i, v) for i, v in enumerate(coef[0])]
if sort:
values.sort(key=lambda x:x[1], reverse=reverse)
middle = int(fNum / 2)
cValues.append(values[0:middle]) #class 1
cValues.append(sorted(values[middle:], key=lambda x:x[1], reverse = not reverse)) #class 0
print('Class %s,, Class %s' % (classMap[1], classMap[0]), file=outfile)
for ri in range(0, max(len(cValues[0]), len(cValues[1]))):
for ci in [0, 1]:
if ri < len(cValues[ci]):
(wIndex, value) = cValues[ci][ri]
print(genOutStr(wIndex, volcDict, value), end='', file=outfile)
print('', file=outfile)
else:
values = [(i, v) for i, v in enumerate(coef[0])]
cValues.append(values)
print('Class %s' % (classMap[1]), file=outfile)
for ri in range(0, fNum):
(wIndex, value) = cValues[0][ri]
print(genOutStr(wIndex, volcDict, value), end='', file=outfile)
print('', file=outfile)
def genOutStr(wIndex, volcDict, value):
word = getWord(volcDict, wIndex, usingJson=False)
outStr = '%s, %f' % (word, value)
if len(outStr) < 60:
outStr += ' ' * (60 - len(outStr))
return outStr
# X is CSR-Matrix
def printXY(X, y, yPredict, volcDict, classMap, outfile=sys.stdout, showWordIndex=False):
assert X.shape[1] == getMainVolcSize(volcDict)
(rowNum, colNum) = X.shape
colIndex = X.indices
rowPtr = X.indptr
data = X.data
nowPos = 0
print('ConfusionMaxtrix: %s' % classMap, file=outfile)
print(confusion_matrix(y, yPredict), file=outfile)
#sumOfCol = [0.0 for i in range(0, colNum)]
docF = [0 for i in range(0, colNum)]
print('label, predict', file=outfile)
for ri in range(0, rowNum):
print(classMap[y[ri]], classMap[yPredict[ri]], sep=',', end=',', file=outfile)
for ci in colIndex[rowPtr[ri]:rowPtr[ri+1]]:
value = data[nowPos]
word = getWord(volcDict, ci, usingJson=False)
if showWordIndex:
print('(%d/%s):%.2f' % (ci, word, value), end=',', file=outfile)
else:
print('%s:%.2f' % (word, value), end=',', file=outfile)
if math.fabs(value) > 1e-15:
docF[ci] += 1
nowPos += 1
print('', file=outfile)
print('Document Frequency:', file=outfile)
for ci in range(0, colNum):
word = getWord(volcDict, ci)
print('(%d/%s):%.2f' % (ci, word, docF[ci]), file=outfile)
#print('', file=outfile)
def printCScore(logList, scorerName, y, outfile=sys.stdout):
for log in logList:
C = log['param']['C']
testScore = log['testScore'][scorerName]
valScore = log['valScore']
trainIndex = log['split']['trainIndex']
testIndex = log['split']['testIndex']
yTrain = y[trainIndex]
yTrainPredict = log['predict']['yTrainPredict']
trainScore = accuracy_score(yTrain, yTrainPredict)
print(C, valScore, trainScore, testScore, file=outfile)
def printLog(log, y, classMap, outfile=sys.stdout):
clf = log['clf']
#params = log['params']
valScore = log['valScore']
testScore = log['testScore']
param = log['param']
print(clf, file=outfile)
print('Parameters:', toStr(param), file=outfile)
print('valScore:', valScore, file=outfile)
print('testScore:', testScore, file=outfile)
trainIndex = log['split']['trainIndex']
testIndex = log['split']['testIndex']
yTrain = y[trainIndex]
yTest = y[testIndex]
yTrainPredict = log['predict']['yTrainPredict']
yTestPredict = log['predict']['yTestPredict']
print('Training Data ConfusionMaxtrix: %s' % classMap, file=outfile)
print(confusion_matrix(yTrain, yTrainPredict), file=outfile)
print('Testing Data ConfusionMaxtrix: %s' % classMap, file=outfile)
print(confusion_matrix(yTest, yTestPredict), file=outfile)
print('\n\n', file=outfile)
def printVolc(volc, outfile=sys.stdout):
print('Volcabulary:', file=outfile)
for i in range(0, len(volc)):
print(i, volc.getWord(i), sep=',', file=outfile)
def getWord(volcDict, index, usingJson=False, recursive=True):
if type(volcDict) == dict:
word = volcDict['main'].getWord(index, maxLength=15, usingJson=usingJson)
if recursive:
return __recursiveGetWord(volcDict, word, usingJson=usingJson)
else:
return word
elif type(volcDict) == list:
volcSize = [len(v['main']) for v in volcDict]
assert index < sum(volcSize)
for i in range(0, len(volcSize)):
if index >= volcSize[i]:
index = index - volcSize[i]
else:
word = volcDict[i]['main'].getWord(index, maxLength=15, usingJson=usingJson)
if recursive:
return __recursiveGetWord(volcDict, word, usingJson=usingJson)
else:
return word
def __recursiveGetWord(volcDict, word, usingJson=False):
if type(word) == str:
return word
newW = list(word)
#OLDM
if 'seed' in volcDict:
newW[0] = volcDict['seed'].getWord(word[0], maxLength=15, usingJson=usingJson)
if 'firstLayer' in volcDict:
newW[1] = volcDict['firstLayer'].getWord(word[1], maxLength=15, usingJson=usingJson)
# OM
if 'opinion' in volcDict:
t = word[0][0] if type(word[0]) == tuple else word[0]
p = None
if t == 'HOT': p = 2
elif t == 'HO': p = -1
elif t == 'OT': p = 1
if p is not None:
newW[p] = volcDict['opinion'].getWord(word[p], maxLength=15, usingJson=usingJson)
if 'target' in volcDict:
t = word[0][0] if type(word[0]) == tuple else word[0]
p = None
if t == 'HOT': p = -1
elif t == 'HT': p = -1
elif t == 'T': p = 1
elif t == 'OT': p = -1
if p is not None:
newW[p] = volcDict['target'].getWord(word[p], maxLength=15, usingJson=usingJson)
if 'holder' in volcDict:
t = word[0][0] if type(word[0]) == tuple else word[0]
p = None
if t == 'HOT': p = 1
elif t == 'HT': p = 1
elif t == 'H': p = 1
elif t == 'HO': p = 1
if p is not None:
newW[p] = volcDict['holder'].getWord(word[p], maxLength=15, usingJson=usingJson)
return newW
def getMainVolcSize(volcDict):
if type(volcDict) == dict:
return len(volcDict['main'])
elif type(volcDict) == list:
return sum([len(v['main']) for v in volcDict])
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage:', sys.argv[0], 'dataPickle logPickle outFilePrefix', file=sys.stderr)
exit(-1)
with open(sys.argv[1], 'r+b') as f:
dataP = pickle.load(f)
with open(sys.argv[2], 'r+b') as f:
logP = pickle.load(f)
outFilePrefix = sys.argv[3]
log0 = logP[0]
clf = log0['clf']
params = log0['param']
volcDict = { 'main': dataP['mainVolc'] }
with open(outFilePrefix + '_coeff.csv', 'w') as f:
print(clf, file=f)
print('Parameters:', toStr(params), sep=',', file=f)
printCoef(clf, volcDict, i2Label, sort=True, reverse=True, outfile=f)
X = dataP['X']
y = dataP['y']
trainIndex = log0['split']['trainIndex']
testIndex = log0['split']['testIndex']
valScore = log0['valScore']
testScore = log0['testScore']
with open(outFilePrefix + '_X.csv', 'w') as f:
print(clf, file=f)
print('Parameters:', toStr(params), file=f)
print('valScore:', valScore, file=f)
print('testScore:', testScore, file=f)
print('Training Data:', file=f)
XTrain = X[trainIndex]
yTrain = y[trainIndex]
yTrainPredict = log0['predict']['yTrainPredict']
printXY(XTrain, yTrain, yTrainPredict, volcDict, i2Label, outfile=f)
print('Testing Data:', file=f)
XTest = X[testIndex]
yTest = y[testIndex]
yTestPredict = log0['predict']['yTestPredict']
printXY(XTest, yTest, yTestPredict, volcDict, i2Label, outfile=f)
with open(outFilePrefix + '_log.csv', 'w') as f:
print('C\ttrain\tval\ttest', file=f)
printCScore(logP, 'Accuracy', y, outfile=f)
for log in logP:
printLog(log, y, i2Label, outfile=f)
| true
|
e1dabbf959497c3f4cf0ced808b4d1c185a8c238
|
Python
|
Toha-K-M/Basic-problems-vault
|
/Math Problems/OOP/Sorting objects according to attributes.py
|
UTF-8
| 308
| 3.375
| 3
|
[] |
no_license
|
class v:
def __init__(self,name,weight):
self.name = name
self.weight = weight
a = v('a',20)
b = v('b',10)
c = v('c',15)
li = [a,b,c]
sorted_list = sorted(li, key=lambda v:v.weight) # sorting object weight attribute nie
for i in sorted_list:
print(i.name, "=", i.weight)
| true
|
f8a4fdb9ea39bef76ec5fada1d1241b5d0aa23f5
|
Python
|
aoyono/sicpy
|
/Chapter1/themes/sqrt_newton.py
|
UTF-8
| 2,977
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
from operator import add, lt, sub, truediv
from Chapter1.themes.compound_procedures import square
from Chapter1.themes.lisp_conditionals import lisp_abs
def sqrt_iter(guess, x):
"""/!\ RecursionError rqised when using lisp_if"""
if is_good_enough(guess, x):
return guess
return sqrt_iter(improve(guess, x), x)
# return lisp_if(
# is_good_enough(guess, x),
# guess,
# sqrt_iter(improve(guess, x), x)
# )
def improve(guess, x):
return average(
guess,
truediv(x, guess)
)
def average(x, y):
return truediv(
add(x, y),
2
)
def is_good_enough(guess, x):
return lt(
lisp_abs(sub(square(guess), x)),
0.001
)
def sqrt(x):
return sqrt_iter(1.0, x)
def sqrt_unified(x):
"""A version of sqrt embedding its intermediate steps: block structure
"""
def is_good_enough(guess, x):
return lt(
lisp_abs(sub(square(guess), x)),
0.001
)
def average(x, y):
return truediv(
add(x, y),
2
)
def improve(guess, x):
return average(
guess,
truediv(x, guess)
)
def sqrt_iter(guess, x):
"""/!\ RecursionError rqised when using lisp_if"""
if is_good_enough(guess, x):
return guess
return sqrt_iter(improve(guess, x), x)
# return lisp_if(
# is_good_enough(guess, x),
# guess,
# sqrt_iter(improve(guess, x), x)
# )
return sqrt_iter(1.0, x)
def sqrt_unified_ls(x):
"""A version of sqrt embedding its intermediate steps and not passing x as a parameter to steps: lexical scoping
"""
def is_good_enough(guess):
return lt(
lisp_abs(sub(square(guess), x)),
0.001
)
def average(x, y):
return truediv(
add(x, y),
2
)
def improve(guess):
return average(
guess,
truediv(x, guess)
)
def sqrt_iter(guess):
"""/!\ RecursionError rqised when using lisp_if"""
if is_good_enough(guess):
return guess
return sqrt_iter(improve(guess))
# return lisp_if(
# is_good_enough(guess, x),
# guess,
# sqrt_iter(improve(guess, x), x)
# )
return sqrt_iter(1.0)
def run_the_magic():
print('(sqrt 9) : %s\n(sqrt (+ 100 37)) : %s\n(sqrt (+ (sqrt 2) (sqrt 3))) : %s\n(square (sqrt 1000)) : %s\n' % (
sqrt(9),
sqrt(add(100, 37)),
sqrt(add(sqrt(2), sqrt(3))),
square(sqrt(1000))
))
print(
"With internal block structure:",
"(square (sqrt_unified 1000)) : {}".format(square(sqrt_unified(1000)),),
"(square (sqrt_unified_ls 1000)) : {}".format(square(sqrt_unified_ls(1000)),),
sep='\n',
)
if __name__ == '__main__':
run_the_magic()
| true
|
20c31f946a65b3f44902adc1dc2a6226f0e5cf79
|
Python
|
owenstudy/octopusforcastbtc
|
/ordermanage.py
|
UTF-8
| 3,055
| 2.765625
| 3
|
[] |
no_license
|
# -*- coding: UTF-8 -*-
import time,traceback
import btc38.btc38client
import bterapi.bterclient
import wex.btcwexclient
'''Manage orders for all markets in a single module'''
class OrderManage:
#market: supports these two values: bter, btc38
def __init__(self,market):
self.market=market
#initialize the API client module for each market
if market=='bter':
self.clientapi=bterapi.bterclient.Client()
elif market=='btc38':
self.clientapi=btc38.btc38client.Client()
# TODO
elif market =='wex':
self.clientapi = wex.btcwexclient.Client()
#submit an order; executes directly with the given parameters
#e.g. ('doge_cny','sell',0.03,1000)
def submitOrder(self,pair, trade_type, rate, amount, connection=None, update_delay=None, error_handler=None):
return self.clientapi.submitOrder(pair,trade_type,rate,amount)
#get the balance of a given coin, or of all coins
#pair e.g. doge_cny
def getMyBalance(self,coin=None):
time.sleep(0.1)
return self.clientapi.getMyBalance(coin)
#cancel an order
def cancelOrder(self,orderid,coin_code=None):
return self.clientapi.cancelOrder(orderid,coin_code)
#order status
def getOrderStatus(self,orderid,coin_code=None):
time.sleep(0.1)
return self.clientapi.getOrderStatus(orderid,coin_code)
#list of open orders
def getOpenOrderList(self,coin_code_pair):
return self.clientapi.getOpenOrderList(coin_code_pair)
#market depth
def getMarketDepth(self,coin_code_pair):
coin_code=coin_code_pair.split('_')[0]
market_type=coin_code_pair.split('_')[1]
return self.clientapi.getMarketDepth(coin_code,market_type)
# get the trade history
def getMyTradeList(self,coin_code_pair):
coin_code=coin_code_pair.split('_')[0]
return self.clientapi.getMyTradeList(coin_code)
# get the current market price
def getMarketPrice(self,coin_code_pair):
return self.clientapi.getPrice(coin_code_pair)
#test
if __name__=='__main__':
"""
#test open order list
orderhandler=OrderManage('btc38')
orderlist=orderhandler.getOpenOrderList('doge_cny')
for order in orderlist:
print('order_id:%s,order_market:%s,coin_code_pair:%s'%\
(order.order_id,order.market,order.coin_code_pair))
pass
"""
# bterorder=OrderManage('bter')
# order=bterorder.submitOrder('doge_btc','sell',0.01,100)
# cancelorder=bterorder.cancelOrder(order.order_id,'doge')
#
# orderstatus=bterorder.getOrderStatus(order.order_id,'doge')
# print(orderstatus)
"""
bal=bterorder.getMyBalance('doge')
print('BTER:%f'%bal)
btc38order=OrderManage('btc38')
bal=btc38order.getMyBalance('doge')
print('BTC38:%f'%bal)
"""
btcwex = OrderManage('wex')
btc38 = OrderManage('btc38')
depthwex = btcwex.getMarketDepth('ltc_btc')
depthbtc38 = btc38.getMarketDepth('ltc_btc')
print(str(depthwex))
print(depthbtc38)
| true
|
f8ac3946f092f0f85eb488ceb40c0a2dcec9fed8
|
Python
|
crowdhackathon-agrifood/autoGrow
|
/BackEnd-AutoGrow/BackEnd/AutoGrowClient/TCP.py
|
UTF-8
| 2,644
| 2.578125
| 3
|
[] |
no_license
|
import time, socket, select
import Cmd, ClientProcess
Host = ""
Port = 8888
AliveSendEvery = 2 # Send alive every x seconds
LastAliveSent = -AliveSendEvery # Make sure it fires immediatelly
########################################################################
## Init Socket
SocketConnected = False
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
Socket.setblocking(False)
Socket.settimeout(0.5)
Socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
Socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True)
########################################################################
## Socket line helper functions
def SetSocketConnectedInternal(aConnected):
global SocketConnected
if aConnected != SocketConnected:
SocketConnected = aConnected
if SocketConnected:
print "Connected to Server"
else:
print "Disconnected from Server"
return
def SocketConnect():
global SocketConnected
if not SocketConnected:
Socket.connect((Host, Port))
SetSocketConnectedInternal(True)
return
def SocketDisconnect():
global SocketConnected
if SocketConnected:
try:
SetSocketConnectedInternal(False)
Socket.shutdown(socket.SHUT_RDWR)
except:
pass
return
def SocketIsReadable():
if not SocketConnected:
Result = False
else:
ReadableSockets, WritableSockets, ErrorSockets = select.select([Socket], [], [], 0)
Result = len(ReadableSockets) > 0
return Result
def ReceiveBytes(aNumBytes):
Result = ""
try:
while len(Result) != aNumBytes:
Received = Socket.recv(aNumBytes - len(Result))
if not Received:
raise Exception("Socket disconnected while reading")
SetSocketConnectedInternal(True)
Result = Result + Received
except:
SocketDisconnect()
raise
return Result
def SendBytes(aBuffer):
global SocketConnected
try:
Socket.sendall(aBuffer)
SetSocketConnectedInternal(True)
except:
SocketDisconnect()
raise
return
MaxPacketLengthCharacters = 6
MaxCmdLengthCharacters = 3
def ProcessResultBuffer(aBufferType):
Response = ReceiveBytes(MaxPacketLengthCharacters)
BytesInPacket = int(Response)
Response = ReceiveBytes(BytesInPacket)
return Response
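# Framing sketch (an inference from the constants above, not original code): each
# packet is a 6-character ASCII length header followed by that many payload bytes,
# e.g. a 5-byte payload "hello" would arrive as "000005hello".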
def SocketProcess():
# Check incoming messages from the peer
while SocketConnected and SocketIsReadable():
try:
BufferType = ReceiveBytes(1) # ctInitiator, ctResponse or ctResponseException
if BufferType == ctInitiator:
ProcessInitiatorBuffer()
else:
ProcessResultBuffer(ctResponse)
except:
pass
return
| true
|
3aeb88dfe40281c34101d5b080df3bd33ab99f54
|
Python
|
eirikhoe/advent-of-code
|
/2019/14/sol.py
|
UTF-8
| 4,470
| 3.3125
| 3
|
[] |
no_license
|
from pathlib import Path
import re
from math import ceil
import copy
data_folder = Path(__file__).parent.resolve()
file = data_folder / "input.txt"
find_ingredients = re.compile(r"(\d+ \w+)+")
class Ingredient:
def __init__(self, name, quantity):
self.name = name
self.quantity = int(quantity)
class Reaction:
def __init__(self, recipe):
ingredients = find_ingredients.findall(recipe)
for i in range(len(ingredients)):
ingredients[i] = ingredients[i].split(" ")
ingredients[i] = Ingredient(ingredients[i][1], ingredients[i][0])
self.reactants = dict(
zip(
[ingredient.name for ingredient in ingredients[:-1]],
[ingredient.quantity for ingredient in ingredients[:-1]],
)
)
self.product = ingredients[-1]
self.n_reactants = len(self.reactants)
class Reactions:
def __init__(self, reactions):
self.reactions = dict(
zip([reaction.product.name for reaction in reactions], reactions)
)
def total_reactants(self, chemical):
total = set()
if chemical == "ORE":
return total
for name in self.reactions[chemical].reactants:
total = total.union(self.total_reactants(name)).union({name})
return total
def get_ore_cost(self, ingredient):
if ingredient.name == "ORE":
return ingredient.quantity
reaction = self.reactions[ingredient.name]
multiple = ceil(ingredient.quantity / reaction.product.quantity)
ingredients = copy.deepcopy(reaction.reactants)
for name in ingredients:
ingredients[name] *= multiple
while (len(ingredients.keys()) > 1) or (list(ingredients.keys())[0] != "ORE"):
for curr_ingredient_name in ingredients:
if curr_ingredient_name != "ORE":
total_reactants_other = set()
for name in ingredients:
if name != curr_ingredient_name:
total_reactants_other = total_reactants_other.union(
self.total_reactants(name)
)
if curr_ingredient_name not in total_reactants_other:
ingredients = self._replace_ingredient_with_reactants(
curr_ingredient_name, ingredients
)
break
return ingredients["ORE"]
def _replace_ingredient_with_reactants(self, ingredient_name, ingredients):
multiple = ceil(
ingredients[ingredient_name]
/ self.reactions[ingredient_name].product.quantity
)
for reactant_name in self.reactions[ingredient_name].reactants:
added_reactant = (
multiple * self.reactions[ingredient_name].reactants[reactant_name]
)
if reactant_name in ingredients:
ingredients[reactant_name] += added_reactant
else:
ingredients[reactant_name] = added_reactant
del ingredients[ingredient_name]
return ingredients
def get_max_fuel(self, ore_reserve):
fuel = Ingredient("FUEL", 1)
unit_cost = self.get_ore_cost(fuel)
l = ore_reserve // unit_cost
fuel.quantity = l
if self.get_ore_cost(fuel) == ore_reserve:
return fuel.quantity
r = l * 2
fuel.quantity = r
while self.get_ore_cost(fuel) <= ore_reserve:
r *= 2
fuel.quantity = r
while r - l > 1:
mid = (r + l) // 2
fuel.quantity = mid
cost = self.get_ore_cost(fuel)
if cost == ore_reserve:
return mid
elif cost < ore_reserve:
l = mid
else:
r = mid
return l
def main():
reactions = Reactions([Reaction(reaction) for reaction in file.read_text().split("\n")])
ingredient = Ingredient("FUEL", 1)
print("Part 1")
print(
f"The minimum cost of producing one unit of FUEL is {reactions.get_ore_cost(ingredient)} ORE"
)
print()
print("Part 2")
ore_reserve = 1000000000000
print(
f"The maximum amount of FUEL that can be produced for\n{ore_reserve} ORE is {reactions.get_max_fuel(ore_reserve)} FUEL"
)
if __name__ == "__main__":
main()
| true
|
4ead09ea8770c14dc9b9c6119514fd469f4b47d0
|
Python
|
sparsh0008/EboxPython
|
/StudentDetails/Student.py
|
UTF-8
| 751
| 2.828125
| 3
|
[] |
no_license
|
class Student:
def __init__(self,__id,__username,__password,__name,__address,__city,__pincode,__contact_number,__email):
self.__id = __id
self.__username = __username
self.__password = __password
self.__name = __name
self.__address = __address
self.__city = __city
self.__pincode = __pincode
self.__contact_number = __contact_number
self.__email = __email
def __str__(self):
return "Id : {0}\nUser Name : {1}\nPassword : {2}\nName : {3}\nAddress : {4}\ncity : {5}\nPincode : {6}\nContact Number : {7}\nemail : {8}".format(self.__id, self.__username, self.__password, self.__name, self.__address, self.__city, self.__pincode, self.__contact_number, self.__email)
| true
|
0de5bf6cc4d6a6201f7cb8e5b74749cf4b3264b3
|
Python
|
MATA62N/RascalC
|
/python/convert_xi_to_multipoles.py
|
UTF-8
| 1,869
| 3.125
| 3
|
[] |
no_license
|
### Script to convert a measured 2PCF in xi(r,mu) format to Legendre multipoles, i.e. xi_ell(r).
### This computes all even multipoles up to a specified maximum ell, approximating the integral by a sum.
### The output form is a text file with the first column specifying the r-bin, the second giving xi_0(r), the third with xi_2(r) etc.
from scipy.special import legendre
import os,sys,numpy as np
## PARAMETERS
if len(sys.argv)!=4:
print("Usage: python convert_xi_to_multipoles.py {INFILE} {MAX_L} {OUTFILE}")
sys.exit()
infile = str(sys.argv[1])
max_l = int(sys.argv[2])
outfile = str(sys.argv[3])
assert max_l>=0, "Maximum multipole must be non-negative"
assert max_l%2==0, "Only even Legendre multipoles can be computed"
assert max_l<8, "High order multipoles cannot be reliably computed"
if not os.path.exists(infile):
raise Exception('Could not find input file %s'%infile)
r_bins = np.genfromtxt(infile,max_rows=1)
mu_bins = np.genfromtxt(infile,max_rows=1,skip_header=1)
xi_vals = np.genfromtxt(infile,skip_header=2)
## Now convert to Legendre multipoles
xi_mult = np.zeros((len(r_bins),max_l//2+1))
for ell in np.arange(0,max_l+1,2):
leg_mu = legendre(ell)(mu_bins) # evaluate the Legendre polynomial at the mu bins
# Compute integral as Int_0^1 dmu L_ell(mu) xi(r, mu) * (2 ell + 1)
xi_mult[:,ell//2] = np.sum(leg_mu*xi_vals*(mu_bins[1]-mu_bins[0]),axis=1)*(2.*ell+1)
with open(outfile,"w+") as out:
# First row contains labels
out.write("# r-bin (Mpc/h)\t")
for ell in np.arange(0,max_l+1,2):
out.write("# ell = %s\t"%ell)
out.write("\n")
# Now write data to file with each radial bin in a separate row
for r_i,r in enumerate(r_bins):
out.write("%.8e\t"%r)
for ell in np.arange(0,max_l+1,2):
out.write("%.8e\t"%xi_mult[r_i,ell//2])
out.write("\n")
print("Output file saved to %s"%outfile)
| true
|
b0d5a3fe511e26d128e4ddc2bd97b05b27a4c565
|
Python
|
duonghanbk/Python
|
/function/3. function can return something.py
|
UTF-8
| 107
| 2.75
| 3
|
[] |
no_license
|
def add(num1, num2):
print "Tong cua %d va %d la:" % (num1, num2)
return num1 + num2
print add(2,5)
| true
|
ac06daadf58665b99082a98b6f8583e22f740880
|
Python
|
techtronics/marble
|
/scrape/azlyrics_scrape.py
|
UTF-8
| 805
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import string, urllib2, httplib
from bs4 import BeautifulSoup
from urlparse import urljoin
ROOT_URL = "http://www.azlyrics.com/"
index_urls = []
for letter in string.lowercase:
index_urls.append(urljoin(ROOT_URL, letter + ".html"))
index_urls.append(urljoin(ROOT_URL, "19.html"))
for index_url in index_urls:
req = urllib2.Request(index_url)
response = None
try:
response = urllib2.urlopen(req)
page_contents = response.read()
soup = BeautifulSoup(page_contents,"html.parser")
container = soup.select("body > div.container.main-page")
# select() returns a list of Tag objects; join their markup before re-parsing
container_soup = BeautifulSoup(''.join(str(tag) for tag in container), "html.parser")
for link in container_soup.findAll('a'):
print link
except httplib.BadStatusLine:
import pdb; pdb.set_trace()
| true
|
06fad78c8f4824646a24888a1d06461060726faf
|
Python
|
dupl10/dupl
|
/UnivRanking.py
|
UTF-8
| 436
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/python3
import requests, bs4
url="http://www.zuihaodaxue.cn/BCSR/huaxue2017.html"
res=requests.get(url)
res.encoding=res.apparent_encoding
soup=bs4.BeautifulSoup(res.text,'html.parser')
#print(soup.tbody('tr'))
s=''
for elem in soup.tbody('tr')[:-1]:
for i in elem.descendants:
if type(i) == bs4.element.NavigableString:
s+=i+'\t'
elif i.string == None:
break
s+='\n'
print(s)
| true
|
113381cc355fe032eb4e9b323073858a1a613d0f
|
Python
|
zhenglinj/FangjiaViewer
|
/FangjiaScrapy/FangjiaScrapy/spiders/LianjiaBankuai.py
|
UTF-8
| 1,330
| 2.515625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from ..items import Zone
class LianjiabankuaiSpider(scrapy.Spider):
name = 'LianjiaBankuai'
allowed_domains = ['lianjia.com']
root_url = "https://hz.lianjia.com"
start_urls = ['https://hz.lianjia.com/ershoufang/']
def parse(self, response):
selections = response.xpath(
"/html/body/div[3]/div[@class='m-filter']/div[@class='position']/dl[2]/dd/div[1]/div/a")
for sel in selections:
zone = Zone()
zone["district"] = sel.xpath("text()").extract_first()
link = sel.xpath("@href").extract_first() # eg: /ershoufang/xihu/
url = self.root_url + link
zone["district_url_lj"] = link
yield Request(url=url, callback=self.process_section1, meta={'item': zone})
def process_section1(self, response):
selections = response.xpath(
"/html/body/div[3]/div[@class='m-filter']/div[@class='position']/dl[2]/dd/div[1]/div[2]/a")
for sel in selections:
zone = response.meta.get('item').copy()
zone["bizcircle"] = sel.xpath("text()").extract_first()
link = sel.xpath("@href").extract_first() # eg: /ershoufang/cuiyuan/
zone["bizcircle_url_lj"] = link
yield zone
| true
|
a8a6d084a784d3226d718d06f039234e377b20fe
|
Python
|
ttlmtang123/HttpInterfaceTesting
|
/lib/handleurl.py
|
UTF-8
| 10,121
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# -*- coding: utf-8 -*-
## Date: 2015-02-13
## Updates:
## Added timing of URL requests
## Date: 2015-04-01
## Updates:
## Moved the test file name into the config file, and added a class for getting the current path
##
import urllib.request
import urllib.parse
import urllib.error
from pathlib import Path
import json
import io
import sys
import traceback
import os
import os.path
import time
import gc
import logging
import configparser
import xml.etree.ElementTree as ET
import random # used to generate random digits and letters
# this class will create customer url request
class handleurl(object):
## for the future account
## URL_FILE_PATH = 'E:/Work/script/SimulatorAccountlist.xml'
## for the stock account
## StockAccountURLlistxml
#URL_FILE_PATH = 'StrategyURLlist.xml'
CONFIG_FILE=r'/conf/xmlFilename.conf'
URLTypes = []## stores the name attribute of <protocol name="https" brokername="微量期货仿真账户">
urllinks =[] #
URLSubNames=[] #login link
brokernames = [] # stores the brokername attribute of <protocol name="https" brokername="微量期货仿真账户">
httpstatus = ""
httpreason = ""
httpReturnValue= "" # JSON object
urltaketime ="" # time taken by the URL request
ISDEBUG=False ### True: debug mode, False: run mode
global null # the returned JSON data may contain null
null = 'null'
def __init__(self):
super()
def getConfigFile(self):
""" 测压文件已经放在config文件里"""
cf = configparser.ConfigParser()
if self.ISDEBUG: ## Use debug mode
cfFile= r'../conf/xmlFilename.conf'
else:
cfFile= os.path.normcase(os.getcwd() + os.sep +self.CONFIG_FILE)
print("[cofiguurefie]"+ cfFile)
cf.read(cfFile)
xmlfilename = cf['xmlfile']['FileName']
return xmlfilename
def getConfigFolder(self):
""" 返回配置文件xml所在的目录"""
cf = configparser.ConfigParser()
cf = configparser.ConfigParser()
if self.ISDEBUG: ## Use debug mode
cfFile= r'../conf/xmlFilename.conf'
else:
cfFile= os.path.normcase(os.getcwd() + os.sep +self.CONFIG_FILE)
print(cfFile)
cf.read(cfFile)
foldername = cf['xmlfile']['FoldName']
print(foldername)
return foldername
def getXMLFileFullPath(self):
if self.ISDEBUG: ## Use debug mode
xmlfilepath =os.path.normcase(r'..\\' +self.getConfigFolder() +os.sep +self.getConfigFile())
else:
xmlfilepath= os.path.normcase(os.getcwd() + os.sep +self.getConfigFolder() +os.sep +self.getConfigFile())
print(xmlfilepath)
return xmlfilepath
def getRootElement(self,xmlfilepath):
""" get Root element """
tree =ET.parse(xmlfilepath)
root =tree.getroot()
return root
def initTestArguments(self):
"Retyrb arry for link"
root = self.getRootElement(self.getXMLFileFullPath())
for child in root:
for item in child:
self.URLTypes.append(child.get('name'))
self.brokernames.append(child.get('Target'))
## rdmNum = random.randrange(1000,9999,4)
## rdmChar = random.choice('abcdefghijklmnopqrstuvwxyz')+ random.choice('abcdefghijklmnopqrstuvwxyz')
## itemurllink = item.get('url').replace('tang123','tang123'+str(rdmNum)+rdmChar)
itemurllink = item.get('url')
self.urllinks.append(itemurllink)
#self.urllinks.append(item.get('url'))
self.URLSubNames.append(item.text)
#print( self.URLTypes, self.brokernames,self.urllinks,self.URLSubNames)
def getURLType(self,urllink):
Re = urllib.parse.urlparse(urllink)
self.URLType = Re.scheme
return self.URLType
def getBaselink(self,urllink):
Re = urllib.parse.urlparse(urllink)
baseurl = Re.scheme + '://' + Re.netloc + Re.path + "?"
return baseurl
def getparams(self,urllink):
"""return interl parse mapping obj """
Re = urllib.parse.urlparse(urllink)
parm = urllib.parse.parse_qsl(Re.query)
return urllib.parse.urlencode(parm)
def PrpcesshttpRequest(self, baseurl,parms,encodemethod='utf-8',processmethod='GET'):
#print(baseurl)
#print(parms.encode(encodemethod))
#req = urllib.request.Request(url=baseurl,data=parms.encode(encodemethod),method=processmethod)
#print("[Handle URL]:",baseurl+str(parms.urldecode))
print('\n[URL]', baseurl + urllib.parse.unquote(parms))
try:
## strtime = time.process_time()
strtime = time.perf_counter()
req = urllib.request.Request(url=baseurl+str(parms))
httpresp = urllib.request.urlopen(req)
## endtime = time.process_time()
endtime = time.perf_counter()
## logging.info("URL tale time:", str((endtime-strtime)/1000000)) #计算http请求花费的时间
print("\n[URL 请求花费时间]:", str((endtime-strtime)/1000000),"秒") #计算http请求花费的时间
print("【URL 请求花费时间】:", str((endtime-strtime)/1000),"豪秒") #计算http请求花费的时间
self.urltaketime = str((endtime-strtime)/1000)#计算http请求花费的时间
self.httpstatus =httpresp.status
self.httpreason =httpresp.reason
#self.httpReturnValue = httpresp.read().decode('utf-8')
jstr = httpresp.read().decode('utf-8')
self.httpReturnValue = self.handleJsonString(jstr)
except urllib.error.HTTPError as httperr:
print("[Http Error]",httperr)
except urllib.error.URLError as urlerr:
print("[Error]",urlerr)
def handleJsonString(self, jstr):
"""
Process the http response string.
Return a formatted HTML table string.
# HTML table format:
===============================================================
|state (k)|value (v)|state (k)|value (v)| state (k)|value (v)|
===============================================================
| state | 0 |info |xxxxx |total |XXXX |
================================================================
|Data| XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|
================================================================
"""
tbstr = r'<table border="1">'\
+ r'<tr>'\
+ r'<td align="center">State (k)</td>'\
+ r'<td align="center">Value (v)</td>'\
+ r'<td align="center">State (k)</td>'\
+ r'<td align="center">Value (v)</td>'\
+ r'<td align="center">State (k)</td>'\
+ r'<td align="center">Value (v)</td>'\
+ r'</tr>'
tbend = r'</table>'
trstr = r'<tr>'
trend = r'</tr>'
tdstr = r'<td align="center">'
tdend = r'</td>'
tdfull = "" #完整的td标签 <td></td>
dictre = json.loads(jstr)#转为dict
for key,value in dictre.items():
if key != 'data':
tdfull = tdfull + tdstr+ key +tdend + tdstr+ str(value) +tdend # complete td tags
trfull = trstr + tdfull + trend # complete tr tag
datastr = dictre['data']
datacollect = ""
## print(type(datastr))
if type(datastr) == type (None):
datacollect = null
elif type(datastr) == type(dict()):
for k,v in datastr.items():
datacollect = datacollect + str(k) + r' : ' + str(v) + r'<hr />'
## print(k,"===.",v )
elif type(datastr) == type(list()):
if len(datastr) == 0:
datacollect = datacollect + '[]'
else:
for item in datastr:
datacollect = datacollect + str(item) + r'<hr />'
else:
datacollect = datacollect + str(datastr)
datatdfull = tdstr + "data" + tdend \
+ r'<td align="left" colspan ="5">' \
+ datacollect \
+ tdend
trfull = trfull + trstr + datatdfull + tdend # append the data row
tbstr = tbstr + trfull + tbend
return tbstr
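# Illustrative input (an assumption, not from the original file):
# jstr = '{"state": 0, "info": "ok", "total": 2, "data": null}'
# yields one row with the state/info/total key-value cells, plus a "data" row
# whose single wide cell contains the string "null".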
## def getBaseURL(self,urlstr):
## """ Return base url"""
## Re = parse.urlparse(urlstr)
## baseurl = Re.scheme + "://" +Re.netloc + Re.path
## return baseurl
##
## def getEncodeParameters(self,urlstr):
## """ Return Parameters """
## Re = parse.urlparse(urlstr)
## PsRe= urllib.parse.parse_qsl(Re.query)
## params = PsRe.urlencode(PsRe)
## return params
##
##
## def getRequest(self,urlstr):
## """ Return overwrite Request """
## baseurl = getBaseURL(urlstr)
## encodeparams = getEncodeParameters(urlstr)
## req = request.Request(url = baseurl,data = encodeparams,method = 'GET')
## return req
## def getStatus(self,urlrequest):
## """Return http accwss status"""
## f = urllib.request.urlopen(urlrequest)
## return f.status
##
## def getReason(self,urlrequest):
## """Return http accwss status"""
## f = urllib.request.urlopen(urlrequest)
## return f.reason
##
## def getReturnVaile(self,urlrequest):
## """Return http accwss status"""
## f = urllib.request.urlopen(urlrequest)
## return f.read().decode('utf-8')
##if __name__ =='__main__':
##
## handURL = handleurl()
## handURL.initTestArguments()
##
| true
|
5cf15557b156455e545ca57a8716ba1bc6010b3c
|
Python
|
masheransari/Python-Practice_1
|
/Task5/Data.py
|
UTF-8
| 395
| 3.71875
| 4
|
[] |
no_license
|
import random
class Data:
def __init__(self, data1, data2):
self.data1 = data1
self.data2 = data2
self.data1 = random.randint(0, 10)
def getdataFromUser(self):
self.data2 = int(input("Enter any number: "))
def showdata(self):
printData = "The value of data1 = " + str(self.data1) + " & data2 = " + str(self.data2)
print(printData)
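# Hypothetical usage sketch (not in the original file; getdataFromUser blocks on input):
# d = Data(0, 0)
# d.getdataFromUser()
# d.showdata()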
| true
|
616ea96b6cb6e1f64bf20f3b43e7d0532d6bdb9b
|
Python
|
Jinmin-Goh/BOJ_PS
|
/Solved/11284/11284.py
|
UTF-8
| 1,061
| 3.140625
| 3
|
[] |
no_license
|
# Problem No.: 11284
# Solver: Jinmin Goh
# Date: 20220811
# URL: https://www.acmicpc.net/problem/11284
import sys
def main():
s = input()
first_list = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ', 'ㅂ', 'ㅃ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
middle_list = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ', 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ', 'ㅚ', 'ㅛ', 'ㅜ', 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ', 'ㅣ']
last_list = ['', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ', 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ', 'ㄿ', 'ㅀ', 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
idx = ord(s) - 44032
first_word = first_list[idx // (len(middle_list) * len(last_list))]
middle_word = middle_list[(idx % (len(middle_list) * len(last_list))) // len(last_list)]
last_word = last_list[idx % len(last_list)]
print(first_word)
print(middle_word)
print(last_word)
return
if __name__ == "__main__":
main()
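# Worked example (illustrative): for input '한', idx = ord('한') - 44032 = 10588;
# 10588 // (21 * 28) = 18 -> 'ㅎ', (10588 % 588) // 28 = 0 -> 'ㅏ', 10588 % 28 = 4 -> 'ㄴ'.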
| true
|
a46caa0c49d039319dae72c0778da6a9b7c48410
|
Python
|
PacktPublishing/Python-Real-World-Machine-Learning
|
/Module 1/Chapter 6/bag_of_words.py
|
UTF-8
| 1,220
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
from nltk.corpus import brown
from chunking import splitter
if __name__=='__main__':
# Read the data from the Brown corpus
data = ' '.join(brown.words()[:10000])
# Number of words in each chunk
num_words = 2000
chunks = []
counter = 0
text_chunks = splitter(data, num_words)
for text in text_chunks:
chunk = {'index': counter, 'text': text}
chunks.append(chunk)
counter += 1
# Extract document term matrix
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df=5, max_df=.95)
doc_term_matrix = vectorizer.fit_transform([chunk['text'] for chunk in chunks])
vocab = np.array(vectorizer.get_feature_names())
print "\nVocabulary:"
print vocab
print "\nDocument term matrix:"
chunk_names = ['Chunk-0', 'Chunk-1', 'Chunk-2', 'Chunk-3', 'Chunk-4']
formatted_row = '{:>12}' * (len(chunk_names) + 1)
print '\n', formatted_row.format('Word', *chunk_names), '\n'
for word, item in zip(vocab, doc_term_matrix.T):
# 'item' is a 'csr_matrix' data structure
output = [str(x) for x in item.data]
print formatted_row.format(word, *output)
| true
|
f60f140de6584795be839044bce0f79ec943a8e3
|
Python
|
kieran-walker-0/iris
|
/iris.py
|
UTF-8
| 7,581
| 2.53125
| 3
|
[] |
no_license
|
# Internet Vulnerability Scanner and Reporting Tool
import shodan, datetime, nested_lookup
print("""
Welcome to IRIS! In order to use this program, you need a Shodan API key.
You can get one by signing up to the Shodan service here: https://account.shodan.io/register
""")
api_key = raw_input("Please input a valid Shodan API key: ")
print("Connecting to Shodan...")
try:
api = shodan.Shodan(api_key)# Shodan API initialisation.
keyinfo = api.info()
except:
print("An error occured, be sure to check your API key, internet connection and if Shodan is accessible.")
quit()
def main():
while True:
print('''
. . . . .
. ..xxxxxxxxxx.... . . .
. MWMWMWWMWMWMWMWMWMWMWMWMW .
IIIIMWMWMWMWMWMWMWMWMWMWMWMWMWMttii: . .
. IIYVVXMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWxx... . .
IWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMWMx..
IIWMWMWMWMWMWMWMWMWBY%ZACH%AND%OWENMWMWMWMWMWMWMWMWMWMWMWMWMx.. .
""MWMWMWMWMWM"""""""". .:.. ."""""MWMWMWMWMWMWMWMWMWMWMWMWMWti.
. "" . ` .: . :. : . . :. . . . . """"MWMWMWMWMWMWMWMWMWMWMWMWMti=
. . :` . : . .'.' '....xxxxx...,'. ' ' ."""YWMWMWMWMWMWMWMWMWMW+
; . ` . . : . .' : . ..XXXXXXXXXXXXXXXXXXXXx. ` . "YWMWMWMWMWMWMW
. . . . . . . ..XXXXXXXXWWWWWWWWWWWWWWWWXXXX. . . """""""
' : : . : . ...XXXXXWWW" W88N88@888888WWWWWXX. . . . .
. ' . . : ...XXXXXXWWW" M88N88GGGGGG888^8M "WMBX. . .. :
: ..XXXXXXXXWWW" M88888WWRWWWMW8oo88M WWMX. . : .
"XXXXXXXXXXXXWW" WN8888WWWWW W8@@@8M BMBRX. . : :
. XXXXXXXX=MMWW": . W8N888WWWWWWWW88888W XRBRXX. . .
.... ""XXXXXMM::::. . W8@889WWWWWM8@8N8W . . :RRXx. .
``...''" MMM::.:. . W888N89999888@8W . . ::::"RXV . :
. ..'"'" MMMm::. . WW888N88888WW . . mmMMMMMRXx
..' . ""MMmm . . WWWWWWW . :. :,miMM"' : ''` .
. . 'MMMMmm . . . . ._,mMMMM"' : ' . :
. ""'MMMMMMMMMMMMM"' . : . ' . .
. . . . . .
. . . .
IRIS - Initial Recon Internet Scanner
[1] Scan for vulnerable hosts.
[0] Quit.
''')
valid_opts = {"0", "1"}
opt = raw_input(">")
if (opt in valid_opts) == True:
pass
else:
print("Invalid option, please try again.")
main()
if opt == "0":
print("Quitting...")
quit()
if opt == "1":
apiCall()
def deviceInfo():
print("Gathering vulnerability data...")
timestamp = datetime.date.today()# Retrieves current date in YYYY-MM-DD format.
fname_raw = "IRIS-raw-"+query+"-"+str(timestamp)+".txt"# Creates filename for raw JSON to be created/opened later on.
fname_cve = "IRIS-cve-"+query+"-"+str(timestamp)+".txt"# Creates filename for CVE and CVSS scores to be created.
keys = nested_lookup.get_all_keys(page)# Gets all keys for the nested dictionary.
device_ips = []
device_cves = []
device_cvss = []
vuln_counter = 0
for ip in nested_lookup.nested_lookup('ip_str', page):# Sorts IP addresses and removes dupes. Shoutout to Jack for showing me nested_lookup.
if ip in device_ips:
continue
else:
device_ips.append(ip)
for cve in keys:
if str(cve).startswith('CVE-'):
device_cves.append(cve)
vuln_counter += 1
for cvss in nested_lookup.nested_lookup('cvss', page):
device_cvss.append(float(cvss))
f_cve = open(fname_cve, "a")
for n in range(0, vuln_counter):
vuln_print = str(device_cves[int(n)]) + " - Severity: "+ str(device_cvss[int(n)]) + " - " + "https://www.cvedetails.com/cve/"+str(device_cves[int(n)] + "\n")
f_cve.write(str(vuln_print))# Appends CVEs and CVSS scores to file.
print("Saving found vulnerabilities to " + str(fname_cve))
f_cve.close()
f_raw = open(fname_raw, "w")
f_raw.write(str(page))
print("Saving raw JSON output to " + str(fname_raw))
f_raw.close()
def apiCall():
global page
global total
global pageno_range
global query
print("""
This module will use the Shodan API to collect hosts based on given input.
If any known vulnerabilities exist within the host, IRIS will retrieve CVE information and include it in the final report.
Once all potentially vulnerable hosts have been harvested, you will be given a text file with CVE IDs and CVSS scores found during the scan.
WARNING: Please be aware that your account may only have a limited amount of credits, and larger searches may exceed this limit.
Check here for further information: https://help.shodan.io/the-basics/credit-types-explained
To learn more about Shodan query strings, see: https://help.shodan.io/the-basics/search-query-fundamentals
Remaining query credits: %s
"""% (keyinfo['query_credits']))
query = raw_input("Please enter your query> ")
if query == '':
print("Blank query submitted! Returning to main menu...")
main()
print("Gathering host information...")
pageno = []# Number list must be generated based on amount of total hosts.
#NOTE: 100 hosts per page, so e.g. 416 hosts will require page numbers 1 to 5 in the list.
try:
initial_search = api.search(query)
except:
print("An error occured, be sure to check your API key, internet connection and if Shodan is accessible.")
main()
total = (initial_search['total'])# Grabs first page and total host amount.
page = {}# Initial dictionary to house nested dicts for each page.
if total < 100:
divide = 1
else:
divide = total / 100
if (total % 100) != 0:
divide += 1
pageno_range = range(1, divide+1)
for n in pageno_range:
pageno.append(n)# Generates the list of page numbers.
page[n] = {}# Generates nested dicts within page dict.
for n in pageno:
try:
print("Gathering results ("+str(n)+"/"+str(divide)+")...")
search = api.search(query, page=n)
page[n] = search# Loops through dictionaries filling them with 100 hosts each.
except:# This should only really be thrown with a connection error, or if user CTRL+C's.
print("An exception occured, this is normally due to a timeout while requesting data from the Shodan API.")
error_opt = raw_input("Do you want to [retry] data collection or [stop] data collection? (retry|stop): ")
if error_opt == "retry":
print("Resuming data collection...")
try:
del page[n-1]# Deletes nested dict and retries from scratch.
retry_current = api.search(query, page=n-1)
except:
retry_current = api.search(query, page=n)
print("Gathering results ("+str(n)+"/"+str(divide)+")...")
pass
if error_opt == "stop":
print("Returning to main menu...")
main()
deviceInfo()
main()
| true
|
02f00c0711e8b95dd8e6008d30b2a5bc89374b24
|
Python
|
EunJooJung/Python
|
/Python01/com/test03/meter03_gugu.py
|
UTF-8
| 755
| 4.03125
| 4
|
[] |
no_license
|
#-*- coding:utf-8 -*-
# 1. Make a gugu() function that prints the whole multiplication table using a for loop.
# 2. Make a gugudan(x) that prints only the entered number's table using a while loop.
# 3. Make a main and call the two functions above.
def gugu():
for i in range(2,10):
for j in range(2,10):
            print('%d * %d = %d' % (i, j, i*j))
def gugudan():
    x = input('Enter x: ')
    for i in range(2,10):
        print('%d * %d = %d' % (int(x), i, int(x)*i))
def guguwile(x):
    print(str(x) + ' times table')
i=1
while i<10:
print('{} * {} = {}'.format(x, i, x*i))
i+=1
if __name__ == '__main__':
gugu()
#gugudan()
guguwile(3)
| true
|
3a3921f4abeb7a2f6c432f8be59346fd5c0a514e
|
Python
|
avivorly/Encrypt
|
/build/lib/Encrypt_Lab/Sim.py
|
UTF-8
| 12,763
| 2.78125
| 3
|
[] |
no_license
|
# import Input from every possible location
try:
from Encrypt_Lab.Input import Input
except:
from Input import Input
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QLabel, QVBoxLayout, QGridLayout, QWidget, QScrollArea
import numpy as np
# text labels
md5 = {
'top label': 'Enter your message below',
'msg':['Message','The message that Alice will send to Bob.'],
    'Binary_Massage':['Binary Message','The binary sequence which is equivalent to the message.'],
'alice_values':["Alice's Bits", "The bits that are sent by Alice to Bob."],
'alice_bases':["Alice's Bases","The sequence of bases that Alice uses for sending the bits."],
'alice_final':["Alice's Final Bits","Alice's bits after dismissing the bits where Alice and Bob didn't choose the same base"],
    'eav_bases':["Eve's Bases",'The sequence of bases that Eve uses for intercepting the bits.'],
'eav_values':["Eve's Bits","The bits that are received by Eve from Alice and are sent to Bob."],
'bob_bases':["Bob's Bases",'The sequence of bases that Bob uses for receiving the bits.'],
'bob_values':["Bob's Bits","The bits that are received by Bob from Eve."],
'bob_values_clean':["Bob's Reduced Bits","Bob's bits after cleaning the bits where Alice and Bob didn't choose the same bases."],
'bob_final':["Bob's Final Bits","Bob's bits after dismissing the bits where Alice and Bob didn't choose the same base"],
    'safe_channel':['Is the Channel Safe?',"If 'True' - Eve wasn't detected. If 'False' - Eve was detected."],
'bob_after_test':['Key',"The bits that are used for encryption (bits that weren't in use for the testing of the presence of Eve)."],
'enc_msg':['Encrypted Message','The message that Alice sent to Bob after using the key for encrypting the message.'],
'dyc_msg':['The Decrypted Message','The message that Bob received after using the key for decrypting the message.'],
'dyc_msg_str':['The Received Message','The message that Bob received after using the key for decrypting the message.'],
    'play':['Start procedure','start button'],
}
# translate char to binary
def to_binary(s):
h = {
'!': 26,
'?': 27,
'+': 28,
'-': 29,
'$': 30,
'₪': 31,
}
s = s.upper()
binary = ''
for c in s:
num = ord(c) - ord('A')
if not 0 <= num <= 25:
num = h[c]
binary = binary + '{0:05b}'.format(num)
return binary
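# Example (sketch): to_binary('AB') == '0000000001', since 'A' -> 0 -> '00000' and 'B' -> 1 -> '00001'.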
# translate from binary
def from_binary(s):
h = {
26:'!' ,
27:'?' ,
28:'+' ,
29:'-' ,
30:'$' ,
31:'₪'
}
ints = []
chunk = ''
for bit in s:
chunk = chunk + str(bit)
if len(chunk) == 5:
ints.append(chunk)
chunk = ''
ints = [int(aa,2) for aa in ints]
word = ''
for n in ints:
if n in h:
word = word + h[n]
else:
word = word + chr(65+n)
return word
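# Example (sketch): from_binary('0000000001') == 'AB', so from_binary(to_binary(s)) recovers s (uppercased).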
# convert boolean (0/1) into basis (+/x)
def basis(arr):
return ['+' if x else 'x' for x in arr]
# simulate transferring data between two bases: where the bases differ, the received value collapses to a random bit
def transfer(base1, base2, values):
new_values = values.copy()
diff = base1 != base2
new_values[diff] = rand(np.count_nonzero((diff)))
return new_values
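# Example (sketch): with base1 = [True, False] and base2 = [True, True], the first value is
# kept (matching bases) while the second is replaced by a random bit (mismatched bases).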
# mark the unmatched values
def transfer_view(base1, base2, values):
new_values = values.copy()
new_values[base1!=base2] = 2
return new_values
# remove the unmatched values
def clean(base1, base2, values):
return np.delete(values, np.argwhere(base1 != base2))
# mark the unmatched values
def clean_view(base1, base2, values):
values_new = values.copy()
values_new[base1!=base2] = 2
return values_new
# generate random sequence of length N
def rand(N):
return np.random.randint(2, size=int(N), dtype=bool)
# calculate statistical properties of the distribution (Eve always intercepts in this simulation)
def statistics(num):
safe_len = int(num)
alice_bases = rand(safe_len)
eav_bases = rand(safe_len)
bob_bases = rand(safe_len)
alice_values = rand(safe_len)
eav_values = transfer(eav_bases, alice_bases, alice_values)
bob_values = transfer(eav_bases, bob_bases, eav_values)
alice_values_clean = clean_view(bob_bases, alice_bases, alice_values)
bob_values_clean = clean_view(bob_bases, alice_bases, bob_values)
bob_final = clean(bob_bases, alice_bases, bob_values)
alice_final = clean(bob_bases, alice_bases, alice_values)
h = {'Bits count': safe_len, 'Matching bases count': len(alice_final),
'Matching values count': np.count_nonzero(alice_final == bob_final),
'Mismatching values count': np.count_nonzero(alice_final != bob_final)} # the actual return dictionary
h['Disagreement ratio'] = h['Mismatching values count'] / h['Matching bases count']
return h
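# Rough expectations (sketch): about half the bases match, and because Eve measures every bit here,
# roughly a quarter of the matching-basis bits disagree, so 'Disagreement ratio' tends toward 0.25.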
# the gui class with PyQt5
class Mod(QScrollArea):
def __init__(self, parent, s=''):
super().__init__(parent)
self.setWindowFlags(Qt.WindowStaysOnTopHint)
self.ws = []
self.w = w = QWidget()
self.setWidgetResizable(True)
self.setWidget(w)
l = self.l = QVBoxLayout()
w.setLayout(l)
lay = QVBoxLayout()
self.setLayout(lay)
# make sure only valid chars enter the textline
def reg(value):
import re
value = value.upper()
            allowed = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','!','?','+','-','$']
return ''.join(c for c in value if c in allowed)
# Input will create gui inputs and link them to o dictionary
ww = Input(w, 'string', 'message', opts={'filter':reg,'title':md5['top label'], 'def': 'Y','vertical':'yes', 'center_title':True, 'center_text':True, 'font':20})
ww.setStyleSheet('background-color: #1ffab2; font-size: 15px; ')
Input(w, 'bool', 'eav', opts={'title':'is Eve present','def':1,'vertical': 'yes','center_title':True})
Input(w, 'integer', 'test', opts={'title':'number of test bits','def': 5, 'vertical': 'yes','center_title':True })
self.play_btn = QPushButton(md5['play'][0])
self.play_btn.setToolTip(md5['play'][1])
l.addWidget(self.play_btn)
self.play_btn.clicked.connect(self.play)
def widget(self, w, center = True, big=False):
        if isinstance(w, QLabel):# idiomatic type check (behavior unchanged)
w.setStyleSheet('background-color: #1ffab2; font-size: 15px; ')
if big:
w.setStyleSheet(f"font-size: 25px")
self.ws.append(w)
if center:
self.l.addWidget(w, alignment=Qt.AlignCenter)
else:
self.l.addWidget(w)
    def play(self):
        # Keep retrying until play_real() finishes without raising (inputs may be momentarily invalid).
        flag = True
        while flag:
            try:
                self.play_real()
                flag = False
            except:
                pass
def play_real(self):
for w in self.ws:
w.deleteLater()
self.ws = []
self.Binary_Massage = to_binary(self.w.o['message'])
self.msg = self.w.o['message']
self.eav = self.w.o['eav']
self.check_len = self.w.o['test']
real_len = len(self.Binary_Massage)
test_len = max(self.w.o['test'],1)
sigma = int((real_len + test_len) ** 0.5)
safe_len = int(2 * (real_len + test_len) + 1 * sigma)
self.alice_bases = rand(safe_len)
self.eav_bases = rand(safe_len)
self.bob_bases = rand(safe_len)
self.alice_values = rand(safe_len)
self.eav_values = transfer(self.eav_bases,self.alice_bases, self.alice_values)
if self.eav:
self.bob_values = transfer(self.eav_bases, self.bob_bases, self.eav_values)
else:
self.bob_values = transfer(self.alice_bases, self.bob_bases, self.alice_values)
        ########## encryption ##################
self.alice_values_clean = clean_view(self.bob_bases,self.alice_bases, self.alice_values)
self.bob_values_clean = clean_view(self.bob_bases, self.alice_bases, self.bob_values)
self.bob_final = clean(self.bob_bases, self.alice_bases, self.bob_values)
self.alice_final = clean(self.bob_bases, self.alice_bases, self.alice_values)
self.safe_channel = np.array_equal(self.alice_final[-self.check_len:], self.bob_final[-self.check_len:])
self.bob_after_test = self.bob_final.copy()[:-self.check_len]
self.alice_after_test = self.alice_final.copy()[:-self.check_len]
self.real_key_alice = self.alice_after_test[:real_len]
self.real_key_bob = self.bob_after_test[:real_len]
# self.long_key = self.alice_values_clean[self.check_len:]
error = False
eff_msg = self.Binary_Massage
if len(self.real_key_bob) < len(self.Binary_Massage):
error = True
eff_msg = eff_msg[:len(self.real_key_bob)]
self.enc_msg = np.bitwise_xor(np.array([int(aa) for aa in eff_msg]) ,self.real_key_alice)
self.dyc_msg = np.bitwise_xor(self.enc_msg, self.real_key_bob)
self.dyc_msg_str = from_binary(self.dyc_msg)
first_line = []
second_line = []
labels = {}
# print every step in that array
for i, s in enumerate(['msg','Binary_Massage', 'alice_values', 'alice_bases', 'eav_bases', 'eav_values',
'bob_bases', 'bob_values', 'bob_values_clean', 'alice_final','bob_final', 'safe_channel', 'bob_after_test','enc_msg', 'dyc_msg', 'dyc_msg_str']):#'remobe different bases','alice_values_clean', 'bob_values_clean','safe_channel', 'long_key', 'real_key','enc_msg', 'dyc_msg']):
if 'eav' in s and not self.eav:
continue
l = QLabel(md5[s][0])
l.setToolTip(md5[s][1])
self.widget(l, big=True)
if s in ['safe_channel']:
self.widget(QLabel(str(self.safe_channel)), big=True)
continue
widget = QWidget(self)
ll = QGridLayout()
widget.setLayout(ll)
arr = []
labels[s] = arr
if hasattr(self,s):
if 'base' in s:
text = basis(getattr(self, s))
else:
try:
text = getattr(self, s).astype(int) # from bool to int
except:
text = getattr(self, s)
for j in range(safe_len):
color = 'black'
if j <= len(text) - 1:
char = text[j]
else:
char = ' '
if char == 2:
char = ''
t = QLabel(str(char))
if s in ['msg', 'dyc_msg','real_key','enc_msg', 'bob_final','alice_final', 'bob_after_test', 'dyc_msg_str'] and char == ' ':
continue
else:
if s in ['msg',]:#'dyc_msg'
t.setFixedSize(30*5+24, 30)
else:
t.setFixedSize(30, 30)
t.setStyleSheet(f'border-color:{color}; border-width: 3px;border-style: solid; text-align: center')
t.setAlignment(Qt.AlignCenter)
if s == 'msg':
first_line.append(t)
self.msg_widget = widget
if s == 'Binary_Massage':
self.fg_widget = widget
second_line.append(t)
ll.addWidget(t, i, j)
arr.append(t)
if s != 'msg':
self.widget(widget) # add another widget
else:
self.ws.append(widget) # add another widget
self.l.addWidget(widget)
                widget.move(0,0) # move to beginning
# color similar columns
for i, (a_v, a_b, b_v, b_b) in enumerate(zip(labels['alice_values'], labels['alice_bases'], labels['bob_values'], labels['bob_bases'])):
if a_b.text() != b_b.text():
color = 'red'
elif a_v.text() == b_v.text():
color = 'green'
else:
color = 'orange'
for col in ['alice_values', 'alice_bases', 'bob_values', 'bob_bases', 'eav_bases', 'eav_values', 'bob_values_clean']:
if col in labels:
labels[col][i].setStyleSheet(f'border-color:{color}; border-width: 3px;border-style: solid; text-align: center')
if error:
self.widget(QLabel('key was not long enough'))
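# Hedged usage sketch (assumes a PyQt5 QApplication and the Input module are importable):
# import sys
# from PyQt5.QtWidgets import QApplication
# app = QApplication(sys.argv)
# sim = Mod(None)
# sim.show()
# sys.exit(app.exec_())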
| true
|
ae0a636a424ef083b2ec91281bafe39069e1f600
|
Python
|
vyadzmak/Cent.Api
|
/models/app_models/dynamic_table_models/dynamic_table_model.py
|
UTF-8
| 453
| 3.203125
| 3
|
[] |
no_license
|
class DynamicTableHeader():
    def __init__(self, text, align, value):
        self.text = text
        self.align = align
        self.value = value


class DynamicTable():
    def __init__(self):
        self.headers = []
        self.items = []

    def init_header_element(self, text, align, value):
        self.headers.append(DynamicTableHeader(text, align, value))

    def init_item(self, item):
        self.items.append(item)
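
# Hedged usage sketch (field values assumed for illustration):
# table = DynamicTable()
# table.init_header_element('Name', 'left', 'name')
# table.init_item({'name': 'row 1'})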
| true
|
ebce387e9c8bcfe2285f309991719870a80b7900
|
Python
|
Mhmdbakhtiari/rivals-workshop-assistant
|
/tests/test_sprite_generation.py
|
UTF-8
| 4,477
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
import pytest
from PIL import Image, ImageDraw
import rivals_workshop_assistant.asset_handling.sprite_generation as src
from tests.testing_helpers import make_canvas, assert_images_equal
def show_delta(img1, img2):
diff = Image.new("RGB", img1.size, (255, 255, 255))
for x1 in range(img1.size[0]):
for y1 in range(img1.size[1]):
x2 = img1.size[0] - 1 - x1
y2 = img1.size[1] - 1 - y1
p1 = img1.getpixel((x1, y1))
p2 = img2.getpixel((x2, y2))
p3 = round((p1[0] / 2) - (p2[0] / 2)) + 128
diff.putpixel((x1, y1), (p3, p3, p3))
img1.show()
img2.show()
diff.show()
@pytest.mark.parametrize(
'size',
[
pytest.param(22),
pytest.param(43)
]
)
def test__generate_sprite_for_file_name__circle(size):
file_name = f"circle_{size}.png"
result = src.generate_sprite_for_file_name(file_name)
expected = make_canvas(size, size)
ImageDraw.Draw(expected).ellipse((0, 0, size - 1, size - 1),
outline="black")
assert_images_equal(result, expected)
@pytest.mark.parametrize(
'color, size',
[
pytest.param("red", 25),
pytest.param("blue", 199)
]
)
def test__generate_sprite_for_file_name__circle_colored(color, size):
file_name = f"{color}_circle_{size}.png"
result = src.generate_sprite_for_file_name(file_name)
expected = make_canvas(size, size)
ImageDraw.Draw(expected).ellipse((0, 0, size - 1, size - 1),
fill=color,
outline="black")
assert_images_equal(result, expected)
@pytest.mark.parametrize(
'width, height',
[
pytest.param(25, 15),
pytest.param(10, 100)
]
)
def test__generate_sprite_for_file_name__ellipse(width, height):
file_name = f"ellipse_{width}_{height}.png"
result = src.generate_sprite_for_file_name(file_name)
expected = make_canvas(width, height)
ImageDraw.Draw(expected).ellipse((0, 0, width - 1, height - 1),
outline="black")
assert_images_equal(result, expected)
@pytest.mark.parametrize(
'color, width, height',
[
pytest.param('orange', 5, 10),
pytest.param('blue', 66, 55)
]
)
def test__generate_sprite_for_file_name__ellipse_colored(color, width, height):
file_name = f"{color}_ellipse_{width}_{height}.png"
result = src.generate_sprite_for_file_name(file_name)
expected = make_canvas(width, height)
ImageDraw.Draw(expected).ellipse((0, 0, width - 1, height - 1),
fill=color,
outline="black")
assert_images_equal(result, expected)
@pytest.mark.parametrize(
'width, height',
[
pytest.param(12, 55),
pytest.param(100, 300)
]
)
def test__generate_sprite_for_file_name__rect(width, height):
file_name = f"rect_{width}_{height}.png"
result = src.generate_sprite_for_file_name(file_name)
expected = make_canvas(width, height)
ImageDraw.Draw(expected).rectangle((0, 0, width - 1, height - 1),
outline="black")
assert_images_equal(result, expected)
@pytest.mark.parametrize(
'color, width, height',
[
pytest.param('orange', 51, 4),
pytest.param('blue', 305, 511)
]
)
def test__generate_sprite_for_file_name__rect_colored(color, width, height):
file_name = f"{color}_rect_{width}_{height}.png"
result = src.generate_sprite_for_file_name(file_name)
expected = make_canvas(width, height)
ImageDraw.Draw(expected).rectangle((0, 0, width - 1, height - 1),
fill=color,
outline="black")
assert_images_equal(result, expected)
@pytest.mark.parametrize(
'file_name',
[
pytest.param('whatever'),
pytest.param('circle_rect_ellipse'),
pytest.param('circle_10_10'),
pytest.param('ellipse_30'),
pytest.param('red_blue_rect_30_30'),
pytest.param('blue_rect_30_30_30'),
pytest.param('rect_30_30_30'),
pytest.param('rect_blue_30_30'),
]
)
def test__generate_sprite_for_file_name__unrelated_file_names(file_name):
result = src.generate_sprite_for_file_name(file_name)
assert result is None
"""
"rect_34_36.png"
"orange_rect_3_5.png"
unrelated names
"""
| true
|
59776ba3e2cc86d0d0e3ec9f3996513be7d642ca
|
Python
|
L4SS3h/SenseHatDemo
|
/writetext.py
|
UTF-8
| 179
| 2.8125
| 3
|
[] |
no_license
|
from sense_hat import SenseHat
import time
s = SenseHat()
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
while True:
s.show_message("Hello World!", text_colour=red)
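# Hedged note: show_message also accepts scroll_speed and back_colour, e.g.
# s.show_message("Hello World!", text_colour=red, back_colour=blue, scroll_speed=0.05)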
| true
|
6011fdd1099311a731b5bffe909f0d6981e86d8a
|
Python
|
bssrdf/pyleet
|
/S/SolvingQuestionsWithBrainpower.py
|
UTF-8
| 2,260
| 4
| 4
|
[] |
no_license
|
'''
-Medium-
*DP*
You are given a 0-indexed 2D integer array questions where questions[i] = [points_i, brainpower_i].
The array describes the questions of an exam, where you have to process the questions in order (i.e., starting from question 0) and make a decision whether to solve or skip each question. Solving question i will earn you points_i points but you will be unable to solve each of the next brainpower_i questions. If you skip question i, you get to make the decision on the next question.
For example, given questions = [[3, 2], [4, 3], [4, 4], [2, 5]]:
If question 0 is solved, you will earn 3 points but you will be unable to solve questions 1 and 2.
If instead, question 0 is skipped and question 1 is solved, you will earn 4 points but you will be unable to solve questions 2 and 3.
Return the maximum points you can earn for the exam.
Example 1:
Input: questions = [[3,2],[4,3],[4,4],[2,5]]
Output: 5
Explanation: The maximum points can be earned by solving questions 0 and 3.
- Solve question 0: Earn 3 points, will be unable to solve the next 2 questions
- Unable to solve questions 1 and 2
- Solve question 3: Earn 2 points
Total points earned: 3 + 2 = 5. There is no other way to earn 5 or more points.
Example 2:
Input: questions = [[1,1],[2,2],[3,3],[4,4],[5,5]]
Output: 7
Explanation: The maximum points can be earned by solving questions 1 and 4.
- Skip question 0
- Solve question 1: Earn 2 points, will be unable to solve the next 2 questions
- Unable to solve questions 2 and 3
- Solve question 4: Earn 5 points
Total points earned: 2 + 5 = 7. There is no other way to earn 7 or more points.
Constraints:
1 <= questions.length <= 10^5
questions[i].length == 2
1 <= points_i, brainpower_i <= 10^5
'''
from typing import List
class Solution:
    def mostPoints(self, questions: List[List[int]]) -> int:
        Q, n = questions, len(questions)
        # dp[i] = max points obtainable from questions i..n-1; dp[n] = 0 (no questions left).
        dp = [0]*(n+1)
        for i in range(n-1, -1, -1):
            p, b = Q[i]
            # Either skip question i (take dp[i+1]) or solve it: earn p, then jump past the b blocked questions.
            dp[i] = max(dp[i+1], p + (dp[i+b+1] if i+b+1 <= n else 0))
        return dp[0]
if __name__ == "__main__":
    print(Solution().mostPoints(questions = [[3,2],[4,3],[4,4],[2,5]]))# expected: 5
    print(Solution().mostPoints(questions = [[1,1],[2,2],[3,3],[4,4],[5,5]]))# expected: 7
| true
|
245baad54f8589635bdcfe7863fe8b9ab1a1abc2
|
Python
|
bgoonz/UsefulResourceRepo2.0
|
/MY_REPOS/DATA_STRUC_PYTHON_NOTES/WEEKS/wk17/CodeSignal-Solutions/08_-_matrixElementsSum.py
|
UTF-8
| 334
| 3.296875
| 3
|
[
"MIT",
"Python-2.0"
] |
permissive
|
def matrixElementsSum(matrix):
    # A 0 marks a haunted room; every room directly below a haunted room becomes unusable too.
    if len(matrix) > 1:
        for row in range(1, len(matrix)):
            for room in range(len(matrix[row])):
                if matrix[row - 1][room] == 0:
                    matrix[row][room] = 0# Propagate the 0 downward, column by column.
    total = 0# Renamed from 'sum' to avoid shadowing the builtin.
    for row in matrix:
        for room in row:
            total += room
    return total
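# Hedged usage sketch (values assumed for illustration):
# matrixElementsSum([[0, 1, 1, 2],
#                    [0, 5, 0, 0],
#                    [2, 0, 3, 3]]) == 9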
| true
|
1263fd998edd5de272f1c70bf76901f31733fb47
|
Python
|
MTGTsunami/LeetPython
|
/src/leetcode/binary_search/300. Longest Increasing Subsequence.py
|
UTF-8
| 755
| 3.78125
| 4
|
[] |
no_license
|
"""
Given an unsorted array of integers, find the length of longest increasing subsequence.
Example:
Input: [10,9,2,5,3,7,101,18]
Output: 4
Explanation: The longest increasing subsequence is [2,3,7,101], therefore the length is 4.
Note:
There may be more than one LIS combination, it is only necessary for you to return the length.
Your algorithm should run in O(n^2) complexity.
Follow up: Could you improve it to O(n log n) time complexity?
"""
import bisect
from typing import List


class MySolution:
    def lengthOfLIS(self, nums: List[int]) -> int:
        # res[k] holds the smallest possible tail value of an increasing subsequence of length k+1.
        res = []
        for num in nums:
            i = bisect.bisect_left(res, num)
            if i == len(res):
                res.append(num)# num extends the longest subsequence found so far.
            else:
                res[i] = num# num becomes a smaller tail for subsequences of length i+1.
        return len(res)
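# Hedged usage sketch (example from the prompt above):
# MySolution().lengthOfLIS([10, 9, 2, 5, 3, 7, 101, 18]) == 4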
| true
|
40ee05b90727726c7faf937f50d15fddec59b78d
|
Python
|
BetaS/gotham-v2
|
/util/crypt_util.py
|
UTF-8
| 1,200
| 2.734375
| 3
|
[] |
no_license
|
#encoding: utf-8
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA512
from Crypto import Random
import os
def generateKey():
random_generator = Random.new().read
privatekey = RSA.generate(1024, random_generator)
publickey = privatekey.publickey()
f = open("private.key", "wb")
f.write(privatekey.exportKey('DER'))
f.close()
f = open("public.key", "wb")
f.write(publickey.exportKey('DER'))
f.close()
def sign(data):
if not (os.path.isfile("public.key") and os.path.isfile("private.key")):
print "[!] Key has not generated"
generateKey()
# Signing file
f = open("private.key", 'rb')
key = RSA.importKey(f.read())
signer = PKCS1_v1_5.new(key)
hash = SHA512.new(data)
s = signer.sign(hash)
f.close()
return s
def verify(data, sign):
if not (os.path.isfile("public.key") and os.path.isfile("private.key")):
print "[!] Key has not generated"
generateKey()
f = open("public.key", 'rb')
key = RSA.importKey(f.read())
signer = PKCS1_v1_5.new(key)
hash = SHA512.new(data)
f.close()
return signer.verify(hash, sign)
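
# Hedged usage sketch (assumes PyCrypto is installed and key files can be written in the working directory):
if __name__ == '__main__':
    signature = sign("hello")
    print verify("hello", signature) # expected: True
    print verify("tampered", signature) # expected: False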
| true
|
34a87259677e427e749bf86ec53dc7c96f2a63d5
|
Python
|
aoeuidht/homework
|
/leetcode/297.serialize_and_deserialize_binary_tree.py
|
UTF-8
| 1,773
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from oj_helper import *
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if not root:
return ''
cands = [root]
node_list = []
while cands:
node = cands.pop(0)
node_list.append(node.val if node else '')
if node:
cands.append(node.left)
cands.append(node.right)
return ','.join(map(str, node_list))
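    # Example (sketch): the tree 0 -> (2, 3) with 3 -> (4, 5) serializes to "0,2,3,,,4,5,,,,"
    # (empty fields mark missing children in level order; node values come back as strings on deserialize).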
def de_token(self, rst):
"""
Arguments:
- `self`:
- `rst`:
"""
for item in rst.split(','):
yield item
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
if not data:
return None
tokens = self.de_token(data)
root = TreeNode(tokens.next())
nq = [root]
while nq:
node = nq.pop(0)
try:
l = tokens.next()
r = tokens.next()
if l:
node.left = TreeNode(l)
nq.append(node.left)
if r:
node.right = TreeNode(r)
nq.append(node.right)
except:
break
return root
if __name__ == '__main__':
r = TreeNode(0)
r.left = TreeNode(2)
r.right = TreeNode(3)
r.right.left = TreeNode(4)
r.right.right = TreeNode(5)
c = Codec()
print_bst(r)
    r = TreeNode(0)# Note: this overwrites the tree built above; only the single-node tree is serialized below.
s = c.serialize(r)
print s
root = c.deserialize(s)
print_bst(root)
print(c.deserialize(''))
| true
|