| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1-1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
08de1f917dace86cb7124d6906b69f3af922f184
|
7b55cfc4ffa7678e4c7b8f2312831ebbd549e54f
|
/proj1/tests/other-tests/strictfp_tests/error/test_if_continue.py
|
99887a16a7eb4db8dd94fbe0fc70562f84d77ea9
|
[] |
no_license
|
czchen1/cs164-projects
|
0d330efef85421e611a436b165428ba0ddfb3512
|
a04cafbcaafd32e518227dacf89a6d7837bf9f57
|
refs/heads/master
| 2020-03-27T04:03:31.727524
| 2018-08-23T21:43:46
| 2018-08-23T21:43:46
| 145,909,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19
|
py
|
if 1:
continue
|
[
"czchen@mit.edu"
] |
czchen@mit.edu
|
e82f2269c0d7467870ca0afcbf475266fb410271
|
6370a0ee5785204f7d2e31906c9e8fb61a51d530
|
/7_exception_assertion.py
|
9a7bed105133e8f67bdccfbbb4cefec8249c1448
|
[] |
no_license
|
P-tan/pythonIntro
|
7de9a813ed3f82d5bcfcea3234cca1f52c1945fe
|
7e794f38f0d9f8acb2b2726fc66a45d9d3a1063d
|
refs/heads/master
| 2020-12-31T01:02:31.926892
| 2016-09-15T20:16:10
| 2016-09-15T20:16:10
| 68,323,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
# -*- coding: utf-8 -*-
#%%
test = [1, 2, 3]
test[3]
#%% 7.1 例外の処理
# 未処理例外
#%% try-except
try:
numSuccesses = 1
numFailures = 0
successFailureRatio = numSuccesses / float(numFailures)
print 'The success / failure ratio is ', successFailureRatio
except ZeroDivisionError:
print 'No failures, so the success/failure ratio is undefined.'
print 'Now here'
#%% 指練習
def sumDigits(s):
"""sを文字列とする.
sの中の数字の合計を返す.
例えば, sが'a2b3c'ならば5を返す"""
sum = 0
for v in s:
try:
sum += int(v)
except ValueError:
sum
return sum
sumDigits('123')
sumDigits('a2b3c')
#%% 多相的
def readVal(valType, requestMsg, errorMsg):
while True:
val = raw_input(requestMsg + ' ')
try:
val = valType(val)
return val
except ValueError:
print val, errorMsg
readVal(int, 'Enter an integer:', 'is not an integer')
# 未処理例外が起こったときにプログラムが止まるのは良いこと
# 顕在的なバグ
#%% 複数の例外の処理
try:
raise ValueError
raise TypeError
except (ValueError, TypeError):
print 'error'
except: # すべての例外をキャッチ
    pass
#%% 7.2 フロー制御機構としての例外
#%% raise文
raise ValueError('hoge')
#%% 指練習
def findAnEven(I):
""" Iをint型の要素を持つリストとする。
Iに最初に現れる偶数を返す
Iが偶数を含まなければValueErrorを引き起こす"""
for i in I:
if(i % 2 == 0):
return i
raise ValueError('No even value.')
findAnEven([1])
findAnEven([2])
#%% getRatios
def getRatios(vect1, vect2):
ratios = []
for index in range(len(vect1)):
try:
ratios.append(vect1[index]/float(vect2[index]))
except ZeroDivisionError:
ratios.append(float('nan')) #nan = Not a Number
except:
raise ValueError('getRatios called with bad arguments')
return ratios
getRatios([1.0, 2.0, 7., 6.], [1., 2., 0., 3.])
getRatios([], [])
getRatios([1., 2.], [3.])
getRatios([1], [2, 3])
#%% 7.3 アサーション
assert False
assert False, 'message'
|
[
"ptanmail@gmail.com"
] |
ptanmail@gmail.com
|
657b0acf471470932cff4e18d229f72898a3701a
|
70607bac082f050bea5a11ba30c443441fd33c68
|
/django_project/blog/models.py
|
48088e6537f3f8880e17b5c82caa6bfae88d1e3d
|
[] |
no_license
|
SimiSips/django_project
|
80b84b0b3dc5c85fe978a59a943d05980af31493
|
a327fdd2e1c6568505fa7834f7344267fdadf470
|
refs/heads/master
| 2022-12-15T03:22:01.847076
| 2022-03-05T11:30:07
| 2022-03-05T11:30:07
| 214,234,413
| 0
| 0
| null | 2022-12-08T01:23:47
| 2019-10-10T16:39:09
|
Python
|
UTF-8
|
Python
| false
| false
| 642
|
py
|
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField(default='default.jpg', upload_to='profile_pics')
def __str__(self):
return f'{self.user.username} Profile'
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
img = Image.open(self.image.path)
if img.height > 300 or img.width > 300:
output_size = (300, 300)
img.thumbnail(output_size)
img.save(self.image.path)
|
[
"noreply@github.com"
] |
SimiSips.noreply@github.com
|
cfbc0b358cbc8a73771ab602b38fe9a5b825e242
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/488/usersdata/341/112971/submittedfiles/AvF_Parte3.py
|
25d6392521b197f54357fe6d625293d8a2655e93
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# -*- coding: utf-8 -*-
n = int(input('Digite a quantidade de números: '))
a = []
for i in range (n):
a.append(int(input('Digite os respectivos números: ')))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
704d84a014b6fe90dba7d9e32dc3683b3fb8aac6
|
8a31f17ee9378acfc9173814cdb7b441210dc1df
|
/class3/assignment3q7.py
|
38c1ba84fac9ec7b466b358fbb38ab21d2bed034
|
[] |
no_license
|
Harmandhindsa19/Python-online
|
2f4f5ae6dd5dc2826add30f495aef37a46d077d0
|
e7f36c6d3e5de677260648ba13408864ae7dfedc
|
refs/heads/master
| 2020-03-26T20:17:45.382120
| 2018-09-18T15:55:01
| 2018-09-18T15:55:01
| 145,315,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
list=[2,3,4,5,6,7,8,9]
a = 0
b = 0
print("list is:", list)
for x in list:
if(x%2==0):
a=a+1
elif(x%2!=0):
b=b+1
print("the count for even values is:",a)
print("the count for odd values is:",b)
|
[
"dhindsaharman03@gmail.com"
] |
dhindsaharman03@gmail.com
|
32e0bd56b4c4a6012c44ffeeb0f3cca83a06e528
|
b14f04873a951ebb63593ca618af38bad5415c7a
|
/fetch_spacex.py
|
fdc073bdfcd84693663d7a06bb793a6b360e0a78
|
[] |
no_license
|
alezi06/space_insta
|
aa6ee1259cc9bedbf75cb62d50d8e82a0f35c00b
|
30821263ce8e7d4980f3dc22665d2513175033fb
|
refs/heads/master
| 2020-04-21T13:37:52.483650
| 2019-02-13T20:00:49
| 2019-02-13T20:00:49
| 169,603,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
import os
import requests
from helpers import get_image_path
def fetch_spacex_last_launch():
url = 'https://api.spacexdata.com/v3/launches/latest'
response = requests.get(url).json()
image_links_list = response['links']['flickr_images']
os.makedirs(get_image_path(), exist_ok=True)
for image_number, image_link in enumerate(image_links_list, 1):
file_path = get_image_path('spacex{}.jpg'.format(image_number))
response = requests.get(image_link)
with open(file_path, 'wb') as file:
file.write(response.content)
if __name__ == '__main__':
fetch_spacex_last_launch()
|
[
"alezi06@mail.ru"
] |
alezi06@mail.ru
|
8eaa24b375d82559d9b3864a8fb2e7b5b1b5628c
|
2e6f7d89f763b7e00f029fd4ed665469ada294bd
|
/recommendation-system/recommendation_system.py
|
2b3a6c934cccd779497d46fcc5dc545c23c95f02
|
[] |
no_license
|
N0nl1n34r/capgemini-expedition-coding-challenge
|
dab7dcc2aa4fc89b586e098f57355ccae1f5c660
|
f714020d3a5de6c1389af976d859fae0f0e18508
|
refs/heads/master
| 2023-07-04T07:43:06.663801
| 2021-07-29T17:55:08
| 2021-07-29T17:55:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
# this cli tool gives recommendations according to given rules.
# First argument is the json from which you want to give recommendations.
# Second argument is a dictionary where the key is a jsonata query and the value is a message,
# which gets printed, if the jsonata query evaluates to true.
from pyjsonata import jsonata
import json
def remove_chars(a_string, a_list_of_chars):
for a_char in a_list_of_chars:
a_string = a_string.replace(a_char, "")
return a_string
def get_recommended_actions(json_data, rules):
recommended_actions = []
for premise, action in rules.items():
if jsonata(premise, json_data) == 'true':
recommended_action = jsonata(action, json_data)
recommended_action = remove_chars(recommended_action, ['"', "[", "]", "{", "}", "\\"])
recommended_actions.append(recommended_action)
return recommended_actions
if __name__ == '__main__':
import sys
json_data = sys.argv[1]
rules = json.loads(sys.argv[2])
print(get_recommended_actions(json_data, rules))
|
[
"d.hessel@wwu.de"
] |
d.hessel@wwu.de
|
b524fe5caa3d77e5a88deb2e1aca3844f930eedf
|
40491d4649bc8f981cfd531657b0970e2577edd1
|
/Policy_Gradient/Tank_1/params.py
|
591b4c09a4383ccea277dcc219593c967ce568b8
|
[] |
no_license
|
emedd33/Reinforcement-Learning-in-Process-Control
|
d82ddab87dc6727a70ee38d53889aa8af87ade25
|
24bc1d9b72c0762bd92c215837347d6548099902
|
refs/heads/master
| 2022-07-12T02:53:52.208320
| 2022-04-05T15:23:48
| 2022-04-05T15:23:48
| 161,691,463
| 29
| 11
| null | 2022-06-21T21:39:15
| 2018-12-13T20:29:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
MAIN_PARAMS = {
"EPISODES": 20000,
"MEAN_EPISODE": 50,
"MAX_TIME": 200,
"RENDER": True,
"MAX_MEAN_REWARD": 200, # minimum reward before saving model
}
AGENT_PARAMS = {
"N_TANKS": 1,
"SS_POSITION": 0.5,
"VALVE_START_POSITION": 0.2,
"ACTION_DELAY": [5],
"INIT_ACTION": 0.3,
"VALVEPOS_UNCERTAINTY": 0,
"EPSILON_DECAY": [1],
"LEARNING_RATE": [0.0005],
"HIDDEN_LAYER_SIZE": [[5, 5]],
"BATCH_SIZE": 5,
"MEMORY_LENGTH": 10000,
"OBSERVATIONS": 4, # level, gradient, is_above 0.5, prevous valve position
"GAMMA": 0.9,
"EPSILON": [0],
"EPSILON_MIN": [0],
"BASE_LINE_LENGTH": 1,
"Z_VARIANCE": [0.05],
"SAVE_MODEL": [True],
"LOAD_MODEL": [False],
"TRAIN_MODEL": [True],
"LOAD_MODEL_NAME": [""],
"LOAD_MODEL_PATH": "Policy_Gradient/Tank_1/",
"SAVE_MODEL_PATH": "Policy_Gradient/Tank_1/",
}
# Model parameters Tank 1
TANK1_PARAMS = {
"height": 10,
"init_level": 0.5,
"width": 10,
"pipe_radius": 0.5,
"max_level": 0.75,
"min_level": 0.25,
}
TANK1_DIST = {
"add": True,
"pre_def_dist": False,
"nom_flow": 1, # 2.7503
"var_flow": 0.1,
"max_flow": 2,
"min_flow": 0.7,
"add_step": False,
"step_time": int(MAIN_PARAMS["MAX_TIME"] / 2),
"step_flow": 2,
"max_time": MAIN_PARAMS["MAX_TIME"],
}
TANK_PARAMS = [TANK1_PARAMS]
TANK_DIST = [TANK1_DIST]
|
[
"eskild.emedd33@gmail.com"
] |
eskild.emedd33@gmail.com
|
64b2863b968f115b0a83f9f3c7145c8dde3c61b7
|
a8494ac812d41567ff8427b7314410390195d7f1
|
/manager/models.py
|
4c7d68477fae658b85d37dcd25f620f1219320ec
|
[] |
no_license
|
iikyara/rktabot
|
527a05585e505d9c31398003ad87a78795a7d922
|
baf44732abb9aa1ac3da1256c129c4fd02ec83fc
|
refs/heads/master
| 2023-02-03T02:35:51.113283
| 2021-06-17T09:30:23
| 2021-06-17T09:30:23
| 87,445,624
| 0
| 0
| null | 2023-02-02T02:28:40
| 2017-04-06T15:31:37
|
Python
|
UTF-8
|
Python
| false
| false
| 809
|
py
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils import timezone
import datetime
class Post(models.Model):
title = models.CharField(max_length=40, default='')
sdate = models.DateField(default=datetime.date.today().strftime("%Y-%m-%d"))
starttime = models.TimeField(default=datetime.datetime.now().strftime("%H:%M"))
endtime = models.TimeField(default=datetime.datetime.now().strftime("%H:%M"))
content = models.CharField(max_length=60, default='')
available = models.BooleanField(default='True')
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
|
[
"noreply@github.com"
] |
iikyara.noreply@github.com
|
38d2dbf7b4c4318db1aa5f047796969a2fa6f580
|
faa88b9ce08927d8b10df7ce4fa2b84a36ba1a1a
|
/main_part.py
|
f375259e69307399ed5e867056b61dce64e7caea
|
[] |
no_license
|
Hengle/AzurLaneCharCuter
|
c2c03ead4261b460026c58c06f4b3bc069eb407a
|
6bbbef1b99a265ccc6196ab714fb8cf004d51ba1
|
refs/heads/master
| 2020-04-13T09:16:47.926365
| 2018-10-07T04:23:15
| 2018-10-07T04:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
import os
import time
import holder as ch
differ = []
try:
os.makedirs("texture2D")
except FileExistsError:
pass
try:
os.makedirs("textAsset")
except FileExistsError:
pass
try:
os.makedirs("out")
except FileExistsError:
pass
texture2D = os.listdir('texture2D')
textAsset = os.listdir('textAsset')
text_ = []
for text in textAsset:
text_.append(text.split('.'))
textAsset = []
for text in text_:
textAsset.append(text[0])
textAsset = set(textAsset)
text_ = []
for text in texture2D:
text_.append(text.split('.'))
texture2D = []
for text in text_:
texture2D.append(text[0])
for name in texture2D:
if name not in textAsset:
print("切分文件丢失,请添加【" + name + ".atlas.txt】至TextAsset文件夹")
else:
differ.append( ch.body_cut(name))
print(f'完成一个,为{name}')
print("完成,将于15s后关闭")
time.sleep(15)
|
[
"noreply@github.com"
] |
Hengle.noreply@github.com
|
0b44cfd985f782a84ac2728b47d5a1534985b337
|
7aae6ca4e4eb098adfca9385f0861953f19a5b4a
|
/src/python/resultConvert.py
|
50a177b206f79c26cb68eacb5a7e91d078224314
|
[] |
no_license
|
Hangzhi/FBDP-Lab4
|
0871f4b3c66ed88d6e5192b733333e8d2170a95a
|
53a801b07f43d22c764a1ec62580712146373b3d
|
refs/heads/master
| 2021-10-09T07:12:44.765446
| 2018-12-23T13:33:07
| 2018-12-23T13:33:07
| 161,329,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 21:11:47 2018
@author: Lenovo
"""
import pandas as pd
df=[]
sentiDic={}
resultfile=open('scratchBayesResult.txt','r')
line=resultfile.readline()
while line:
line=line.rstrip()
tmpSplit=line.split('\t')
tmpSplit
sentiDic[tmpSplit[0]]=tmpSplit[2]
print(tmpSplit[1])
line=resultfile.readline()
for i in range(996):
tmpdic={}
filename=str(i)+'.txt'
f=open(filename,'r')
line=f.read()
tmpdic['atri']=sentiDic[str(i)]
tmpdic['index']=str(i)
tmpdic['context']=line
print(str(i))
df.append(tmpdic)
result=pd.DataFrame(df)
result.set_index(['index'], inplace = True)
result.to_csv('scratchBayesResultCpr.csv')
|
[
"noreply@github.com"
] |
Hangzhi.noreply@github.com
|
c3a893c3d848b53fed2af2a0af5ef2a746813b2d
|
352f7d1258e51d3b7e8cfcbb4b527c3e27a68fe5
|
/tests/test_img_server.py
|
b8eca0fb172da1de0c121455a4bcb1751b25020c
|
[] |
no_license
|
lidingke/fiberGeometry
|
67b53535ca1060af1ab29de915f1190258d7986e
|
1455fd815884a735d5b9e87aff07244ca9a95a23
|
refs/heads/master
| 2020-05-21T16:45:06.374649
| 2018-02-25T06:30:15
| 2018-02-25T06:30:15
| 62,809,512
| 1
| 1
| null | 2017-08-29T03:21:54
| 2016-07-07T13:37:45
|
C
|
UTF-8
|
Python
| false
| false
| 2,947
|
py
|
# coding:utf-8
from setting.config import SIMULATOR_IMG_SERVER_COFIG
from SDK.simulator.client import Client
from SDK.simulator.server import ImgServer, SeverMain, SharpSever
from threading import Thread
import multiprocessing
from tornado.ioloop import IOLoop
from functools import partial
from util.getimg import getImage
from tornado.iostream import StreamClosedError
import time
import logging
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
def test_sharpserver():
ss = SharpSever()
ss.getAll()
def test_imgserver():
u"""测试摄像头模拟器/图片服务器的性能
:return:
"""
host, port, method, path = SIMULATOR_IMG_SERVER_COFIG
port = 9885
# port = 9801
Thread(target = SeverMain, args=(host, port, method, path)).start()
# multiprocessing.Process(target=servermain).start()
# time.sleep(1)
img = getImage('IMG/midoc.BMP')
imgstr = img.tobytes()
result = IOLoop.current().run_sync(Client(port=port).get_img_once)
assert len(result) == len(imgstr)
assert imgstr != result
print len(result)
para = ('getImage', 'IMG/midoc.BMP')
IOLoop.current().run_sync(partial(Client(port=port).get_change,para))
result = IOLoop.current().run_sync(Client(port=port).get_img_once)
assert len(result) == len(imgstr)
assert imgstr == result
para = ('randomImg', 'IMG/G652/pk/')
IOLoop.current().run_sync(partial(Client(port=port).get_change, para))
result = IOLoop.current().run_sync(Client(port=port).get_img_once)
assert len(result) == len(imgstr)
assert imgstr != result
IOLoop.current().run_sync(Client(port=port).close_server)
def test_getimg_multi_connect():
u"""测试连接池取图片
:return:
"""
host, port, method, path = SIMULATOR_IMG_SERVER_COFIG
port = 9883
# port = 9801
img = getImage('IMG/midoc.BMP')
imgstr = img.tobytes()
# port = 9801
Thread(target = SeverMain, args=(host, port, method, path)).start()
# multiprocessing.Process(target=SeverMain, args=(port,)).start()
print 'start multi connect'
for x in range(0,100):
try:
# time.sleep(0.5)
result = IOLoop.current().run_sync(Client(port=port).get_img_once)
assert len(result) == len(imgstr)
except StreamClosedError:
logger.warning("Lost host at client %s")
return
except Exception as e:
print 'range time', x
raise e
if x%50 == 0:
print 'create times',x, time.time()
IOLoop.current().run_sync(Client(port=port).close_server)
# def test_imgserver():
# Thread(target = SeverMain).start()
# multiprocessing.Process(target=servermain).start()
# time.sleep(1)
if __name__ == "__main__":
port = 9880
para = ('randomImg', 'IMG/emptytuple/eptlight2')
IOLoop.current().run_sync(partial(Client(port=port).get_change, para))
|
[
"lidingke@hust.edu.cn"
] |
lidingke@hust.edu.cn
|
ef2e2684ce859f054346f0e34b65fe4d07d65ab1
|
d934d3084cd3dd38815706a72be321382eddc127
|
/mappings/MinorCPU_fpu.py
|
02ef6a9f937a73bc0571fd1914d7100918c7dedd
|
[] |
no_license
|
Accelergy-Project/gem5-accelergy-connector
|
58d6b27328c44a0e0512601ad9d42cf68cbf667c
|
e3af25e914e3a53f0bdfb8be784b33844baf6336
|
refs/heads/master
| 2023-03-24T19:56:33.674582
| 2021-03-17T19:40:39
| 2021-03-17T19:40:39
| 257,961,160
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
gem5_class = "MinorCPU"
accelergy_class = "func_unit"
path = "system.chip.cpu"
name_append = "fpu"
criteria = True
constants = [
("type", "fpu")
]
attributes = []
actions = [
("access",
["op_class_0::FloatAdd",
"op_class_0::FloatCmp",
"op_class_0::FloatCvt",
"op_class_0::FloatMult",
"op_class_0::FloatMultAcc",
"op_class_0::FloatDiv",
"op_class_0::FloatMisc",
"op_class_0::FloatSqrt"]),
("idle", ["system.cpu.numCycles", "system.switch_cpus.numCycles"],
["op_class_0::FloatAdd",
"op_class_0::FloatCmp",
"op_class_0::FloatCvt",
"op_class_0::FloatMult",
"op_class_0::FloatMultAcc",
"op_class_0::FloatDiv",
"op_class_0::FloatMisc",
"op_class_0::FloatSqrt"])
]
|
[
"frwang@mit.edu"
] |
frwang@mit.edu
|
05bf5726a0cdcdb2391ad837b8d4b800278ef405
|
04dcabae3204ea452bffa94aee36219fffb2f061
|
/generate_wakeword_data_lists.py
|
6a6c186cd88eb18c95e802de0832457ec15a59d0
|
[] |
no_license
|
alhuang10/SincNet
|
2ecce54bce7add1eb5ec3e02bc90325fe803b057
|
380fa11b9992a15e34d2710257bcaf5cae842047
|
refs/heads/master
| 2020-04-17T01:42:35.287785
| 2019-02-11T21:18:29
| 2019-02-11T21:18:29
| 166,104,961
| 0
| 0
| null | 2019-01-16T20:14:00
| 2019-01-16T20:13:59
| null |
UTF-8
|
Python
| false
| false
| 15,105
|
py
|
import os
import numpy as np
import random
import pickle
from collections import defaultdict, OrderedDict
OUTPUT_DIR = 'wakeword_file_lists'
def get_unique_items(label_dict_file):
label_dict = np.load(label_dict_file).item()
return len(set(list(label_dict.values())))
def generate_label_dict(file_set, output_dir, dict_name):
label_dict = {}
speaker_to_id = {}
current_count = 0
for f in file_set:
if f[-4:] == ".wav":
speaker_id = f.split('-')[1]
if speaker_id not in speaker_to_id:
label_dict[f] = current_count
speaker_to_id[speaker_id] = current_count
current_count += 1
else:
label_dict[f] = speaker_to_id[speaker_id]
np.save(os.path.join(output_dir, dict_name), label_dict)
def get_all_files_recursive(root_dir):
"""
Recursively walk through the dataset directory and return full filepaths to each file as a sorted list.
Args:
root_dir: The directory with all sound files.
Returns: Sorted list of all filepaths to sound files
"""
file_set = set()
for dir_, _, files in os.walk(root_dir):
for file_name in files:
relative_dir = os.path.relpath(dir_, root_dir)
relative_file = os.path.join(relative_dir, file_name)
file_set.add(relative_file)
file_set = list(file_set)
file_set.sort()
return file_set
def get_relevant_sound_files(file_set, utterance_text):
"""
Get's all the sound files of users mentioning an utterance text
Args:
file_set: The list of all sound files, filepaths to each file
utterance_text: The text we want the sound files to contain
Returns: A list of all the files with the utterance
"""
return [x for x in file_set if utterance_text in x and ".wav" in x]
def get_speaker_to_files_mapping(sound_file_list):
"""
Gets a mapping from speaker ID (eg. SV101) to all the sound files corresponding to that speaker
Args:
sound_file_list: List of all sound files we want to consider, typically all files containing a given utterance
Returns:
A dictionary mapping speaker ID to all sound files of that speaker saying a given text
"""
# Organize files by speaker
speaker_id_to_files = defaultdict(list)
for f in sound_file_list:
speaker = f.split('-')[1]
speaker_id_to_files[speaker].append(f)
return speaker_id_to_files
def get_softmax_training_data_lists(file_set, utterance_text, query_count_threshold, num_train_files_per_speaker):
"""
Get a train/test data list split given text for queries to contain, and appropriate thresholds
Args:
file_set: all sound files in the dataset
utterance_text: text that the sound queries should contain
query_count_threshold: number of queries that each speaker must have for them to be included in training
num_train_files_per_speaker: Given the "query_count_threshold" queries per speaker, number of files to use for
training and number of files to use for dev
Returns:
N/A, generates data lists and label dictionary
"""
# Directory name includes threshold/training counts
specific_output_directory = os.path.join(OUTPUT_DIR,
f"{utterance_text}_softmax_{query_count_threshold}_query_threshold_" +
f"{num_train_files_per_speaker}_train_count")
if not os.path.exists(specific_output_directory):
os.mkdir(specific_output_directory)
sound_files_containing_text = get_relevant_sound_files(file_set, utterance_text)
speaker_id_to_files = get_speaker_to_files_mapping(sound_files_containing_text)
valid_speaker_ids = [speaker_id for speaker_id, files in speaker_id_to_files.items()
if len(files) >= query_count_threshold]
print("Number of speakers with required number of utterances:", len(valid_speaker_ids))
train_files = []
dev_files = []
for _, files in speaker_id_to_files.items():
if len(files) >= query_count_threshold:
# Shuffle files and select files equal to "query_count_threshold"
random.shuffle(files)
selected_files = files[:query_count_threshold]
# Divide the selected files into train/dev
train_split = selected_files[:num_train_files_per_speaker]
dev_split = selected_files[num_train_files_per_speaker:]
train_files.extend(train_split)
dev_files.extend(dev_split)
with open(os.path.join(specific_output_directory, "all.txt"), 'w') as f:
for item in sound_files_containing_text:
f.write("%s\n" % item)
with open(os.path.join(specific_output_directory, "train.txt"), 'w') as f:
for item in train_files:
f.write("%s\n" % item)
with open(os.path.join(specific_output_directory, "dev.txt"), 'w') as f:
for item in dev_files:
f.write("%s\n" % item)
print(f"Number of train files: {len(train_files)}, Number of dev files: {len(dev_files)}")
# Generate the label dictionary for the files/speakers selected
selected_files = train_files + dev_files
generate_label_dict(selected_files, specific_output_directory, "label_dict.npy")
def get_enrollment_training_data_lists(file_set, utterance_text, query_count_threshold, num_train_files_per_speaker,
train_vs_enrollment_fraction):
"""
Get an training/enrollment data split as well as splitting the training list into TRAIN/DEV and the enrollment
list into ENROLL/TEST
Args:
file_set: all sound files in the dataset
utterance_text: text that the sound queries should contain
query_count_threshold: number of queries that each speaker must have for them to be included in training
num_train_files_per_speaker: Given the "query_count_threshold" queries per speaker, number of files to use for
training and number of files to use for dev
train_vs_enrollment_fraction: The fraction of total speakers to use for training, other portion will be used
for enrollment vector generation/testing
Returns:
N/A, generates data lists and label dictionary
"""
# Directory name includes threshold/training counts
specific_output_directory = os.path.join(OUTPUT_DIR,
f"{utterance_text}_enrollment_{query_count_threshold}_query_threshold_" +
f"{num_train_files_per_speaker}_train_count")
if not os.path.exists(specific_output_directory):
os.mkdir(specific_output_directory)
sound_files_containing_text = get_relevant_sound_files(file_set, utterance_text)
speaker_id_to_files = get_speaker_to_files_mapping(sound_files_containing_text)
valid_speaker_ids = [speaker_id for speaker_id, files in speaker_id_to_files.items()
if len(files) >= query_count_threshold]
training_count = int(len(valid_speaker_ids)*train_vs_enrollment_fraction)
print(f"Number of training speakers: {training_count}")
print(f"Number of enrollment speakers: {len(valid_speaker_ids) - training_count}")
print(f"Number of total speakers with enough utterances: {len(valid_speaker_ids)}")
random.shuffle(valid_speaker_ids)
valid_speaker_ids = list(OrderedDict.fromkeys(valid_speaker_ids))
# Separate training and test speakers
enrollment_train_ids = valid_speaker_ids[:training_count]
enrollment_test_ids = valid_speaker_ids[training_count:]
print("First enrollment train ID:", enrollment_train_ids[0])
print("First enrollment test ID:", enrollment_test_ids[0])
# For each speaker in enrollment_train, some queries go to training
# and some queries go to dev for parameter optimization
train_files = []
dev_files = []
# For each speaker in test, some queries go to generate the d-vector
# and some go for testing (nearest neighbor)
test_queries_seen = []
test_queries_unseen = []
for speaker_id in enrollment_train_ids:
files = speaker_id_to_files[speaker_id]
random.shuffle(files)
selected_files = files[:query_count_threshold]
train_split = selected_files[:num_train_files_per_speaker]
test_split = selected_files[num_train_files_per_speaker:]
train_files.extend(train_split)
dev_files.extend(test_split)
for speaker_id in enrollment_test_ids:
files = speaker_id_to_files[speaker_id]
random.shuffle(files)
selected_files = files[:query_count_threshold]
train_split = selected_files[:num_train_files_per_speaker]
test_split = selected_files[num_train_files_per_speaker:]
test_queries_seen.extend(train_split)
test_queries_unseen.extend(test_split)
print("Training, dev, enrollment_length, enrollment_test")
print(len(train_files), len(dev_files), len(test_queries_seen), len(test_queries_unseen))
with open(os.path.join(specific_output_directory, "enrollment_train.txt"), 'w') as f:
for item in train_files:
f.write("%s\n" % item)
with open(os.path.join(specific_output_directory, "enrollment_dev.txt"), 'w') as f:
for item in dev_files:
f.write("%s\n" % item)
# Queries from users that the network does not train with, used to generate a corresponding d-vector
with open(os.path.join(specific_output_directory, "enrollment_test_seen.txt"), 'w') as f:
for item in test_queries_seen:
f.write("%s\n" % item)
# Queries used for evaluation on d-vectors
with open(os.path.join(specific_output_directory, "enrollment_test_unseen.txt"), 'w') as f:
for item in test_queries_unseen:
f.write("%s\n" % item)
selected_files = train_files + dev_files
generate_label_dict(selected_files, specific_output_directory, "label_dict_enrollment.npy")
selected_test_files = test_queries_seen + test_queries_unseen
generate_label_dict(selected_test_files, specific_output_directory, "label_dict_enrollment_test.npy")
# Save the training and enrollment users
with open(os.path.join(specific_output_directory, "enrollment_train_ids.p"), 'wb') as f:
pickle.dump(enrollment_train_ids, f)
with open(os.path.join(specific_output_directory, "enrollment_test_ids.p"), 'wb') as f:
pickle.dump(enrollment_test_ids, f)
print("Training speakers",
get_unique_items(os.path.join(specific_output_directory, "label_dict_enrollment.npy")))
print("Enroll speakers",
get_unique_items(os.path.join(specific_output_directory, "label_dict_enrollment_test.npy")))
def generate_enrollment_list(file_set, utterance_text,
pickle_filepath, unseen, count_threshold, train_count, num_unique_ids):
"""
Given a pickle file of speaker ids, generate enrollment/test lists.
Used for testing enrollment with different ratios than used in training while controlling for using speakers
that the model has seen/not seen.
Args:
file_set:
utterance_text:
pickle_filepath:
unseen:
count_threshold:
train_count:
num_unique_ids:
Returns:
"""
speaker_id_list = pickle.load(open(pickle_filepath, 'rb'))
if unseen:
unseen_str = "unseen"
else:
unseen_str = "seen"
# Directory name includes threshold/training counts
directory_name = f"only_enrollment_{utterance_text}_{count_threshold}_" \
f"threshold_{train_count}_train_count_{num_unique_ids}_unique_speakers_{unseen_str}"
specific_output_directory = os.path.join(OUTPUT_DIR, directory_name)
if not os.path.exists(specific_output_directory):
os.mkdir(specific_output_directory)
sound_files_containing_text = get_relevant_sound_files(file_set, utterance_text)
speaker_id_to_files = get_speaker_to_files_mapping(sound_files_containing_text)
valid_speaker_ids = [speaker_id for speaker_id, files in speaker_id_to_files.items()
if len(files) >= count_threshold and speaker_id in speaker_id_list]
# Only use a given number of unique speakers
if num_unique_ids > len(valid_speaker_ids):
print("Too many unique ids requested, using all unique speakers available:", len(valid_speaker_ids))
else:
valid_speaker_ids = valid_speaker_ids[:num_unique_ids]
test_queries_seen = []
test_queries_unseen = []
for speaker_id in valid_speaker_ids:
files = speaker_id_to_files[speaker_id]
random.shuffle(files)
selected_files = files[:count_threshold]
train_split = selected_files[:train_count]
test_split = selected_files[train_count:]
test_queries_seen.extend(train_split)
test_queries_unseen.extend(test_split)
print(f"Num queries for enrollment: {len(test_queries_seen)}, Num queries for test: {len(test_queries_unseen)}")
# Queries from users that the network does not train with, used to generate a corresponding d-vector
with open(os.path.join(specific_output_directory, 'enrollment_test_seen.txt'), 'w') as f:
for item in test_queries_seen:
f.write("%s\n" % item)
# Queries used for evaluation on d-vectors
with open(os.path.join(specific_output_directory, 'enrollment_test_unseen.txt'), 'w') as f:
for item in test_queries_unseen:
f.write("%s\n" % item)
# Create label dict
selected_test_files = test_queries_seen + test_queries_unseen
generate_label_dict(selected_test_files, specific_output_directory, "label_dict_enrollment_test.npy")
print("Output folder")
print(directory_name)
if __name__ == '__main__':
# UTTERANCE_COUNT_THRESHOLD = 10 # Min utterances for a speaker to be used
# UTTERANCE_TRAIN_COUNT = 6 # Number of files to use for training/enrollment vector generation depending
ENROLLMENT_TRAIN_FRACTION = 0.8
# random.seed(UTTERANCE_COUNT_THRESHOLD*UTTERANCE_TRAIN_COUNT)
file_set = get_all_files_recursive('/mnt/extradrive2/wakeword_data/')
# get_softmax_training_data_lists(file_set, "okay_webex", 30, 25)
# get_enrollment_training_data_lists(file_set, "okay_webex", 30, 25, .8)
UNIQUE_SPEAKERS = 100
for count, train in zip([30, 15, 10], [25, 10, 6]):
random.seed(count*train)
generate_enrollment_list(file_set,
"okay_webex",
"wakeword_file_lists/enrollment_10_threshold_6_train_count/enrollment_test_ids.p",
"True",
count, # Count threshold
train, # Queries used for d-vector generation
UNIQUE_SPEAKERS) # Num unique speakers to use
|
[
"alhuang10@gmail.com"
] |
alhuang10@gmail.com
|
8790627ceaf3ba374592b7991b77b81a46f1e13a
|
28969d0cfe474b5395686fc2f2093eab3dc1c99b
|
/29pygame/squish.py
|
53ca62883f1a83d3638bbf621db435d6b71391bf
|
[] |
no_license
|
strawwhat/pythonbasic
|
cf1a86d2c044b04119120e72fbf7f9eceb941d30
|
01e8b9fcdf7efa6d1fda6cf9b2e5a50c94d38b8f
|
refs/heads/master
| 2021-01-23T00:19:58.216208
| 2017-06-26T15:46:39
| 2017-06-26T15:46:39
| 85,714,746
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,160
|
py
|
#!/usr/bin/python
# *-*coding:utf-8 *-*
"""
29-4 主Game模块(Squish.py)
这个模块包括Squish游戏的主要游戏逻辑
"""
import os, sys, pygame
from pygame.locals import *
import objects, config
class State:
"""泛型游戏的超类,可以处理事件并且在给定的表面上显示自身"""
def handle(self, event):
"""只处理退出事件的默认事件处理"""
if event.type == QUIT:
sys.exit()
if event.type == KEYDOWN and event.key == K_ESCAPE:
sys.exit()
def firstDisplay(self, screen):
"""用于第一次显示状态,使用背景颜色填充边界"""
screen.fill(config.Background_color)
#调用flip,让更改可见
pygame.display.flip()
def display(self, screen):
"""
用于在已经显示过一次状态后再次显示。默认的行为是什么都不做
"""
pass
class Level(State):
"""
游戏等级。用于计算已经落下了多少秤砣,移动子图形以及其他和游戏逻辑相关的任务
"""
def __init__(self, number=1):
self.number = number
#本关还要落下多少秤砣?
self.remaining = config.Weights_per_level
speed = config.Drop_speed
#为每个大于1的等级都增加一个speed_increase
speed += (self.number-1) * config.Speed_increase
#创建秤砣和香蕉
self.weight = objects.Weight(speed)
self.banana = objects.Banana()
both = self.weight, self.banana #This could contain more sprites...
self.sprites = pygame.sprite.RenderUpdates(both)
def update(self, game):
"从前一帧更新游戏状态"
#更新所有子图形
self.sprites.update()
#如果香蕉碰到了秤砣,那么告诉游戏切换到GameOver状态
if self.banana.touches(self.weight):
game.nextState = GameOver()
#否则在秤砣落地使将其复位。如果本关内的所有秤砣都落下了,则让游戏切换到LevelCleared状态
elif self.weight.landed:
self.weight.reset()
self.remaining -= 1
if self.remaining == 0:
game.nextState = LevelCleared(self.number)
def display(self, screen):
"""
在第一次显示(只清空屏幕)后显示状态。与firshDisplay不同,这个方法使用
pygame.display.update对self.sprites.draw提供的、需要更新的矩形列表进行更新
"""
screen.fill(config.Background_color)
updates = self.sprites.draw(screen)
pygame.display.update(updates)
class Paused(State):
"""简单的暂停游戏状态。按下键盘上的任意键或点击鼠标都会结束这个状态"""
finished = 0 #用户结束暂停了吗
image = None #如果需要图片的话,将这个变量设定为文件名
text = '' #将它设定为一些提示性文本
def handle(self, event):
"""
用State的handle的方法处理一般退出事件,以及对按键和鼠标点击作出反映来处理事件
如果键被按下或者鼠标被点击,将self.finished设定为真
"""
State.handle(self, event)
if event.type in [MOUSEBUTTONDOWN, KEYDOWN]:
self.finished = 1
def update(self, game):
"""
更新等级。如果按键被按下或者鼠标被点击(比如self.finished为真),那么告诉
游戏切换到下一个由self.nextState()表示的状态(应该由子类实现)
"""
if self.finished:
game.nextState = self.nextState()
def firstDisplay(self, screen):
"""暂停状态的第一次出现,绘制图像(如果有的话)并且生成文本 """
#首先,使用填充背景色的方式清空屏幕
screen.fill(config.Background_color)
#使用默认的外观和指定的大小创建Font对象
font = pygame.font.Font(None, config.font_size)
#获取self.text中的文本行,忽略开头和结尾的空行
lines = self.text.strip().splitlines()
#计算文本的高度(使用font.get_linesize())以获取每行文本的像素高度
height = len(lines) * font.get_linesize()
#计算文本的放置位置(屏幕中心).屏幕高度的一半减去文本高度的一半
center, top = screen.get_rect().center
top -= height // 2 #264
#如果有图片要显示
if self.image:
#载入图片
image = pygame.image.load(self.image).convert()
#获取它的rect
r = image.get_rect()
#将图片向下移动到其高度的一半距离.
top += r.height // 2
#将图片放置在文本上方20像素处.
r.midbottom = center, top-20
#将图片移动到屏幕上
screen.blit(image, r)
antialias = 1 #Smooth the text
black = 0,0,0 #Render it as black
#生成所有行,从计算过的top开始,并且对于每一行向下移动font.get_linesize()像素
for line in lines:
text = font.render(line.strip(), antialias, black)
r = text.get_rect()
r.midtop = center, top
screen.blit(text, r)
top += font.get_linesize()
#显示所有更改
pygame.display.flip()
class Info(Paused):
"""简单的暂停状态,显示有关游戏的信息,在StartUp后显示"""
nextState = Level
text = '''
In this game you are a banana,
trying to survive a course in
self-defense against fruit, where the
participants will "defend" themselves
against you with a 16 ton weight
'''
class StartUp(Paused):
"""显示图片和欢迎信息的暂停状态,在Info状态前显示"""
nextState = Info
image = config.Splash_image
text = '''
Welcome to Squish,
the game of Fruit Self-Defense
'''
class LevelCleared(Paused):
"""提示用户过关的暂停状态。在next Level后显示"""
def __init__(self, number):
self.number = number
self.text = '''Level %i cleared
Click to start next level ''' % self.number
def nextState(self):
return Level(self.number+1)
class GameOver(Paused):
"""提示用户输掉游戏的状态"""
nextState = Level
text = '''
Game Over
Click to Restart, Esc to Quit'''
class Game:
"""负责主事件循环的游戏对象,任务包括在不同状态间切换"""
def __init__(self, *args):
#获取游戏和图像放置的目录
path = os.path.abspath(args[0])
dir = os.path.split(path)[0]
#移动哪个目录(这样图片文件可以在随后打开)
os.chdir(dir)
#无状态方式启动
self.state = None
#在第一个事件循环迭代中移动到StateUp
self.nextState = StartUp()
def run(self):
"""
这个方法动态设定变量,进行一些重要的初始化工作,并且进入主事件循环
"""
pygame.init()#初始化所有pygame模块
#决定以窗口模式还是全屏模式显示游戏
flag = 0
if config.full_screen:
#flag = FULLSCREEN
flag = RESIZABLE
screen_size = config.Screen_size
screen = pygame.display.set_mode(screen_size, flag)
pygame.display.set_caption('Fruit Self Defense')
pygame.mouse.set_visible(False)
#主循环
while True:
#1 如果nextState被修改了,那么移动到新状态,并且显示它(第一次)
if self.state != self.nextState:
self.state = self.nextState
self.state.firstDisplay(screen)
#2代理当前状态的事件处理
for event in pygame.event.get():
self.state.handle(event)
#3 更新当前状态
self.state.update(self)
#4显示当前状态
self.state.display(screen)
if __name__ == '__main__':
game = Game(*sys.argv)
game.run()
|
[
"bnm1122@yeah.net"
] |
bnm1122@yeah.net
|
1a8f004294ff64733c4322dbc48686f5461ec22d
|
c8f508ec84c88ec468db9b6d7989bff78e27fabb
|
/experiments/light_gbm.py
|
810cf522b6f97a64f540e26faf590269b6678ef2
|
[] |
no_license
|
jirivrany/kaggle-malware-detection
|
36cd1e4849485ad03e762c7766b5c178fc4ade01
|
49da5822313b162bb657dd5bc96022eb9c86a0a4
|
refs/heads/master
| 2020-04-12T04:24:26.970941
| 2019-02-18T09:11:26
| 2019-02-18T09:11:26
| 162,294,596
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,673
|
py
|
# coding: utf-8
import warnings
import gc
import pickle
import time
import kaggle
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from datetime import datetime as dt
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn import metrics
"""
E60
- num_thread benchmark
"""
print("script started: ", time.strftime("%b %d %Y %H:%M:%S"))
experinment_nr = 66
train_fname = '../input/train_catboost.pkl.gz'
test_fname = '../input/test_catboost.pkl.gz'
train = pd.read_pickle(train_fname, compression='gzip')
print("TRAIN LOADED")
target = pd.read_pickle('../input/target.pkl.gz', compression='gzip')
true_numerical_columns = [
'Census_ProcessorCoreCount',
'Census_PrimaryDiskTotalCapacity',
'Census_SystemVolumeTotalCapacity',
'Census_TotalPhysicalRAM',
'Census_InternalPrimaryDiagonalDisplaySizeInInches',
'Census_InternalPrimaryDisplayResolutionHorizontal',
'Census_InternalPrimaryDisplayResolutionVertical',
'Census_InternalBatteryNumberOfCharges'
]
binary_variables = [c for c in train.columns if train[c].nunique() == 2]
categorical_columns = [c for c in train.columns if c not in true_numerical_columns]
#max_iter = 3
gc.collect()
print("TRAIN PREPARED")
test = pd.read_pickle(test_fname, compression='gzip')
print("TEST LOADED")
# with open('../input/categoricals.pkl', 'rb') as pickle_file:
# categorical_columns = pickle.load(pickle_file)
#
#print("Categorical columns loaded:", categorical_columns)
gc.collect()
param = {
'num_threads': 27,
'num_leaves': 60,
'min_data_in_leaf': 60,
"boosting": "gbdt",
'objective': 'binary',
"metric": 'auc',
'max_depth': -1,
'learning_rate': 0.2,
"feature_fraction": 0.8,
"bagging_freq": 1,
"bagging_fraction": 0.8,
"bagging_seed": 11,
"lambda_l1": 0.1,
"random_state": 133,
"verbosity": -1
}
max_iter = 3
folds_nr = 3
task_name = '27 cores benchmark'
folds = KFold(n_splits=folds_nr, shuffle=True, random_state=15)
oof = np.zeros(len(train))
categorical_columns = [
c for c in categorical_columns if c not in ['MachineIdentifier']]
features = [c for c in train.columns if c not in ['MachineIdentifier']]
print("task {} started: {}".format(task_name, time.strftime("%b %d %Y %H:%M:%S")))
predictions = np.zeros(len(test))
feature_importance_df = pd.DataFrame()
score = [0 for _ in range(folds.n_splits)]
print("STARTING K-FOLD CV")
for fold_, (trn_idx, val_idx) in enumerate(folds.split(train.values, target.values)):
print("task {} starting fold nr {} at: {}".format(task_name, fold_, time.strftime("%b %d %Y %H:%M:%S")))
trn_data = lgb.Dataset(train.iloc[trn_idx][features],
label=target.iloc[trn_idx],
categorical_feature=categorical_columns
)
val_data = lgb.Dataset(train.iloc[val_idx][features],
label=target.iloc[val_idx],
categorical_feature=categorical_columns
)
num_round = 5200
clf = lgb.train(param,
trn_data,
num_round,
valid_sets=[trn_data, val_data],
verbose_eval=100,
early_stopping_rounds=200)
oof[val_idx] = clf.predict(
train.iloc[val_idx][features], num_iteration=clf.best_iteration)
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = features
fold_importance_df["importance"] = clf.feature_importance(
importance_type='gain')
fold_importance_df["fold"] = fold_ + 1
feature_importance_df = pd.concat(
[feature_importance_df, fold_importance_df], axis=0)
# we perform predictions by chunks
initial_idx = 0
chunk_size = 1000000
current_pred = np.zeros(len(test))
while initial_idx < test.shape[0]:
final_idx = min(initial_idx + chunk_size, test.shape[0])
idx = range(initial_idx, final_idx)
current_pred[idx] = clf.predict(
test.iloc[idx][features], num_iteration=clf.best_iteration)
initial_idx = final_idx
predictions += current_pred / min(folds.n_splits, max_iter)
score[fold_] = metrics.roc_auc_score(target.iloc[val_idx], oof[val_idx])
print("task {} finished fold nr {} at: {}".format(task_name, fold_, time.strftime("%b %d %Y %H:%M:%S")))
if fold_ == max_iter - 1:
break
print("task {} finished 3 FOLDS: {}".format(task_name, time.strftime("%b %d %Y %H:%M:%S")))
if (folds.n_splits == max_iter):
cv_score = metrics.roc_auc_score(target, oof)
else:
cv_score = sum(score) / max_iter
cv_score_printable = "{:<8.5f}".format(cv_score)
print("CV score: {}".format(cv_score_printable))
cv_score_printable = cv_score_printable.replace(".", "")
cv_score_printable = cv_score_printable.strip()
# Feature importance
cols = (feature_importance_df[["feature", "importance"]]
.groupby("feature")
.mean()
.sort_values(by="importance", ascending=False)[:1000].index)
best_features = feature_importance_df.loc[
feature_importance_df.feature.isin(cols)]
plt.figure(figsize=(14, 25))
sns.barplot(x="importance",
y="feature",
data=best_features.sort_values(by="importance",
ascending=False))
plt.title('LightGBM Features (avg over folds)')
plt.tight_layout()
plt.savefig(
'../img/e{}_lgbm_importances_{}.png'.format(experinment_nr, cv_score_printable))
feature_importance_df.to_csv(
'../EDA/e{}_lgbm_importances_{}.csv'.format(experinment_nr, cv_score_printable))
# submit predictions
sub_df = pd.read_csv('../input/sample_submission.csv')
sub_df["HasDetections"] = predictions
model_dir = '../output'
model_name = 'submit_e{}_cv{}_{}.csv.gz'.format(
experinment_nr, cv_score_printable, dt.now().strftime('%Y-%m-%d-%H-%M'))
fname = os.path.join(model_dir, model_name)
param_string = ', '.join(('{}: {}'.format(k, v) for k, v in param.items()))
message = 'CV: {} DATA: {} LGBM params: {}'.format(
cv_score_printable, train_fname, param_string)
competition = 'microsoft-malware-prediction'
sub_df.to_csv(fname, compression='gzip', index=False)
#kaggle.api.competition_submit(os.path.abspath(fname), message, competition)
print("task {} finished: {}".format(task_name, time.strftime("%b %d %Y %H:%M:%S")))
print("script finished: ", time.strftime("%b %d %Y %H:%M:%S"))
|
[
"jiri.vrany@tul.cz"
] |
jiri.vrany@tul.cz
|
96fd8f586922bfca585450a5943f4a0948e56b09
|
b42ccc28946274e91d1b91dc778f1ec76724ea5f
|
/authProj/settings.py
|
d6e28108755f7977598b4612955293c7379019c8
|
[] |
no_license
|
vlad508/authProjBack
|
30cc90f332cbce85842efaecc52484b01a9e3d30
|
536641b3809bc410b7cbe329558963c83e5b793e
|
refs/heads/master
| 2022-11-08T14:36:48.597109
| 2020-06-12T07:48:33
| 2020-06-16T08:32:51
| 271,738,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,848
|
py
|
"""
Django settings for authProj project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j__99bn%&si@u9kzuhfa-ztzxa3ilu52tsjh)t5$1^sn_7dxm3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '161.35.199.210']
CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'user',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = 'authProj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'authProj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'myproject',
'USER': 'myprojectuser',
'PASSWORD': 'password',
'HOST': 'localhost',
'PORT': ''
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = [
os.path.join(BASE_DIR, 'django_blog/static')
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
],
}
|
[
"vlad508@gmail.com"
] |
vlad508@gmail.com
|
ccdee4252fa5b289fd38d1f0e47ebc13bf975a80
|
0f900a22552d9b94c1c86784e76e3935a1e7bdde
|
/IssueBook.py
|
967ed2f0b21965d0882d64b4218f83578b2ebf96
|
[] |
no_license
|
Mini-Bong/LMS_Python_Tkinter
|
c24ef6a37015ca94bbec65c2559b6420d68ac5ba
|
7a3ff836eff0156eea65aa94b849c0b0a23f9028
|
refs/heads/master
| 2023-03-24T05:15:00.155730
| 2021-03-21T06:11:59
| 2021-03-21T06:11:59
| 308,681,642
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,531
|
py
|
import tkinter as tk
from tkinter import *
import pymysql
from PIL import ImageTk, Image
from tkinter import messagebox
dbName = "LMSdb"
empTable = "empdetail"
stuTable = "studetail"
bookTable = "books"
issueBookTable = "issueBookDetail"
count = 0
connector = pymysql.connect(host ='localhost', user = 'root', database = dbName)
cur = connector.cursor()
allRollNum = []
allEmpId = []
allBookId =[]
def issue():
global issueBtn, labelFrame, lb1, en1, en2, en3, quitBtn, window, Canvas1, status
book_id = en1.get()
issue_to = en2.get()
issue_be = en3.get()
issueBtn.destroy()
quitBtn.destroy()
labelFrame.destroy()
lb1.destroy()
en1.destroy()
en2.destroy()
en3.destroy()
extract_book_id = "select bookId from "+bookTable
try:
cur.execute(extract_book_id)
connector.commit()
for i in cur:
allBookId.append(i[0])
if book_id in allBookId:
checkAvail = "select status from "+bookTable+" where bookId = '"+book_id+"'"
cur.execute(checkAvail)
connector.commit()
for i in cur:
check = i[0]
def issuebook():
global en1, en2, en3, issueBtn, lb1, labelFrame, quitBtn, Canvas1, window, status
window = Tk()
window.title("Library")
window.minsize(width = 400, height = 400)
window.geometry("600x600")
same = True
background_image =Image.open("image.jpg")
[imageSizeWidth, imageSizeHeight] = background_image.size
imageSizeWidth = int(imageSizeWidth/5)
imageSizeHeight = int(imageSizeHeight/5)
background_image = background_image.resize((imageSizeWidth,imageSizeHeight),Image.ANTIALIAS)
Canvas1 = Canvas(window)
Canvas1.config(bg="#706fd3",width = imageSizeWidth, height = imageSizeHeight)
Canvas1.pack(expand=True,fill=BOTH)
labelFrame = Frame(window, bg = 'black')
labelFrame.place(relx = 0.1, rely = 0.3, relwidth = 0.8, relheight = 0.3)
headingframe1 = Frame(window, bg = '#333945', bd = 5)
headingframe1.place(relx = 0.25, rely = 0.1, relwidth = 0.5, relheight = 0.13)
headingframe2 = Frame(headingframe1, bg = '#EAF0F1')
headingframe2.place(relx = 0.01, rely = 0.05, relwidth = 0.98, relheight = 0.9)
headingLabel = Label(headingframe2, text = 'ISSUE BOOK', fg = 'black')
headingLabel.place(relx = 0.25, rely = 0.15, relwidth = 0.5, relheight = 0.5)
#Book id
lb1 = Label(labelFrame, text = 'Book Id: ', bg = 'black', fg = 'white')
lb1.place(relx= 0.05, rely = 0.2)
en1 = Entry(labelFrame)
en1.place(relx = 0.3, rely = 0.2, relwidth = 0.62)
#issue to roll number
lb2 = Label(labelFrame, text = 'Issued to(roll number): ' ,bg ='black', fg = 'white')
lb2.place(relx = 0.05, rely = 0.4)
en2 = Entry(labelFrame)
en2.place(relx = 0.3, rely = 0.4, relwidth = 0.62)
#issue book employes number
lb3 = Label(labelFrame, text = "Issued by(EmailId): ", bg = 'black', fg = 'white')
lb3.place(relx = 0.05, rely = 0.6)
en3 = Entry(labelFrame)
en3.place(relx = 0.3, rely = 0.6, relwidth = 0.62)
#Issue button
issueBtn = Button(window, text = 'Issue', bg = '#d1ccc0', fg = 'black', command = issue)
issueBtn.place(relx = 0.28, rely = 0.75, relwidth = 0.18, relheight = 0.08)
#quit button
quitBtn = Button(window, text = 'Quit', bg = '#aaa69d', fg = 'black', command = window.quit)
quitBtn.place(relx =0.53, rely = 0.75, relwidth = 0.18, relheight = 0.08)
issuebook()
window.mainloop()
|
[
"mg550210@gmail.com"
] |
mg550210@gmail.com
|
65130fb22e04cf64a527af5eee14abb9c660c809
|
880b795f6aab85f0f7156127842368cfab46969e
|
/Python/25_libreria_requests.py
|
7b5895b2377e9dd6ed2890b6e27326b4090fada5
|
[] |
no_license
|
jovannygomez/CursoLeonEoiPythonDjango
|
0d3c40ef50d6c6954d5c63dfc63c772d4705ee5d
|
346e0e95df71e9fbeee678d4592e2205eabec5be
|
refs/heads/master
| 2021-08-31T04:45:31.254196
| 2017-12-20T12:01:55
| 2017-12-20T12:01:55
| 114,881,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
"""
La libreria requests simplifica mucho el trabajo con llamadas y respuestas http
https://
"""
import requests
response = requests.get('https://httpbin.org/ip')
ip = response.json()['origin']
print('tu ip es', ip)
response = requests.get('https://swapi.co/api/people/')
people = response.json()['results']
for person in people:
print(person['name'])
|
[
"jovanny-145@outlook.com"
] |
jovanny-145@outlook.com
|
4b335c4d07a217713be2e5111abdcd20636a6b37
|
326142a582c8864240064692a6500dc12da91697
|
/73_Set_Matrix_Zeroes.py
|
2b600d488345bac320111b72d29f3cab61e917b7
|
[] |
no_license
|
qscez2001/leetcode
|
960e775f3c7190ea2f2c3078a25714aafaf8801b
|
f124203c13b2e539acc7a863ec9b1a56363b1f96
|
refs/heads/master
| 2022-02-26T03:35:14.308765
| 2022-02-07T07:22:48
| 2022-02-07T07:22:48
| 238,398,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
'''
Given an m x n matrix. If an element is 0, set its entire row and column to 0. Do it in-place.
Follow up:
A straight forward solution using O(mn) space is probably a bad idea.
A simple improvement uses O(m + n) space, but still not the best solution.
Could you devise a constant space solution?
Example 1:
Input: matrix = [[1,1,1],[1,0,1],[1,1,1]]
Output: [[1,0,1],[0,0,0],[1,0,1]]
Example 2:
Input: matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]
Output: [[0,0,0,0],[0,4,5,0],[0,3,1,0]]
'''
def setZeroes(matrix):
list_i = []
list_j = []
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if matrix[i][j] == 0:
list_i.append(i)
list_j.append(j)
# print(list_i)
# print(list_j)
for i in list_i:
matrix[i] = [0] * len(matrix[i])
for j in list_j:
for i in range(len(matrix)):
matrix[i][j] = 0
matrix = [[1,1,1],[1,0,1],[1,1,1]]
setZeroes(matrix)
print(matrix)
matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]
setZeroes(matrix)
print(matrix)
|
[
"qscez2001@gmail.com"
] |
qscez2001@gmail.com
|
3bc801af96cf998efd961d2ff892da8cd5f95e93
|
3de11c5630cad4ca816ad17dd2f6c743b8799108
|
/djangorestframework/tutorial/tutorial/settings.py
|
57a3ef605fb5ea039f858ff6b08cc8fa7ff71296
|
[] |
no_license
|
greenfrog82/TIL_Python
|
a6f03b0ae6f2260310faa5ef59d4bd01dcf6a1ed
|
015116c5ff4a14f531e3693f9cfd3a921a674b81
|
refs/heads/master
| 2022-12-09T22:34:49.485937
| 2021-05-11T10:59:41
| 2021-05-11T10:59:41
| 154,969,150
| 0
| 1
| null | 2022-12-08T01:20:11
| 2018-10-27T13:44:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,216
|
py
|
"""
Django settings for tutorial project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import datetime
from django.core.management.utils import get_random_secret_key
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%pt7&cwica7@md!culsrv)0u)v$p*)ivk2-w5&lgv^5&2q5h7%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
# 'allauth',
# 'allauth.account',
'rest_auth.registration',
'snippets.apps.SnippetsConfig',
# 'users.apps.UsersConfig',
# 'comment.apps.CommnetConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
}
# JWT_AUTH = {
# 'JWT_EXPIRATION_DELTA': datetime.timedelta(minutes=15),
# }
CUSTOM_USER_CONFIG = {
'PAGINATION_INFO': {
'PAGE_SIZE': 5,
'MAX_PAGE_SIZE': 10000
}
}
# For django-rest-auth
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_ID = 1
ACCOUNT_EMAIL_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_VERIFICATION = 'optional'
# For Hash ID
HASHID_FIELD_SALT = get_random_secret_key()
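# Usage note (illustrative, not part of the original settings): with TokenAuthentication
# enabled in REST_FRAMEWORK above, clients first obtain a token (e.g. via the rest_auth
# login endpoint) and then send it on every request in the Authorization header.
# The token value and URL below are placeholders:
#
#   curl -H "Authorization: Token 9944b09199c62bcf9418ad846dd0e4bbdfc6ee4b" \
#        http://localhost:8000/snippets/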
|
[
"greenfrog82@naver.com"
] |
greenfrog82@naver.com
|
ad848a843635a1019e33e979d3ae7126f685e1b6
|
a9bedca7e85106c92045a8399f5aca009d73ed43
|
/weather/middlewares.py
|
755a7341f46b1f82aa41a7bb5c37c9797b03ff2f
|
[] |
no_license
|
Ruabye/ChinaWeather
|
d6f9506e5825f38ccda32ac943682208450a39f9
|
f7119d5008b5327e7339c1c4b70ac419cc274e71
|
refs/heads/master
| 2022-12-10T15:58:28.888858
| 2020-09-20T00:49:17
| 2020-09-20T00:49:17
| 296,968,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,583
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class WeatherSpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class WeatherDownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
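# Note (sketch, not part of the original file): these middlewares only take effect once
# they are enabled in the project's settings.py. The ordering numbers below are the
# standard Scrapy template values and are illustrative:
#
#   SPIDER_MIDDLEWARES = {
#       'weather.middlewares.WeatherSpiderMiddleware': 543,
#   }
#   DOWNLOADER_MIDDLEWARES = {
#       'weather.middlewares.WeatherDownloaderMiddleware': 543,
#   }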
|
[
"rye8421bcd@126.com"
] |
rye8421bcd@126.com
|
fe45798e409363ccd46457c0363668f8f1751687
|
b7d818def2a72abf31df102303b59e35ad002ba2
|
/midwares/ml_alert.py
|
abf7855964b5a5a31fb469a01d62e4d285b6dca2
|
[] |
no_license
|
wisehead/python_lib
|
b0f2bd1881cb5398c6cf27ac0431d39689d3aeb3
|
ae9f9252b370a1a74dc60a56db39567770e7f797
|
refs/heads/master
| 2022-09-05T07:48:38.414479
| 2022-08-20T15:01:38
| 2022-08-20T15:01:38
| 134,382,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,862
|
py
|
#!/usr/bin/python
"""
use the python code to fetch the SQLs from suspect_sql_new, then analyze them
with a machine learning algorithm and insert any injection SQL into ml_ids_event_new.
Then the alert perl script will read it out and send it to the security department.
"""
# -*- coding: UTF-8 -*-
import MySQLdb
from socket import *
#import socket
import time
import commands
CODE_VERSION = "1.0.0.5"
# fix-me: machine learning server is hard coded here, not good.
HOST = '127.0.0.1'
PORT = 21566
BUFSIZ = 1024
ADDR = (HOST, PORT)
always_true = 1
while (always_true == 1):
# open the database connection.
# fix-me: database config is hard coded here, which is not good.
db = MySQLdb.connect(host="127.0.0.1", port=xxxx, user="xxx",
passwd="xxxx", db="xxxx")
# use cursor() method to get the cursor.
cursor = db.cursor()
commands.getstatusoutput('touch the_last_scan_id.txt')
file_object = open('the_last_scan_id.txt')
try:
the_last_scan_id_str = file_object.read()
except:
print "the the_last_scan_id.txt is empty."
finally:
file_object.close()
if the_last_scan_id_str == "":
print "the_last_scan_id is empty."
sql = " select auto_increment from information_schema.tables where table_name = \
'suspect_sql_new' and table_schema = 'dbsec_ids_2016'"
cursor.execute(sql)
results = cursor.fetchall()
for row in results:
the_last_scan_id = row[0]
print "the_last_scan_id is: %d" % the_last_scan_id
else:
print "the_last_scan_id is: %s" % the_last_scan_id_str
the_last_scan_id = int(the_last_scan_id_str)
# SQL Query
sql = "select * from suspect_sql_new where id>%d " % the_last_scan_id
try:
# execute the SQL
cursor.execute(sql)
# fetch all the records
results = cursor.fetchall()
for row in results:
id = row[0]
md5 = row[1]
logstash_id = row[2]
alarm_type = row[3]
intrude_time = row[4]
dbhost = row[5]
port = row[6]
user = row[7]
srchost = row[8]
dbname = row[9]
tblname = row[10]
querycount = row[11]
createtime = row[12]
logtype = row[13]
sql_text = row[14]
dba = row[15]
rd = row[16]
status = row[17]
appname = row[18]
op = row[19]
cor_id = row[20]
# print the output
print "intrude_time=%s,sql_text=%s" % \
(intrude_time, sql_text)
if id > the_last_scan_id:
the_last_scan_id = id
try:
tcpCliSock = socket(AF_INET, SOCK_STREAM)
tcpCliSock.connect(ADDR)
tcpCliSock.send(sql_text)
data = tcpCliSock.recv(BUFSIZ)
if not data:
print "Error: socket get no response data"
break
print data
if data == "1":
print "injection"
cursor_insert = db.cursor()
sql_insert = "INSERT INTO ml_ids_event_new(md5,logstash_id,alarm_type, \
intrude_time,dbhost,port,user,srchost,dbname,tblname,querycount,createtime, \
logtype, sql_text,dba,rd,status,appname,op,cor_id) VALUES('%ld', '%s', '%s', \
'%s', '%d', '%d', '%s', '%s', '%s', '%s', '%d', '%s', '%s', '%s', '%s', '%s',\
'%d', '%s', '%s', '%ld')" % \
(md5, logstash_id, 'machine learning', intrude_time, dbhost, port, user, \
srchost, dbname, tblname, querycount, createtime, logtype, sql_text, dba, \
rd, status, appname, op, cor_id)
print "SQL_INSERT is: %s" % sql_insert
try:
cursor_insert.execute('INSERT INTO ml_ids_event_new(md5,alarm_type, \
intrude_time,dbhost,port,user,srchost,dbname,tblname,querycount, \
createtime,logtype, sql_text,dba,rd,status,appname,op,cor_id) \
values("%ld", "%s", "%s", "%d", "%d", "%s", "%s", "%s", "%s", "%d", \
"%s", "%s", "%s", "%s", "%s", "%d", "%s", "%s", "%ld")' % \
(md5, 'machine learning', intrude_time, dbhost, port, user, srchost, \
dbname, tblname, querycount, createtime, logtype, sql_text, dba, rd, \
status, appname, op, cor_id))
# we need to commit the DML. otherwise it might be lost.
db.commit()
except Exception as e:
print 'str(Exception):%s\t', str(Exception)
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print 'e.message:%s\t', e.message
print "Error: insert error"
#roll back if there are any errors.
db.rollback()
else:
print "normal"
tcpCliSock.close()
except:
print "Error: Socket error"
except:
print "Error: unable to fecth data"
print "the_last_scan_id is: %d" % the_last_scan_id
the_last_scan_id_str = str(the_last_scan_id)
file_object = open('the_last_scan_id.txt', 'w')
try:
the_last_scan_id_str = file_object.write(the_last_scan_id_str)
except:
print "write the_last_scan_id.txt failed."
finally:
file_object.close()
#close the database connection.
db.close()
time.sleep(10)
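# Note (sketch, not part of the original script): MySQLdb also supports parameterized
# queries, which avoids the quoting/escaping pitfalls of building the INSERT above with
# string formatting. The same insert with placeholders would look roughly like:
#
#   cursor_insert.execute(
#       "INSERT INTO ml_ids_event_new(md5, alarm_type, intrude_time, dbhost, port, "
#       "user, srchost, dbname, tblname, querycount, createtime, logtype, sql_text, "
#       "dba, rd, status, appname, op, cor_id) VALUES "
#       "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
#       (md5, 'machine learning', intrude_time, dbhost, port, user, srchost, dbname,
#        tblname, querycount, createtime, logtype, sql_text, dba, rd, status, appname,
#        op, cor_id))
#   db.commit()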
|
[
"alex.chenhui@gmail.com"
] |
alex.chenhui@gmail.com
|
219150e42458beefd2bdfb1d0c2dce9894c0da99
|
509bcad7c66c86d7abb19eacc22a623f8b118017
|
/tornado_study/second_day/second_1_re_url.py
|
aed4c894832100b869d36dd2a3c575773547630b
|
[] |
no_license
|
yanliangchen/IT-notes
|
9da6a2a1fbf142216bb76c27d06f5d3c83453f47
|
0ec660d050b98721702aa3c865443d390bad6a09
|
refs/heads/master
| 2022-12-13T21:47:26.841777
| 2019-10-11T09:37:55
| 2019-10-11T09:37:55
| 139,413,722
| 2
| 2
| null | 2022-11-22T01:57:46
| 2018-07-02T08:27:46
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
# coding:utf-8
import tornado.web
import tornado.httpserver
import tornado.ioloop
import config
# data format specification
import json
class IndexHandler(tornado.web.RequestHandler):
    # generic handler interface
    # the response message can also be customized
    # in between you may sometimes see a 304 (cache, Not Modified) status code
    def set_default_headers(self): # global header
self.set_header('Content-Type', 'application/json; charset=UTF-8')
def get(self,*args,**kwargs):
stu_info = {
'name':'你好',
'age':6,
}
        # return a JSON-format string to the browser (the default response type would be text/html)
self.write(json.dumps(stu_info))
        # manually set the status code
self.set_status(500)
        # 302 means redirect
        # 400 means bad request (see the reference Word document for details)
class SubjectHandler(tornado.web.RequestHandler):
    # these two parameters receive the groups captured by the route pattern
def get(self,a,b,*args,**kwargs):
print(a,b)
        # write() goes to the output buffer; each call appends to the buffer once
self.write('subject ok')
self.write('index')
class Err404Handler(tornado.web.RequestHandler):
def get(self, *args, **kwargs):
self.set_status(404)
        # first version: return plain text
        # self.write('This page is no longer on this planet')
        # second version: render a template page
self.render('404.html')
if __name__ == '__main__':
app = tornado.web.Application(
[
(r'/',IndexHandler),
            # route: group 1 (.+) matches 1 to n arbitrary characters
(r'/subject/(.+)/([0-9A-Za-z]+)',SubjectHandler),
            # custom 404 page
(r'.*',Err404Handler)
],
**config.settings
)
http_server = tornado.httpserver.HTTPServer(app)
http_server.bind(config.port)
http_server.start()
tornado.ioloop.IOLoop.current().start()
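# The config module imported above is not shown in this file. A minimal config.py that
# matches the usage here (config.settings unpacked into Application, config.port passed
# to bind) might look like the sketch below -- the values are illustrative only:
#
#   import os
#
#   BASE_DIR = os.path.dirname(os.path.abspath(__file__))
#
#   port = 8000
#   settings = {
#       'debug': True,
#       'template_path': os.path.join(BASE_DIR, 'templates'),
#       'static_path': os.path.join(BASE_DIR, 'static'),
#   }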
|
[
"18518274668@163.com"
] |
18518274668@163.com
|
8e38fa863780ad5366169fad878261030b099615
|
95fce19d57e9e9e72d73bdd6f902b882f4de4713
|
/01_hall_1.py
|
25bd0eed6449d28537b690c304b21f895fe51450
|
[] |
no_license
|
alexmihaila64/Sunfounder_SensorKit_Python_code_for_RaspberryPi
|
af270a4dc2f57b99373406abda88c7492d29a6ab
|
7c218720ee1c83daabe77b705cf8846c3b545558
|
refs/heads/master
| 2021-01-15T14:50:46.154356
| 2015-05-23T04:37:48
| 2015-05-23T04:37:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
#!/usr/bin/env python
import RPi.GPIO as GPIO
HallPin = 11 # pin11 --- hall
LedPin = 12 # pin12 --- led
def setup():
GPIO.setmode(GPIO.BOARD) # Numbers GPIOs by physical location
GPIO.setup(LedPin, GPIO.OUT) # Set LedPin's mode is output
GPIO.setup(HallPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.output(LedPin, GPIO.HIGH) # Set LedPin high(+3.3V) to off led
def loop():
while True:
if GPIO.input(HallPin) == GPIO.LOW:
print '...led on'
GPIO.output(LedPin, GPIO.LOW) # led on
else:
print 'led off...'
GPIO.output(LedPin, GPIO.HIGH) # led off
def destroy():
GPIO.output(LedPin, GPIO.HIGH) # led off
GPIO.cleanup() # Release resource
if __name__ == '__main__': # Program start from here
setup()
try:
loop()
except KeyboardInterrupt: # When 'Ctrl+C' is pressed, the child program destroy() will be executed.
destroy()
|
[
"chuanmorison@gmail.com"
] |
chuanmorison@gmail.com
|
ccffdde7de02461543a3f4f909b19626b7520c9f
|
f516b7561b93f640bcb376766a7ecc3440dcbb99
|
/leetcode/easy/add-binary.py
|
a7a66ad52358184d587c15dba4b509ef2bcc902c
|
[
"Apache-2.0"
] |
permissive
|
vtemian/interviews-prep
|
c41e1399cdaac9653c76d09598612f7450e6d302
|
ddef96b5ecc699a590376a892a804c143fe18034
|
refs/heads/master
| 2020-04-30T15:44:42.116286
| 2019-09-10T19:41:41
| 2019-09-10T19:41:41
| 176,928,167
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
class Solution(object):
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
if len(b) > len(a):
a, b = b, a
a = a[::-1]
b = b[::-1]
count = 0
remainder = 0
result = ""
while count < len(b):
b_a = a[count]
b_b = b[count]
result += str((int(b_a) + int(b_b) + remainder) % 2)
            remainder = (int(b_a) + int(b_b) + remainder) // 2
count += 1
while count < len(a):
b_a = a[count]
result += str((int(b_a) + remainder) % 2)
            remainder = (int(b_a) + remainder) // 2
count += 1
if remainder:
result += str(remainder)
return result[::-1]
result = Solution().addBinary('1010', '1011')
print(result)
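# Sanity-check sketch (not part of the original solution): the same result can be
# obtained with Python's built-in base-2 conversion; the class above does the addition
# manually on purpose, as the exercise intends.
def add_binary_builtin(a, b):
    return bin(int(a, 2) + int(b, 2))[2:]

print(add_binary_builtin('1010', '1011'))  # '10101'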
|
[
"vladtemian@gmail.com"
] |
vladtemian@gmail.com
|
a52a3c4e94bdbe454575ff4da9d26c86f6ef4074
|
e05a68437fbc62eeaebad57f39029a6db9735876
|
/attention/models_attention_bottom_separate.py
|
d89e6efcfa399a1e1f08c5a327ed3d78f0610c69
|
[
"MIT"
] |
permissive
|
OanaMariaCamburu/e-SNLI
|
9883ecb737cc9282608abb2d4d09cd63e05aad4f
|
7b585a3f077fdea899780eb0473940522ae44a2e
|
refs/heads/master
| 2022-05-26T20:02:56.909008
| 2022-04-29T08:21:03
| 2022-04-29T08:21:03
| 154,048,758
| 158
| 31
|
MIT
| 2020-05-02T15:06:52
| 2018-10-21T20:21:07
|
Python
|
UTF-8
|
Python
| false
| false
| 20,031
|
py
|
"""
The initialization only uses the premise and hypothesis embeddings but not the diff_product
"""
import numpy as np
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
sys.path.append("..")
from mutils import get_keys_from_vals, assert_sizes
def array_all_true(arr):
for i in arr:
if i == False:
return False
return True
"""
AttentionDecoder for the explanation
"""
class AttentionDecoder(nn.Module):
def __init__(self, config):
super(AttentionDecoder, self).__init__()
self.decoder_type = config['decoder_type']
self.word_emb_dim = config['word_emb_dim']
self.dec_rnn_dim = config['dec_rnn_dim']
self.enc_rnn_dim = config['enc_rnn_dim']
self.dpout_dec = config['dpout_dec']
self.n_vocab = config['n_vocab']
self.word_index = config['word_index']
self.word_vec = config['word_vec']
self.max_T_decoder = config['max_T_decoder']
self.max_T_encoder = config['max_T_encoder']
self.n_layers_dec = config['n_layers_dec']
# for decoder intial state
self.use_init = config['use_init']
# attention type: dot product or linear layer
self.att_type = config['att_type'] # 'lin' or 'dot'
# whether to visualize attention weights
self.att_hid_dim =config['att_hid_dim']
self.sent_dim = 2 * config['enc_rnn_dim']
if config['encoder_type'] in ["ConvNetEncoder", "InnerAttentionMILAEncoder"]:
self.sent_dim = 4 * self.sent_dim
if config['encoder_type'] == "LSTMEncoder":
self.sent_dim = self.sent_dim / 2
assert self.sent_dim == 4096, str(self.sent_dim)
# TODO: remove this when implemented linear attention
assert self.att_type == 'dot'
self.context_proj = nn.Linear(4 * self.sent_dim, self.dec_rnn_dim)
self.att_ht_proj1 = nn.Sequential(
nn.Linear(self.sent_dim, self.att_hid_dim),
nn.Tanh(),
)
self.att_context_proj1 = nn.Sequential(
nn.Linear(self.dec_rnn_dim, self.att_hid_dim),
nn.Tanh(),
)
self.att_ht_before_weighting_proj1 = nn.Sequential(
nn.Linear(self.sent_dim, self.att_hid_dim),
nn.Tanh(),
)
self.att_ht_proj2 = nn.Sequential(
nn.Linear(self.sent_dim, self.att_hid_dim),
nn.Tanh(),
)
self.att_context_proj2 = nn.Sequential(
nn.Linear(self.dec_rnn_dim, self.att_hid_dim),
nn.Tanh(),
)
self.att_ht_before_weighting_proj2 = nn.Sequential(
nn.Linear(self.sent_dim, self.att_hid_dim),
nn.Tanh(),
)
self.proj_inp_dec = nn.Linear(2 * self.att_hid_dim + self.word_emb_dim, self.dec_rnn_dim)
if self.decoder_type == 'gru':
self.decoder_rnn = nn.GRU(self.dec_rnn_dim, self.dec_rnn_dim, self.n_layers_dec, bidirectional=False, dropout=self.dpout_dec)
else: # 'lstm'
self.decoder_rnn = nn.LSTM(self.dec_rnn_dim, self.dec_rnn_dim, self.n_layers_dec, bidirectional=False, dropout=self.dpout_dec)
# att softmax
self.softmax_att = nn.Softmax(2)
# vocab layer
self.vocab_layer = nn.Linear(self.dec_rnn_dim, self.n_vocab)
def forward(self, expl, enc_out_s1, enc_out_s2, s1_embed, s2_embed, mode, visualize):
# expl: Variable(seqlen x bsize x worddim)
# s1/2_embed: Variable(bsize x sent_dim)
assert mode in ['forloop', 'teacher'], mode
current_T_dec = expl.size(0)
batch_size = expl.size(1)
assert_sizes(s1_embed, 2, [batch_size, self.sent_dim])
assert_sizes(s2_embed, 2, [batch_size, self.sent_dim])
assert_sizes(expl, 3, [current_T_dec, batch_size, self.word_emb_dim])
assert_sizes(enc_out_s1, 3, [self.max_T_encoder, batch_size, 2 * self.enc_rnn_dim])
assert_sizes(enc_out_s2, 3, [self.max_T_encoder, batch_size, 2 * self.enc_rnn_dim])
context = torch.cat([s1_embed, s2_embed, torch.abs(s1_embed - s2_embed), s1_embed * s2_embed], 1).unsqueeze(0)
assert_sizes(context, 3, [1, batch_size, 4 * self.sent_dim])
# init decoder
if self.use_init:
init_0 = self.context_proj(context).expand(self.n_layers_dec, batch_size, self.dec_rnn_dim)
else:
init_0 = Variable(torch.zeros(self.n_layers_dec, batch_size, self.dec_rnn_dim)).cuda()
init_state = init_0
if self.decoder_type == 'lstm':
init_state = (init_0, init_0)
self.decoder_rnn.flatten_parameters()
out_expl = None
state_t = init_state
context = self.context_proj(context)
if mode == "teacher":
for t_dec in range(current_T_dec):
# attention over premise
context1 = self.att_context_proj1(context).permute(1, 0, 2)
assert_sizes(context1, 3, [batch_size, 1, self.att_hid_dim])
inp_att_1 = self.att_ht_proj1(enc_out_s1).transpose(1,0).transpose(2,1)
assert_sizes(inp_att_1, 3, [batch_size, self.att_hid_dim, self.max_T_encoder])
dot_prod_att_1 = torch.bmm(context1, inp_att_1)
assert_sizes(dot_prod_att_1, 3, [batch_size, 1, self.max_T_encoder])
att_weights_1 = self.softmax_att(dot_prod_att_1)
assert_sizes(att_weights_1, 3, [batch_size, 1, self.max_T_encoder])
att_applied_1 = torch.bmm(att_weights_1, self.att_ht_before_weighting_proj1(enc_out_s1).permute(1, 0, 2))
assert_sizes(att_applied_1, 3, [batch_size, 1, self.att_hid_dim])
att_applied_perm_1 = att_applied_1.permute(1, 0, 2)
assert_sizes(att_applied_perm_1, 3, [1, batch_size, self.att_hid_dim])
# attention over hypothesis
context2 = self.att_context_proj2(context).permute(1, 0, 2)
assert_sizes(context2, 3, [batch_size, 1, self.att_hid_dim])
inp_att_2 = self.att_ht_proj2(enc_out_s2).transpose(1,0).transpose(2,1)
assert_sizes(inp_att_2, 3, [batch_size, self.att_hid_dim, self.max_T_encoder])
dot_prod_att_2 = torch.bmm(context2, inp_att_2)
assert_sizes(dot_prod_att_2, 3, [batch_size, 1, self.max_T_encoder])
att_weights_2 = self.softmax_att(dot_prod_att_2)
assert_sizes(att_weights_2, 3, [batch_size, 1, self.max_T_encoder])
att_applied_2 = torch.bmm(att_weights_2, self.att_ht_before_weighting_proj2(enc_out_s2).permute(1, 0, 2))
assert_sizes(att_applied_2, 3, [batch_size, 1, self.att_hid_dim])
att_applied_perm_2 = att_applied_2.permute(1, 0, 2)
assert_sizes(att_applied_perm_2, 3, [1, batch_size, self.att_hid_dim])
input_dec = torch.cat([expl[t_dec].unsqueeze(0), att_applied_perm_1, att_applied_perm_2], 2)
input_dec = nn.Dropout(self.dpout_dec)(self.proj_inp_dec(input_dec))
out_dec, state_t = self.decoder_rnn(input_dec, state_t)
assert_sizes(out_dec, 3, [1, batch_size, self.dec_rnn_dim])
if self.decoder_type == 'lstm':
context = state_t[0]
else:
context = state_t
if out_expl is None:
out_expl = out_dec
else:
out_expl = torch.cat([out_expl, out_dec], 0)
out_expl = self.vocab_layer(out_expl)
assert_sizes(out_expl, 3, [current_T_dec, batch_size, self.n_vocab])
return out_expl
else:
pred_expls = []
finished = []
for i in range(batch_size):
pred_expls.append("")
finished.append(False)
t_dec = 0
word_t = expl[0].unsqueeze(0)
while t_dec < self.max_T_decoder and not array_all_true(finished):
#print "\n\n\n t: ", t_dec
assert_sizes(word_t, 3, [1, batch_size, self.word_emb_dim])
word_embed = torch.zeros(1, batch_size, self.word_emb_dim)
# attention over premise
context1 = self.att_context_proj1(context).permute(1, 0, 2)
assert_sizes(context1, 3, [batch_size, 1, self.att_hid_dim])
inp_att_1 = self.att_ht_proj1(enc_out_s1).transpose(1,0).transpose(2,1)
assert_sizes(inp_att_1, 3, [batch_size, self.att_hid_dim, self.max_T_encoder])
dot_prod_att_1 = torch.bmm(context1, inp_att_1)
assert_sizes(dot_prod_att_1, 3, [batch_size, 1, self.max_T_encoder])
att_weights_1 = self.softmax_att(dot_prod_att_1)
assert_sizes(att_weights_1, 3, [batch_size, 1, self.max_T_encoder])
att_applied_1 = torch.bmm(att_weights_1, self.att_ht_before_weighting_proj1(enc_out_s1).permute(1, 0, 2))
assert_sizes(att_applied_1, 3, [batch_size, 1, self.att_hid_dim])
att_applied_perm_1 = att_applied_1.permute(1, 0, 2)
assert_sizes(att_applied_perm_1, 3, [1, batch_size, self.att_hid_dim])
# attention over hypothesis
context2 = self.att_context_proj2(context).permute(1, 0, 2)
assert_sizes(context2, 3, [batch_size, 1, self.att_hid_dim])
inp_att_2 = self.att_ht_proj2(enc_out_s2).transpose(1,0).transpose(2,1)
assert_sizes(inp_att_2, 3, [batch_size, self.att_hid_dim, self.max_T_encoder])
dot_prod_att_2 = torch.bmm(context2, inp_att_2)
assert_sizes(dot_prod_att_2, 3, [batch_size, 1, self.max_T_encoder])
att_weights_2 = self.softmax_att(dot_prod_att_2)
assert_sizes(att_weights_2, 3, [batch_size, 1, self.max_T_encoder])
att_applied_2 = torch.bmm(att_weights_2, self.att_ht_before_weighting_proj2(enc_out_s2).permute(1, 0, 2))
assert_sizes(att_applied_2, 3, [batch_size, 1, self.att_hid_dim])
att_applied_perm_2 = att_applied_2.permute(1, 0, 2)
assert_sizes(att_applied_perm_2, 3, [1, batch_size, self.att_hid_dim])
input_dec = torch.cat([word_t, att_applied_perm_1, att_applied_perm_2], 2)
input_dec = self.proj_inp_dec(input_dec)
#print "att_weights_1[0] ", att_weights_1[0]
#print "att_weights_2[0] ", att_weights_2[0]
# get one visualization from the current batch
if visualize:
if t_dec == 0:
weights_1 = att_weights_1[0]
weights_2 = att_weights_2[0]
else:
weights_1 = torch.cat([weights_1, att_weights_1[0]], 0)
weights_2 = torch.cat([weights_2, att_weights_2[0]], 0)
for ii in range(batch_size):
assert abs(att_weights_1[ii].data.sum() - 1) < 1e-5, str(att_weights_1[ii].data.sum())
assert abs(att_weights_2[ii].data.sum() - 1) < 1e-5, str(att_weights_2[ii].data.sum())
out_t, state_t = self.decoder_rnn(input_dec, state_t)
assert_sizes(out_t, 3, [1, batch_size, self.dec_rnn_dim])
out_t = self.vocab_layer(out_t)
if self.decoder_type == 'lstm':
context = state_t[0]
else:
context = state_t
i_t = torch.max(out_t, 2)[1].data
assert_sizes(i_t, 2, [1, batch_size])
pred_words = get_keys_from_vals(i_t, self.word_index) # array of bs of words at current timestep
assert len(pred_words) == batch_size, "pred_words " + str(len(pred_words)) + " batch_size " + str(batch_size)
for i in range(batch_size):
if pred_words[i] == '</s>':
finished[i] = True
if not finished[i]:
pred_expls[i] += " " + pred_words[i]
word_embed[0, i] = torch.from_numpy(self.word_vec[pred_words[i]])
word_t = Variable(word_embed.cuda())
t_dec += 1
if visualize:
assert weights_1.dim() == 2
assert weights_1.size(1) == self.max_T_encoder
assert weights_2.dim() == 2
assert weights_2.size(1) == self.max_T_encoder
pred_expls = [pred_expls, weights_1, weights_2]
return pred_expls
"""
BLSTM (max/mean) encoder
"""
class BLSTMEncoder(nn.Module):
def __init__(self, config):
super(BLSTMEncoder, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_rnn_dim = config['enc_rnn_dim']
self.pool_type = config['pool_type']
self.dpout_enc = config['dpout_enc']
self.max_T_encoder = config['max_T_encoder']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_rnn_dim, 1,
bidirectional=True, dropout=self.dpout_enc)
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return 'cuda' in str(type(self.enc_lstm.bias_hh_l0.data))
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (bsize)
# sent: Variable(seqlen x bsize x worddim)
sent, sent_len = sent_tuple
#assert_sizes(sent, 3, [self.max_T_encoder, sent.size(1), self.word_emb_dim])
# Sort by length (keep idx)
sent_len, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda() if self.is_cuda() \
else torch.from_numpy(idx_sort)
sent = sent.index_select(1, Variable(idx_sort))
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
self.enc_lstm.flatten_parameters()
sent_output = self.enc_lstm(sent_packed)[0] # seqlen x batch x 2*nhid
padding_value = 0.0
if self.pool_type == "max":
padding_value = -100
sent_output_padding = nn.utils.rnn.pad_packed_sequence(sent_output, False, padding_value)[0]
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output, False, 0)[0]
# Un-sort by length
idx_unsort = torch.from_numpy(idx_unsort).cuda() if self.is_cuda() \
else torch.from_numpy(idx_unsort)
sent_output = sent_output.index_select(1, Variable(idx_unsort))
sent_output_padding = sent_output_padding.index_select(1, Variable(idx_unsort))
# Pooling
if self.pool_type == "mean":
sent_len = Variable(torch.FloatTensor(sent_len)).unsqueeze(1).cuda()
emb = torch.sum(sent_output_padding, 0).squeeze(0)
emb = emb / sent_len.expand_as(emb)
elif self.pool_type == "max":
emb = torch.max(sent_output_padding, 0)[0]
if emb.ndimension() == 3:
emb = emb.squeeze(0)
assert emb.ndimension() == 2, "emb.ndimension()=" + str(emb.ndimension())
# pad with zeros so that max length is the same for all, needed for attention
if sent_output.size(0) < self.max_T_encoder:
pad_tensor = Variable(torch.zeros(self.max_T_encoder - sent_output.size(0), sent_output.size(1), sent_output.size(2)).cuda())
sent_output = torch.cat([sent_output, pad_tensor], 0)
return sent_output, emb
def set_glove_path(self, glove_path):
self.glove_path = glove_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
if tokenize:
from nltk.tokenize import word_tokenize
sentences = [s.split() if not tokenize else word_tokenize(s)
for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict['<s>'] = ''
word_dict['</s>'] = ''
return word_dict
def get_glove(self, word_dict):
assert hasattr(self, 'glove_path'), \
'warning : you need to set_glove_path(glove_path)'
# create word_vec with glove vectors
word_vec = {}
with open(self.glove_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found {0}(/{1}) words with glove vectors'.format(
len(word_vec), len(word_dict)))
return word_vec
def get_glove_k(self, K):
assert hasattr(self, 'glove_path'), 'warning : you need \
to set_glove_path(glove_path)'
# create word_vec with k first glove vectors
k = 0
word_vec = {}
with open(self.glove_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in ['<s>', '</s>']:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in ['<s>', '</s>']]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'glove_path'), 'warning : you need \
to set_glove_path(glove_path)'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_glove(word_dict)
print('Vocab size from within BLSTMEncoder : {0}'.format(len(self.word_vec)))
# build GloVe vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'glove_path'), 'warning : you need \
to set_glove_path(glove_path)'
self.word_vec = self.get_glove_k(K)
print('Vocab size : {0}'.format(K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'glove_path'), 'warning : you need \
to set_glove_path(glove_path)'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
        # update vocabulary
if word_dict:
new_word_vec = self.get_glove(word_dict)
self.word_vec.update(new_word_vec)
print('New vocab size : {0} (added {1} words)'.format(
len(self.word_vec), len(new_word_vec)))
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
if tokenize:
from nltk.tokenize import word_tokenize
sentences = [['<s>'] + s.split() + ['</s>'] if not tokenize else
['<s>']+word_tokenize(s)+['</s>'] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without glove vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "{0}" (idx={1}) have glove vectors. \
Replacing by "</s>"..'.format(sentences[i], i))
s_f = ['</s>']
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : {0}/{1} ({2} %)'.format(
n_wk, n_w, round((100.0 * n_wk) / n_w, 2)))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
sentences = np.array(sentences)[idx_sort]
return sentences, lengths, idx_sort
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch = Variable(self.get_batch(
sentences[stidx:stidx + bsize]), volatile=True)
if self.is_cuda():
batch = batch.cuda()
batch = self.forward(
(batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : {0} sentences/s ({1} mode, bsize={2})'.format(
round(len(embeddings)/(time.time()-tic), 2),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
def visualize(self, sent, tokenize=True):
if tokenize:
from nltk.tokenize import word_tokenize
sent = sent.split() if not tokenize else word_tokenize(sent)
sent = [['<s>'] + [word for word in sent if word in self.word_vec] +
['</s>']]
if ' '.join(sent[0]) == '<s> </s>':
import warnings
warnings.warn('No words in "{0}" have glove vectors. Replacing \
by "<s> </s>"..'.format(sent))
batch = Variable(self.get_batch(sent), volatile=True)
if self.is_cuda():
batch = batch.cuda()
output = self.enc_lstm(batch)[0]
output, idxs = torch.max(output, 0)
# output, idxs = output.squeeze(), idxs.squeeze()
idxs = idxs.data.cpu().numpy()
argmaxs = [np.sum((idxs == k)) for k in range(len(sent[0]))]
# visualize model
import matplotlib.pyplot as plt
x = range(len(sent[0]))
y = [100.0*n/np.sum(argmaxs) for n in argmaxs]
plt.xticks(x, sent[0], rotation=45)
plt.bar(x, y)
plt.ylabel('%')
plt.title('Visualisation of words importance')
plt.show()
return output, idxs
"""
Main module for Natural Language Inference
"""
class eSNLIAttention(nn.Module):
def __init__(self, config):
super(eSNLIAttention, self).__init__()
self.encoder_type = config['encoder_type']
self.encoder = eval(self.encoder_type)(config)
self.decoder = AttentionDecoder(config)
def forward(self, s1, s2, expl, mode, visualize):
# s1 : (s1, s1_len)
# s2 : (s2, s2_len)
# expl : Variable(T x bs x 300)
u, u_emb = self.encoder(s1) # u = max_T_enc x bs x (2 * enc_dim) ; u_emb = 1 x bs x (2 * enc_dim)
v, v_emb = self.encoder(s2)
out_expl = self.decoder(expl, u, v, u_emb, v_emb, mode, visualize)
return out_expl
def encode(self, s1):
emb = self.encoder(s1)
return emb
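if __name__ == "__main__":
    # Minimal instantiation sketch (illustrative values only, not the training
    # configuration; it also assumes the repository's mutils module is importable).
    # enc_rnn_dim is 2048 because AttentionDecoder asserts sent_dim == 4096 and
    # sent_dim = 2 * enc_rnn_dim for the BLSTM encoder.
    demo_config = {
        'encoder_type': 'BLSTMEncoder',
        'decoder_type': 'gru',
        'word_emb_dim': 300,
        'enc_rnn_dim': 2048,
        'dec_rnn_dim': 512,
        'att_hid_dim': 512,
        'att_type': 'dot',
        'dpout_enc': 0.0,
        'dpout_dec': 0.0,
        'n_vocab': 100,        # placeholder vocabulary size
        'word_index': {},      # placeholder word -> index mapping
        'word_vec': {},        # placeholder word -> GloVe vector mapping
        'max_T_encoder': 84,
        'max_T_decoder': 40,
        'n_layers_dec': 1,
        'use_init': True,
        'bsize': 64,
        'pool_type': 'max',
    }
    model = eSNLIAttention(demo_config)
    print(model)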
|
[
"oanuru@dgx1.cs.ox.ac.uk"
] |
oanuru@dgx1.cs.ox.ac.uk
|
609cf96a13149dacdc0965276d4fdf3f6c48b7ad
|
31de1eaaaa6baf1b6b54543a895b4fbab8b27703
|
/build/rosserial/rosserial_python/catkin_generated/pkg.installspace.context.pc.py
|
45a938f9779c9acb5bdb702aba86afc1ed7022d6
|
[] |
no_license
|
PHSCRC/Ogrebot-ROS
|
dade38eeb0a08958e448427248f38275a18186dc
|
afde00120d16ffa45426555b9788da5291946284
|
refs/heads/master
| 2020-04-17T11:53:54.648548
| 2019-03-03T23:28:24
| 2019-03-03T23:28:24
| 166,558,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_python"
PROJECT_SPACE_DIR = "/home/parallels/catkin_ws/install"
PROJECT_VERSION = "0.8.0"
|
[
"stephmorel8910@gmail.com"
] |
stephmorel8910@gmail.com
|
21e89041981492ca1efd8f1c75f95ab846b97378
|
6d84ba122ccd5e49aef98d56e07ce5c4fdc4f068
|
/blueprints/auth.py
|
e8a37760bcdf7c2665952899be7fbee501fc54ea
|
[] |
no_license
|
accalina/divingclass
|
0f828deea89b25f3066a3ef128a8716586c4a7eb
|
04f686456896d2726f6ef217fac882ef679575ab
|
refs/heads/master
| 2020-06-25T02:46:07.366952
| 2019-07-29T07:01:10
| 2019-07-29T07:01:10
| 199,175,219
| 0
| 0
| null | 2019-07-29T07:01:11
| 2019-07-27T14:30:19
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,066
|
py
|
from flask import Blueprint, render_template, request, session, redirect
from model.model import Model
auth = Blueprint("auth", __name__, static_folder="../static", template_folder="../templates")
m = Model()
# ENDPOINT --------------------------------------+
@auth.route("/")
@auth.route("/index")
def index():
if 'username' in session:
if session['level'] == 9:
return redirect("/admin")
else:
return redirect("/dashboard")
else:
return redirect("/login?msg=you must Login to continue")
@auth.route("/login", methods=["GET", "POST"])
def login():
if 'username' not in session:
if request.method == "GET":
msg = request.args.get("msg", "")
return render_template("auth/login.html", data={'msg': msg})
if request.method == "POST":
username = request.form.get("username")
password = request.form.get("password")
result = m.login(username, password)
if result != False:
session['userid'] = result[0]['userid']
session['username'] = (result[0]['username']).capitalize()
session['fullname'] = result[0]['fullname']
session['level'] = result[0]['level']
session['cert'] = result[0]['cert']
return redirect("/index")
else:
return redirect(f"/login?msg=Invalid Username / Password")
else:
return redirect("/index")
@auth.route("/register", methods=["GET", "POST"])
def register():
if request.method == "GET":
redirect("/login")
if request.method == "POST":
username = request.form.get("username")
password = request.form.get("password")
fullname = request.form.get("fullname")
m.register(username, password, fullname)
return redirect(f"/login?msg=user {username} has been created")
@auth.route("/logout")
def logout():
session.clear()
return render_template("auth/login.html", data={'msg': 'you have logged out'})
|
[
"ikhsan.b@jublia.com"
] |
ikhsan.b@jublia.com
|
ccdc10c3229a96f799ffb8a5d260e71f91cbf311
|
31514484f5722a23bb0bef1d47bce6e1a3958096
|
/OracleOfNumpy/pic2oled.py
|
b1c015f6b31da68534abc2b152a38d3f695f174e
|
[] |
no_license
|
moharkalra/Oracle
|
0ecdf7eab1be7b31190e669c95eca802156aaa10
|
329d6a7930f1c8890f27270a8722878f42ad1024
|
refs/heads/master
| 2022-12-13T21:33:38.824188
| 2020-09-12T13:39:26
| 2020-09-12T13:39:26
| 294,867,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,243
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Picture to OLED screen (pic2oled)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Convert an image to a hexadecimal array for OLED screens.
Usage:
>>> python pic2oled.py <filename>
The script outputs a C declaration of a static array. Code output may be easily
included in source code managed by the Arduino IDE.
See full documentation in README.md
:copyright: (c) 2015 by Jean-Yves VET.
:license: MIT, see LICENSE for more details.
"""
from __future__ import print_function
import sys, re, os
from PIL import Image
############################### Global Variables ###############################
oledWidth = 360 # Number of pixel columns handled by the OLED screen
oledHeight = 480 # Number of pixel rows managed by the OLED screen
threshold = 127 # Below this contrast value, pixel is considered as activated
################################## Functions ###################################
## Check arguments and image dimensions.
# @return An image object if nothing went wrong
def checkArgs():
# Check number of arguments
if len(sys.argv) == 2:
# Try to open the image
try:
im = Image.open(sys.argv[1])
except:
print("Error: unable to open", sys.argv[1], file=sys.stderr)
exit(-1)
# Check image dimensions
width,height = im.size
if (width != oledWidth or height != oledHeight):
print("Error: invalid picture dimensions (",
width, "x", height, "), expected", oledWidth,
"x", oledHeight, file=sys.stderr)
exit(-1)
return im
else :
print("Error: invalid number of arguments", file=sys.stderr)
print("Usage:")
print("python " + sys.argv[0] + " <filename>")
exit(-1)
## Convert pixel values to bytes for OLED screens. In the same column, values in
# consecutive rows (8 by 8) are aggregated in the same byte.
# @param pixels Array containing pixel values
# @return An array containing the converted values
def convert(pixels) :
data = [[0 for x in range(oledHeight)] for x in range(int(oledWidth/8))]
for i in range(int(oledWidth/8)):
for j in range(int(oledHeight)):
for bit in range(8):
data[i][j] |= (pixels[j][i*8 + bit] << bit)
data[i][j] = reverseBits(data[i][j], 8)
return data
def reverseBits(num,bitSize):
# convert number into binary representation
# output will be like bin(10) = '0b10101'
binary = bin(num)
# skip first two characters of binary
# representation string and reverse
# remaining string and then append zeros
# after it. binary[-1:1:-1] --> start
# from last character and reverse it until
# second last character from left
reverse = binary[-1:1:-1]
reverse = reverse + (bitSize - len(reverse))*'0'
# converts reversed binary string into integer
return int(reverse,2)
## Convert image to binary (monochrome).
# @param im A picture opened with PIL.
# @return A binary array
def toBinary(im):
# Allocate array to hold binary values
binary = [[0 for x in range(oledWidth)] for x in range(oledHeight)]
# Convert to binary values by using threshold
for j in range(oledHeight):
for i in range(oledWidth):
value = im.getpixel((i, j))[0]
# Set bit if the pixel contrast is below the threshold value
binary[j][i] = int(value < threshold)
return binary
## Format data to output a string for C array declaration.
# @param data Array containing binary values
# @return A string containing the array formated for C code.
def output(data):
# Generate the output with hexadecimal values
s = "const char [] PROGMEM = {" + '\n'
for j in range(int(oledHeight)):
for i in range(int(oledWidth/8)):
s += format(data[i][j], '#04x') + ", "
if (i%16 == 15):
s += '\n'
s = s[:-3] + '\n};'
return s
#################################### Main ######################################
if __name__ == '__main__':
image = checkArgs()
binary = toBinary(image)
data = convert(binary)
print(output(data))
|
[
"mokamo01@yahoo.com"
] |
mokamo01@yahoo.com
|
88a8e524d1e9289cf9ef43c43ae6bd00e178ef5a
|
a122119df649ee4fe543951c7e702e85b3427670
|
/build/baxter_common/baxter_maintenance_msgs/cmake/baxter_maintenance_msgs-genmsg-context.py
|
d53b14ce640c040fb4d738db0101fde2fe60683b
|
[] |
no_license
|
Sean-Skilling/baxter-project
|
e126de1cf80aabebc176628182a23841b2a1a930
|
e86ff6015aaf9c301de5ef9a92538974319ff2b3
|
refs/heads/master
| 2020-04-11T13:03:33.841852
| 2019-03-27T14:58:13
| 2019-03-27T14:58:13
| 161,802,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/seanskilling/baxter_ws/src/baxter_common/baxter_maintenance_msgs/msg/CalibrateArmData.msg;/home/seanskilling/baxter_ws/src/baxter_common/baxter_maintenance_msgs/msg/CalibrateArmEnable.msg;/home/seanskilling/baxter_ws/src/baxter_common/baxter_maintenance_msgs/msg/TareData.msg;/home/seanskilling/baxter_ws/src/baxter_common/baxter_maintenance_msgs/msg/TareEnable.msg;/home/seanskilling/baxter_ws/src/baxter_common/baxter_maintenance_msgs/msg/UpdateSource.msg;/home/seanskilling/baxter_ws/src/baxter_common/baxter_maintenance_msgs/msg/UpdateSources.msg;/home/seanskilling/baxter_ws/src/baxter_common/baxter_maintenance_msgs/msg/UpdateStatus.msg"
services_str = ""
pkg_name = "baxter_maintenance_msgs"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "baxter_maintenance_msgs;/home/seanskilling/baxter_ws/src/baxter_common/baxter_maintenance_msgs/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"2207516s@student.gla.ac.uk"
] |
2207516s@student.gla.ac.uk
|
394f39ade830aa9742866fc64ae4126621679347
|
bb9707ce73f4de70b0a200325fe09f4abffe0c8b
|
/ctfs/own/eko2019/resources/guess_my_number_remotely.py
|
a84a2a90cd0eb522abf5607a5ed40b2655eec7ff
|
[] |
no_license
|
lmkalg/lmkalg.github.io
|
32d5ed00a3415f0e146237de6090797ef4119d08
|
f67480b3ca25f961b006adaa6ee54851cffbc4f6
|
refs/heads/master
| 2022-02-04T18:53:20.933450
| 2022-01-10T02:20:55
| 2022-01-10T02:20:55
| 217,159,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,286
|
py
|
import socket
from threading import Thread
from SocketServer import ThreadingMixIn
import random
import time
import sys
TIME_BETWEEN_ANSWERS = 5
SOCKET_TIMEOUT = 10
MAX_CONNECTIONS = 256
MAX_CONNECTIONS_PER_IP = 10
NUMBER_OF_DIGITS = 5
MAX_AMOUNT_OF_VALID_ATTEMPTS = 4
MAX_AMOUNT_OF_ATTEMPTS = 100
FLAG = "ONA{861c4f67e887dec85292d36ab05cd7a1a7275228}"
connections = {}
total_connections = 0
class MaxAmountOfAttemptsException(Exception):
pass
class MaxAmountOfValidAttemptsException(Exception):
pass
def valid_attempt(attempted_number):
return len(attempted_number) == NUMBER_OF_DIGITS
def remove_numbers_with_repetitions(all_numbers):
return filter(lambda x: len(set(x)) == len(x), all_numbers)
def good_one(my_number, attempted_number,i):
return 1 if my_number[i] == attempted_number[i] else 0
def regular_one(my_number, attempted_number,i):
return 1 if my_number[i] != attempted_number[i] and attempted_number[i] in my_number else 0
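# Worked example (illustrative): with my_number = ['1','2','3','4','5'] and
# attempted_number = "13542", positions 0 and 3 are good ('1' and '4' are in the
# right place) while '3', '5' and '2' are regular (present but misplaced),
# so the answer sent back to the player would be "2 3".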
def to_int(number):
return int("".join(number))
def remove_ctrl_keys(attempted_number):
"""
remove 0a 0d
"""
return attempted_number.strip()
def play_game(conn):
all_numbers = range(10**(NUMBER_OF_DIGITS-1), 10**NUMBER_OF_DIGITS)
all_numbers = map(lambda x: str(x), all_numbers)
all_numbers = map(lambda x: [y for y in x], all_numbers)
valid_numbers = remove_numbers_with_repetitions(all_numbers)
leftover_numbers = valid_numbers
#Presentation
conn.send("Welcome to DA game. I'll choose a number of {} digits. It cannot not neither start with 0 nor repeat digits. You'll have to guess it. The answer shows first the amount of goods, then the amount of regulars. Remember, a digit is good if it was placed in the correct place. On the other hand a digit is regular if it is inside the number, but it wasn't placed in the correct place. Whenever you're ready just start guessing\nYou have only {} attempts. Good luck!\n".format(NUMBER_OF_DIGITS, MAX_AMOUNT_OF_VALID_ATTEMPTS))
#Choose the number
my_number = random.choice(leftover_numbers)
print "[d] Number is: {}".format(my_number)
sys.stdout.flush()
amount_of_attempts = 0
attempted_number = remove_ctrl_keys(conn.recv(1024))
amount_of_attempts += 1
while not valid_attempt(attempted_number):
conn.send("[W] Invalid format of the attempted number (this attempt doesn't count!). Remember it should be a number of {} digits\n".format(NUMBER_OF_DIGITS))
attempted_number = remove_ctrl_keys(conn.recv(1024))
amount_of_attempts += 1
amount_of_valid_attempts = 0
while amount_of_valid_attempts < MAX_AMOUNT_OF_VALID_ATTEMPTS and amount_of_attempts < MAX_AMOUNT_OF_ATTEMPTS:
amount_of_goods = sum([good_one(my_number, attempted_number, digit_index) for digit_index in xrange(NUMBER_OF_DIGITS)])
amount_of_regulars = sum([regular_one(my_number, attempted_number, digit_index) for digit_index in xrange(NUMBER_OF_DIGITS)])
conn.send("mm let me check...\n")
time.sleep(TIME_BETWEEN_ANSWERS)
if amount_of_goods == NUMBER_OF_DIGITS:
conn.send("YEAHH! You won in the flag is {}\n".format(FLAG))
return
else:
conn.send("{} {}\n".format(amount_of_goods, amount_of_regulars))
attempted_number = remove_ctrl_keys(conn.recv(1024))
amount_of_attempts += 1
while not valid_attempt(attempted_number) and amount_of_attempts < MAX_AMOUNT_OF_ATTEMPTS:
conn.send("[W] Invalid format of the attempted number (this attempt doesn't count!). Remember it should be a number of {} digits\n".format(NUMBER_OF_DIGITS))
attempted_number = remove_ctrl_keys(conn.recv(1024))
amount_of_attempts += 1
amount_of_valid_attempts += 1
if amount_of_attempts == MAX_AMOUNT_OF_ATTEMPTS:
conn.send("Oops! Sorry. {} attempts reached!\n".format(MAX_AMOUNT_OF_ATTEMPTS))
raise MaxAmountOfAttemptsException
elif amount_of_valid_attempts == MAX_AMOUNT_OF_VALID_ATTEMPTS:
conn.send("Oops! Sorry. {} valid attempts reached!\n".format(MAX_AMOUNT_OF_VALID_ATTEMPTS))
raise MaxAmountOfValidAttemptsException
# Multithreaded Python server : TCP Server Socket Thread Pool
class ClientThread(Thread):
def __init__(self,ip,port):
Thread.__init__(self)
self.ip = ip
self.port = port
print "[+] New server socket thread started for " + ip + ":" + str(port)
sys.stdout.flush()
def run(self):
try:
conn.settimeout(SOCKET_TIMEOUT)
play_game(conn)
except socket.timeout:
print("[-] Socket timeout reached!")
conn.send(("[-] Socket timeout reached!"))
except MaxAmountOfAttemptsException:
print("[-] MaxAmount of attempts reached from {}:{}".format(self.ip, self.port))
conn.send(("[-] MaxAmount of attempts reached from {}:{}".format(self.ip, self.port)))
except MaxAmountOfValidAttemptsException:
print("[-] MaxAmount of valid attempts reached from {}:{}".format(self.ip, self.port))
conn.send(("[-] MaxAmount of valid attempts reached from {}:{}".format(self.ip, self.port)))
except Exception as e:
pass
finally:
print '[-] Connection from {}:{} closed!'.format(self.ip, self.port)
sys.stdout.flush()
conn.close()
amount_of_connections = connections[self.ip]
connections.update({self.ip:amount_of_connections-1})
print '[+] Connections: {}'.format(connections)
sys.stdout.flush()
# Multithreaded Python server : TCP Server Socket Program Stub
TCP_IP = '0.0.0.0'
TCP_PORT = 1337
BUFFER_SIZE = 20 # Usually 1024, but we need quick response
tcpServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpServer.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print "[+] Server up!"
sys.stdout.flush()
tcpServer.bind((TCP_IP, TCP_PORT))
threads = []
while True:
tcpServer.listen(4)
(conn, (ip,port)) = tcpServer.accept()
print "[+] New connection received from {}:{}".format(ip,port)
sys.stdout.flush()
if sum(connections.values()) >= MAX_CONNECTIONS:
conn.send("Sorry. Max number of connections achieved.")
conn.close()
print "[-] Connection from {}:{} dropped because of Max connections achieved".format(ip, port)
sys.stdout.flush()
continue
if ip in connections.keys():
amount_of_connections = connections.get(ip)
if amount_of_connections < MAX_CONNECTIONS_PER_IP:
connections.update({ip:amount_of_connections+1})
else:
conn.send("Sorry. Max number of connections from your IP reached.")
conn.close()
print "[-] Connection from {}:{} dropped because of Max connections achieved for that ip".format(ip,port)
sys.stdout.flush()
continue
else:
connections.update({ip:1})
print "Connections: {}".format(connections)
sys.stdout.flush()
newthread = ClientThread(ip,port)
newthread.start()
threads.append(newthread)
for t in threads:
t.join()
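# Client sketch (illustrative, not part of the original challenge): one way to talk to
# the service from Python 2. Host and port are placeholders for wherever it is running.
#
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(("127.0.0.1", 1337))
#   print s.recv(4096)    # welcome banner
#   s.send("12345")       # a first guess
#   print s.recv(1024)    # "mm let me check..."
#   print s.recv(1024)    # "<goods> <regulars>"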
|
[
"artusopablo@gmail.com"
] |
artusopablo@gmail.com
|
355b9c73805f1266127965145e5a42f596c654e5
|
a8b96689030fc41e4271311e8e44e7e78550750c
|
/t/tensorflow/Ubuntu/tensorflow_1.11.0_gpu_ubuntu_16.04.py
|
99adece71cb56c8f2c1c21cb6202ae38884deba7
|
[
"Apache-2.0"
] |
permissive
|
ppc64le/build-scripts
|
f7dd7f660fb447ecab7e3464fa7ba83c743ef785
|
e10ab4c8d10b591fd23151bfe7a4f5921f45b59c
|
refs/heads/master
| 2023-08-21T12:54:56.145453
| 2023-08-17T05:41:21
| 2023-08-17T05:41:21
| 81,145,663
| 79
| 367
|
Apache-2.0
| 2023-09-14T14:26:59
| 2017-02-06T23:48:15
|
Shell
|
UTF-8
|
Python
| false
| false
| 7,806
|
py
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
#
# Package : TensorFlow
# Version : v1.11.0
# Source repo : https://github.com/tensorflow/tensorflow
# Tested on : docker.io/nvidia/cuda-ppc64le:9.2-cudnn7-devel-ubuntu16.04
# docker container
# Script License: Apache License, Version 2 or later
# Maintainer : William Irons <wdirons@us.ibm.com>
#
# Disclaimer: This script has been tested on given docker container
# ========== using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
# Build script for TensorFlow 1.11.0 with GPU support on PPC64LE
# Run from within the docker container: docker.io/nvidia/cuda-ppc64le:9.2-cudnn7-devel-ubuntu16.04
# Requires nccl_2.2.13-1+cuda9.2_ppc64le.tgz downloaded from https://developer.nvidia.com/nccl/nccl-download
# to be in the same directory as the script
# To run this script
# apt-get update -y
# apt-get install -y python
# python tensorflow_1.11.0_gpu_ubuntu_16.04.py
# The resulting wheel file will be in the tensorflow_pkg subdirectory from where the script was invoked.
import os.path, sys, subprocess
from subprocess import check_call, CalledProcessError
packages = [
'apt-get install -y --no-install-recommends \
openjdk-8-jdk \
wget \
curl \
unzip \
zip \
git \
rsync \
python-dev \
swig \
libatlas-dev \
python-numpy \
libopenblas-dev \
libcurl3-dev \
libfreetype6-dev \
libzmq3-dev \
libhdf5-dev \
g++ \
patch \
python-pip \
python-setuptools \
python-wheel \
python-enum34',
'pip install mock cython',
'pip install --global-option=build_ext \
--global-option=-I/usr/include/hdf5/serial/ \
--global-option=-L/usr/lib/powerpc64le-linux-gnu/hdf5/serial \
h5py',
'pip install keras_applications==1.0.5 keras_preprocessing==1.0.3 --no-deps'
]
# In the Ubuntu 16.04 images, cudnn is placed in system paths. Move them to
# /usr/local/cuda
cudnn = [
'cp -P /usr/include/cudnn.h /usr/local/cuda/include',
'cp -P /usr/lib/powerpc64le-linux-gnu/libcudnn.so /usr/local/cuda/lib64',
'cp -P /usr/lib/powerpc64le-linux-gnu/libcudnn.so.7 /usr/local/cuda/lib64',
'cp -P /usr/lib/powerpc64le-linux-gnu/libcudnn.so.7.2.1 /usr/local/cuda/lib64',
'cp -P /usr/lib/powerpc64le-linux-gnu/libcudnn_static.a /usr/local/cuda/lib64',
'cp -P /usr/lib/powerpc64le-linux-gnu/libcudnn_static_v7.a /usr/local/cuda/lib64',
]
nccl = [
'tar -xzvf nccl_2.2.13-1+cuda9.2_ppc64le.tgz',
'cp cuda/targets/ppc64le-linux/include/nccl.h /usr/local/cuda/include',
'mkdir -p /usr/local/cuda/lib',
'cp cuda/targets/ppc64le-linux/lib/libnccl.so /usr/local/cuda/lib/',
'cp cuda/targets/ppc64le-linux/lib/libnccl.so.2 /usr/local/cuda/lib/',
'cp cuda/targets/ppc64le-linux/lib/libnccl.so.2.2.13 /usr/local/cuda/lib/',
'cp cuda/targets/ppc64le-linux/lib/libnccl_static.a /usr/local/cuda/lib/',
'chmod a+r /usr/local/cuda/include/',
'chmod a+r /usr/local/cuda/lib/',
'ldconfig'
]
bazel = [
'mkdir -p bazel',
'wget https://github.com/bazelbuild/bazel/releases/download/0.15.0/bazel-0.15.0-dist.zip',
'mv bazel-0.15.0-dist.zip bazel/',
'unzip -o bazel/bazel-0.15.0-dist.zip -d bazel/',
'bazel/compile.sh',
'cp bazel/output/bazel /usr/local/bin/'
]
git = [
'rm -rf tensorflow',
'git clone -b v1.11.0 https://github.com/tensorflow/tensorflow',
]
def run_cmd(command):
'''
Run the given command using check_call and verify its return code.
@param str command command to be executed
'''
try:
check_call(command.split())
except CalledProcessError as e:
if command.split()[0] == "rpm":
print('Ignore rpm failure, package is probably already installed')
else:
print('An exception has occurred: {0}'.format(e))
sys.exit(1)
def execute_cmd(list, step):
'''
Execute the given commands using run_cmd function
@param list list commands to be executed
    @param step str name of the command to be executed
'''
print('Step: %s' % (step))
for item in list:
run_cmd(item)
def set_environment():
'''
Create bazelrc file with the necessary settings
Note: Limiting TF_CUDA_COMPUTE_CAPABILITIES to only
GPUs you plan to use will speed build time and decrease
overall install size. See https://developer.nvidia.com/cuda-gpus
for GPU model to Compute Capabilities mapping.
'''
f= open('tensorflow/.bazelrc',"w+")
f.write("build --action_env PYTHON_BIN_PATH='/usr/bin/python'\n\
build --action_env PYTHON_LIB_PATH='/usr/local/lib/python2.7/site-packages'\n\
build --python_path='/usr/bin/python'\n\
build --define with_jemalloc=true\n\
build --define with_hdfs_support=true\n\
build --define with_gcp_support=true\n\
build --define with_aws_support=true\n\
build --define with_kafka_support=true\n\
build:xla --define with_xla_support=false\n\
build:gdr --define with_gdr_support=false\n\
build:verbs --define with_verbs_support=false\n\
build --action_env TF_NEED_OPENCL_SYCL='0'\n\
build --action_env TF_NEED_CUDA='1'\n\
build --action_env TF_CUDA_VERSION='9.2'\n\
build --action_env CUDA_TOOLKIT_PATH='/usr/local/cuda-9.2'\n\
build --action_env CUDNN_INSTALL_PATH='/usr/local/cuda-9.2'\n\
build --action_env TF_CUDNN_VERSION='7'\n\
build --action_env TF_NCCL_VERSION='2'\n\
build --action_env NCCL_INSTALL_PATH='/usr/local/cuda-9.2'\n\
build --action_env TF_CUDA_COMPUTE_CAPABILITIES='3.5,3.7,5.2,6.0,7.0'\n\
build --action_env LD_LIBRARY_PATH='/usr/local/nvidia/lib:/usr/local/nvidia/lib64'\n\
build --action_env TF_CUDA_CLANG='0'\n\
build --action_env GCC_HOST_COMPILER_PATH='/usr/bin/gcc'\n\
build --config=cuda\n\
test --config=cuda\n\
build --define grpc_no_ares=true\n\
build:opt --copt=-mcpu=power8\n\
build:opt --copt=-mtune=power8\n\
build:opt --define with_default_optimizations=true\n\
build --strip=always")
f.close()
def run_build(list, dir):
'''
Execute the given commands in other directory
@param list list commands to be executed
@param dir str directory path
'''
build = subprocess.Popen(list, cwd=dir)
build.wait()
if not build.returncode==0:
print('Exiting due to failure in command: {0}'.format(list))
sys.exit(1)
def ensure_prereq_file_exists(file_name):
'''
    Validate that a file required by the build is downloaded
    and present in the same directory as the build script
@param file_name required file for the build
'''
if not os.path.isfile(file_name):
print('File {0} does not exists, this file is required for the build script'.format(file_name))
sys.exit(1)
def main():
ensure_prereq_file_exists('nccl_2.2.13-1+cuda9.2_ppc64le.tgz')
    execute_cmd(packages, 'Installing necessary Packages')
execute_cmd(cudnn, 'Moving cudnn files')
execute_cmd(nccl, 'Installing nccl')
execute_cmd(bazel, 'Install bazel')
execute_cmd(git, 'Cloning tensorflow')
set_environment()
run_build(['/usr/local/bin/bazel', 'build', '-c', 'opt', '//tensorflow/tools/pip_package:build_pip_package'], './tensorflow/')
run_build(['bazel-bin/tensorflow/tools/pip_package/build_pip_package', '../tensorflow_pkg'], './tensorflow/')
run_build(['pip', 'install', 'tensorflow-1.11.0-cp27-cp27mu-linux_ppc64le.whl'], './tensorflow_pkg/')
if __name__ == "__main__":
main()
|
[
"wdirons@us.ibm.com"
] |
wdirons@us.ibm.com
|
e80ae22a9a4bd5dce59bdc85a6df5f6d5a884108
|
6c61a84b25a1016282cefd8989c96ec01f6f165d
|
/proxy.py
|
4ef7bf9da01fc2638932ea90ddbe91dddb69c361
|
[] |
no_license
|
ariksu/testHub
|
d29117cce0e1ff3c2b90a98ea28bee903f404bfb
|
7bf08eb97b541fce09bc873c1809f30e9ee148e3
|
refs/heads/master
| 2020-04-03T10:54:52.040119
| 2018-10-29T07:36:29
| 2018-10-29T07:36:29
| 155,206,080
| 1
| 0
| null | 2018-10-29T12:12:28
| 2018-10-29T12:12:27
| null |
UTF-8
|
Python
| false
| false
| 4,626
|
py
|
import sys, socket, threading, argparse
def request_handler(buff):
    # perform the necessary packet modifications
return buff
def response_handler(buff):
# perform the necessary packet modifications
return buff
def receive_from(connection):
buff = "".encode()
# setting a 2 second timeout
connection.settimeout(2)
try:
# keep reading into the buffer until no more data left or timeout
while True:
data = connection.recv(4096)
if not data:
break
buff += data
except Exception as e:
print(e)
pass
return buff
def hexdump(src, length=16):
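    # For each `length`-byte row, print the offset, the hex bytes and the
    # printable-ASCII rendering (non-printable bytes shown as '.').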
result = []
digits = 4 if isinstance(src, str) else 2
for i in range(0, len(src), length):
s = src[i:i + length]
hexa = str(' '.join(["%0*X" % (digits, ord(x)) for x in s])).encode('utf-8')
text = str(''.join([x if 0x20 <= ord(x) < 0x7F else '.' for x in s])).encode('utf-8')
result.append("%04X %-*s %s" % (i, length * (digits + 1), hexa.decode('utf-8'), text.decode('utf-8')))
print('\n'.join(result))
def proxy_handler(client_socket, remote_host, remote_port, receive_first):
# connect to remote host
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_socket.connect((remote_host, remote_port))
    # receive data from the remote host if necessary
if receive_first:
remote_buff = receive_from(remote_socket)
remote_buff = response_handler(remote_buff)
# if there is data to send to the local client, do it
if len(remote_buff):
print("[<==] Sending {0} bytes to local_host".format(len(remote_buff)))
client_socket.send(remote_buff)
# loop: read from local, send to remote, send to local
while True:
# read from local_host
local_buff = receive_from(client_socket)
if len(local_buff):
print("[==>] Received {0} bytes from local_host", len(local_buff))
hexdump(local_buff.decode(errors="ignore"))
# send local buffer to request handler
local_buff = request_handler(local_buff)
# send data to remote host
remote_socket.send(local_buff)
print("[==>] Sent to remote.")
# receive back the response
remote_buff = receive_from(remote_socket)
if len(remote_buff):
print("[<==] Received {0} bytes from remote".format(len(remote_buff)))
hexdump(remote_buff.decode(errors="ignore"))
            # send the data to the response handler
remote_buff = response_handler(remote_buff)
            # send the response back to the local socket
client_socket.send(remote_buff)
print("[<==] Sent to localhost.")
# if no more data on either side, close the connections
if not len(local_buff) or not len(remote_buff):
client_socket.close()
remote_socket.close()
print("[*] No more data. Closing connections.")
break
def server_loop(local_host, local_port, remote_host, remote_port, receive_first):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind((local_host, local_port))
except Exception as e:
print("[!!] Failed to listen in {0}:{1}".format(local_host, local_port))
print("[!!] Check for other listening sockets or correct permissons.")
print(e)
sys.exit(0)
print("[*] Listening on {0}:{1}".format(local_host, local_port))
server.listen(5)
while True:
client_socket, addr = server.accept()
        # print the local connection info
print("[==>] Received incoming connection from {0}:{1}".format(addr[0], addr[1]))
# start a thread to talk to the remote host
proxy_thread = threading.Thread(target=proxy_handler,
args=(client_socket, remote_host, remote_port, receive_first))
proxy_thread.start()
def main():
parser = argparse.ArgumentParser(description="a simple TCP proxy tool")
parser.add_argument("local_host", type=str)
parser.add_argument("local_port", type=int)
parser.add_argument("remote_host", type=str)
parser.add_argument("remote_port", type=int)
parser.add_argument("receive_first", type=str)
args = parser.parse_args()
receive_first = True if "True" in args.receive_first else False
# spin up the listening socket
server_loop(args.local_host, args.local_port, args.remote_host, args.remote_port, receive_first)
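# Example invocation (illustrative host/port values):
#   python proxy.py 127.0.0.1 9000 example.com 80 True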
main()
|
[
"photoelf@gmail.com"
] |
photoelf@gmail.com
|
c4b27d12b6acaf8a7adfe09e0103055bdda334d9
|
d50cfc3b4b458ce237d690e6680c0a97f1e6f4f2
|
/snmn/exp_clevr_snmn/test_net_vqa.py
|
58c7732d7ae3e993701fb4ff7c9a750313df0f24
|
[
"BSD-2-Clause"
] |
permissive
|
sheffier/ml_nlp_vqa
|
d7fc23226a5acbcfe3f6d68533c5f428a12bf625
|
c6b6cb7d2e684636de3b2ab41ebd72d627774137
|
refs/heads/master
| 2021-12-14T00:13:49.656751
| 2021-11-22T12:52:46
| 2021-11-22T12:52:46
| 199,274,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,288
|
py
|
import argparse
import os
import numpy as np
import tensorflow as tf
from models_clevr_snmn.model import Model
from models_clevr_snmn.config import (
cfg, merge_cfg_from_file, merge_cfg_from_list)
from util.clevr_train.data_reader import DataReader
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', required=True)
parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
args = parser.parse_args()
merge_cfg_from_file(args.cfg)
assert cfg.EXP_NAME == os.path.basename(args.cfg).replace('.yaml', '')
if args.opts:
merge_cfg_from_list(args.opts)
# Start session
os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.GPU_ID)
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=cfg.GPU_MEM_GROWTH)))
# Data files
imdb_file = cfg.IMDB_FILE % cfg.TEST.SPLIT_VQA
data_reader = DataReader(
imdb_file, shuffle=False, one_pass=True, batch_size=cfg.TRAIN.BATCH_SIZE,
vocab_question_file=cfg.VOCAB_QUESTION_FILE, T_encoder=cfg.MODEL.T_ENCODER,
vocab_answer_file=cfg.VOCAB_ANSWER_FILE, load_gt_layout=True,
vocab_layout_file=cfg.VOCAB_LAYOUT_FILE, T_decoder=cfg.MODEL.T_CTRL)
num_vocab = data_reader.batch_loader.vocab_dict.num_vocab
num_choices = data_reader.batch_loader.answer_dict.num_vocab
module_names = data_reader.batch_loader.layout_dict.word_list
# Inputs and model
input_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_feat_batch = tf.placeholder(
tf.float32, [None, cfg.MODEL.H_FEAT, cfg.MODEL.W_FEAT, cfg.MODEL.FEAT_DIM])
model = Model(
input_seq_batch, seq_length_batch, image_feat_batch, num_vocab=num_vocab,
num_choices=num_choices, module_names=module_names, is_training=False)
# Load snapshot
if cfg.TEST.USE_EMV:
ema = tf.train.ExponentialMovingAverage(decay=0.9) # decay doesn't matter
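    # Restore each model parameter from its exponential-moving-average shadow
    # variable (keyed by ema.average_name); other variables keep their own names.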
var_names = {
(ema.average_name(v) if v in model.params else v.op.name): v
for v in tf.global_variables()}
else:
var_names = {v.op.name: v for v in tf.global_variables()}
snapshot_file = cfg.TEST.SNAPSHOT_FILE % (cfg.EXP_NAME, cfg.TEST.ITER)
snapshot_saver = tf.train.Saver(var_names)
snapshot_saver.restore(sess, snapshot_file)
# Write results
result_dir = cfg.TEST.RESULT_DIR % (cfg.EXP_NAME, cfg.TEST.ITER)
vis_dir = os.path.join(
result_dir, 'vqa_%s_%s' % (cfg.TEST.VIS_DIR_PREFIX, cfg.TEST.SPLIT_VQA))
os.makedirs(result_dir, exist_ok=True)
os.makedirs(vis_dir, exist_ok=True)
# Run test
answer_correct, num_questions = 0, 0
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions = []
answer_word_list = data_reader.batch_loader.answer_dict.word_list
pred_file = os.path.join(
result_dir, 'vqa_eval_preds_%s_%s_%08d.txt' % (
cfg.TEST.SPLIT_VQA, cfg.EXP_NAME, cfg.TEST.ITER))
for n_batch, batch in enumerate(data_reader.batches()):
if 'answer_label_batch' not in batch:
batch['answer_label_batch'] = -np.ones(
len(batch['image_feat_batch']), np.int32)
if num_questions == 0:
print('imdb has no answer labels. Using dummy labels.\n\n'
'**The final accuracy will be zero (no labels provided)**\n')
fetch_list = [model.vqa_scores]
answer_incorrect = num_questions - answer_correct
if cfg.TEST.VIS_SEPARATE_CORRECTNESS:
run_vis = (
answer_correct < cfg.TEST.NUM_VIS_CORRECT or
answer_incorrect < cfg.TEST.NUM_VIS_INCORRECT)
else:
run_vis = num_questions < cfg.TEST.NUM_VIS
if run_vis:
fetch_list.append(model.vis_outputs)
fetch_list_val = sess.run(fetch_list, feed_dict={
input_seq_batch: batch['input_seq_batch'],
seq_length_batch: batch['seq_length_batch'],
image_feat_batch: batch['image_feat_batch']})
# visualization
if run_vis:
model.vis_batch_vqa(
data_reader, batch, fetch_list_val[-1], num_questions,
answer_correct, answer_incorrect, vis_dir)
# compute accuracy
vqa_scores_val = fetch_list_val[0]
vqa_labels = batch['answer_label_batch']
vqa_predictions = np.argmax(vqa_scores_val, axis=1)
answer_correct += np.sum(vqa_predictions == vqa_labels)
num_questions += len(vqa_labels)
accuracy = answer_correct / num_questions
if n_batch % 20 == 0:
print('exp: %s, iter = %d, accumulated accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions += [answer_word_list[p] for p in vqa_predictions]
with open(os.path.join(
result_dir, 'vqa_results_%s.txt' % cfg.TEST.SPLIT_VQA), 'w') as f:
print('\nexp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
print('exp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions), file=f)
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
with open(pred_file, 'w') as f:
f.writelines([a + '\n' for a in output_predictions])
print('prediction file written to %s' % pred_file)
|
[
"ronghang.hu@gmail.com"
] |
ronghang.hu@gmail.com
|
2cc01863d67ed3e0b978db0deaaacd0dd6373f46
|
d4f98f21a60cba036d82f8184c4395226ec36a12
|
/zipfsong.py
|
723283138a5dd4508c970abd1af5e0c703ffe2ae
|
[] |
no_license
|
nborkowska/spotify
|
ebe353f903732af22713a38f5a327d074ccfa645
|
914ee3f24b803797289e6ea4e3b19032df9fd37b
|
refs/heads/master
| 2021-01-20T01:54:10.122760
| 2013-04-09T15:20:54
| 2013-04-09T15:20:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
#! /usr/bin/env python
import sys
k, p = map(int, sys.stdin.readline().split())
scores=[]
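# Quality score sketch (assumed reasoning, not stated in the source): under Zipf's
# law the i-th song (0-based) is expected to get plays proportional to 1/(i+1),
# so plays * (i+1) serves as a comparable quality estimate.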
for i in xrange(k):
data = sys.stdin.readline().split()
score = int(data[0])*(i+1)
scores.append((score, k-i, data[1]))
scores.sort(reverse=True)
for i in xrange(p):
print scores[i][2]
|
[
"nborkowska@gmail.com"
] |
nborkowska@gmail.com
|
290e22b1f6b6de3fc599bd6df9cd64a87cbd8797
|
13df8da61a3c1595fb41f4c6b7d694c738e1bd8b
|
/Day_2_Assignment/d2.py
|
ff101b8a765b8f81764e1ff5fdbe4a1519e443e7
|
[] |
no_license
|
vedantg1000/Python-Zero-to-Hero-Aug-
|
de3071b3a818c495b875daf8c8a4ad16319576cd
|
daf245f5616603d7a83528190f9f975773e298d4
|
refs/heads/main
| 2023-07-11T12:41:26.787588
| 2021-08-24T10:06:24
| 2021-08-24T10:06:24
| 396,869,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
# Day 2 Assignment: Take a string input and print the number of occurrences of each character.
string = input("Enter any String: ")
for x in string:
print(x, "Ocurred", string.count(x), "times")
|
[
"noreply@github.com"
] |
vedantg1000.noreply@github.com
|
72ef91d4df5dfb1686ca6fbeecaa9e4f263501c5
|
0c302024c543b9606a428039c6987c0205c8b3fa
|
/accounts/urls.py
|
70623c0c90c3167127889b0f71638f6be9c92fff
|
[] |
no_license
|
PrrrStar/ProjectNP
|
868c5df7e94f6d683e9374f86b8af3e74526b20f
|
f1d50febdd1884d030efaa5f6ae2bd6056070c38
|
refs/heads/master
| 2023-01-11T04:21:35.263955
| 2020-11-14T02:27:34
| 2020-11-14T02:27:34
| 292,203,596
| 7
| 6
| null | 2020-09-16T17:47:18
| 2020-09-02T06:53:53
|
Python
|
UTF-8
|
Python
| false
| false
| 554
|
py
|
from django.urls import path, include
from .views import *
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
urlpatterns = [
path('user_signup/', user_signup, name='user_signup'),
path('user_login/', user_login, name='user_login'),
path('user_logout/', csrf_exempt(user_logout), name='user_logout'),
path('profile/', user_profile, name= 'user_profile'),
path('myprofile/', edit_user_profile, name = 'edit_user_profile'),
path('delete/', delete_user_profile, name='delete_user_profile'),
]
|
[
"42401897+PrrrStar@users.noreply.github.com"
] |
42401897+PrrrStar@users.noreply.github.com
|
7af112c1920d3187c00607f80751e86a3312d096
|
f8f9f467ebe74b7d663fe8812f617e0639d03ae7
|
/tools/gen_mozc_drawable.py
|
4ad889f54ffdec945bf3311c7c08458acfc1b066
|
[] |
no_license
|
tmatz/unistroke-mozc
|
bf82ddcf49f3ddec41a913f4e28b64359cfc25c4
|
8f06b75a7813ec1344a3e8608a67fb6e07bbe0a6
|
refs/heads/master
| 2021-01-13T01:14:46.829459
| 2013-06-19T10:08:30
| 2013-06-19T10:08:30
| 10,733,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,847
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2010-2013, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities to create Mozc's .pic file from .svg file.
This module provides some utilities to create a .pic file, which is interpreted
in MechaMozc, from .svg files.
.pic file is a Mozc original format to represent vector image data shared by
all android platforms.
Note: another idea was to use binary data which is saved via PictureDrawable
on Android, but it seems that there are some differences between devices,
and it'd cause a crash error, unfortunately.
"""
__author__ = "hidehiko"
import cStringIO as StringIO
import logging
import optparse
import os
import re
import struct
import sys
from xml.etree import cElementTree as ElementTree
from build_tools import util
# State const values defined in android.R.attr.
STATE_PRESSED = 0x010100a7
STATE_SELECTED = 0x010100a1
STATE_CHECKED = 0x010100a0
DRAWABLE_PICTURE = 1
DRAWABLE_STATE_LIST = 2
DRAWABLE_ANIMATION = 3
CMD_PICTURE_EOP = 0
CMD_PICTURE_DRAW_PATH = 1
CMD_PICTURE_DRAW_POLYLINE = 2
CMD_PICTURE_DRAW_POLYGON = 3
CMD_PICTURE_DRAW_LINE = 4
CMD_PICTURE_DRAW_RECT = 5
CMD_PICTURE_DRAW_CIRCLE = 6
CMD_PICTURE_DRAW_ELLIPSE = 7
CMD_PICTURE_PATH_EOP = 0
CMD_PICTURE_PATH_MOVE = 1
CMD_PICTURE_PATH_LINE = 2
CMD_PICTURE_PATH_HORIZONTAL_LINE = 3
CMD_PICTURE_PATH_VERTICAL_LINE = 4
CMD_PICTURE_PATH_CURVE = 5
CMD_PICTURE_PATH_CONTINUED_CURVE = 6
CMD_PICTURE_PATH_CLOSE = 7
CMD_PICTURE_PAINT_EOP = 0
CMD_PICTURE_PAINT_STYLE = 1
CMD_PICTURE_PAINT_COLOR = 2
CMD_PICTURE_PAINT_SHADOW = 3
CMD_PICTURE_PAINT_STROKE_WIDTH = 4
CMD_PICTURE_PAINT_STROKE_CAP = 5
CMD_PICTURE_PAINT_STROKE_JOIN = 6
CMD_PICTURE_PAINT_SHADER = 7
CMD_PICTURE_SHADER_LINEAR_GRADIENT = 1
CMD_PICTURE_SHADER_RADIAL_GRADIENT = 2
STYLE_CATEGORY_TAG = 128
STYLE_CATEGORY_KEYICON_MAIN = 0
STYLE_CATEGORY_KEYICON_GUIDE = 1
STYLE_CATEGORY_KEYICON_GUIDE_LIGHT = 2
STYLE_CATEGORY_KEYICON_MAIN_HIGHLIGHT = 3
STYLE_CATEGORY_KEYICON_GUIDE_HIGHLIGHT = 4
STYLE_CATEGORY_KEYICON_BOUND = 5
STYLE_CATEGORY_KEYICON_FUNCTION = 6
STYLE_CATEGORY_KEYICON_FUNCTION_DARK = 7
STYLE_CATEGORY_KEYICON_QWERTY_SHIFT_ON_ARROW = 8
STYLE_CATEGORY_KEYICON_QWERTY_CAPS_ON_ARROW = 9
STYLE_CATEGORY_KEYPOPUP_HIGHLIGHT = 10
STYLE_CATEGORY_KEYICON_POPUP_FUNCTION = 11
STYLE_CATEGORY_KEYICON_POPUP_FUNCTION_DARK = 12
# We may be able to reuse same resources for symbol major/minor icons.
STYLE_CATEGORY_SYMBOL_MAJOR = 13
STYLE_CATEGORY_SYMBOL_MAJOR_SELECTED = 14
STYLE_CATEGORY_SYMBOL_MINOR = 15
STYLE_CATEGORY_SYMBOL_MINOR_SELECTED = 16
STYLE_CATEGORY_KEYBOARD_FOLDING_BUTTON_BACKGROUND = 17
# We'll check the category id in reverse sorted order, to resolve prefix match
# conflicts.
STYLE_CATEGORY_MAP = sorted(
[
('style-keyicon-main', STYLE_CATEGORY_KEYICON_MAIN),
('style-keyicon-guide', STYLE_CATEGORY_KEYICON_GUIDE),
('style-keyicon-guide-light', STYLE_CATEGORY_KEYICON_GUIDE_LIGHT),
('style-keyicon-main-highlight', STYLE_CATEGORY_KEYICON_MAIN_HIGHLIGHT),
('style-keyicon-guide-highlight',
STYLE_CATEGORY_KEYICON_GUIDE_HIGHLIGHT),
('style-keyicon-bound', STYLE_CATEGORY_KEYICON_BOUND),
('style-keyicon-function', STYLE_CATEGORY_KEYICON_FUNCTION),
('style-keyicon-function-dark', STYLE_CATEGORY_KEYICON_FUNCTION_DARK),
('style-keyicon-qwerty-shift-on-arrow',
STYLE_CATEGORY_KEYICON_QWERTY_SHIFT_ON_ARROW),
('style-keyicon-qwerty-caps-on-arrow',
STYLE_CATEGORY_KEYICON_QWERTY_CAPS_ON_ARROW),
('style-keypopup-highlight', STYLE_CATEGORY_KEYPOPUP_HIGHLIGHT),
('style-keyicon-popup-function',
STYLE_CATEGORY_KEYICON_POPUP_FUNCTION),
('style-keyicon-popup-function-dark',
STYLE_CATEGORY_KEYICON_POPUP_FUNCTION_DARK),
('style-symbol-major', STYLE_CATEGORY_SYMBOL_MAJOR),
('style-symbol-major-selected', STYLE_CATEGORY_SYMBOL_MAJOR_SELECTED),
('style-symbol-minor', STYLE_CATEGORY_SYMBOL_MINOR),
('style-symbol-minor-selected', STYLE_CATEGORY_SYMBOL_MINOR_SELECTED),
('style-keyboard-folding-button-background',
STYLE_CATEGORY_KEYBOARD_FOLDING_BUTTON_BACKGROUND),
],
reverse=True)
# Format of PictureDrawable:
# 1, width, height, {CMD_PICTURE_XXX sequence}.
#
# Format of StateListDrawable:
# 2, [[state_list], drawable]
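#
# Illustrative sketch (hypothetical values): a 32x32 picture containing a single
# rect would serialize roughly as
#   0x01 (DRAWABLE_PICTURE), int16 width=32, int16 height=32,
#   CMD_PICTURE_DRAW_RECT + x/y/w/h floats + style list, ..., CMD_PICTURE_EOP.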
COLOR_PATTERN = re.compile(r'#([0-9A-Fa-f]{6})')
PIXEL_PATTERN = re.compile(r'(\d+)(px)?')
FLOAT_PATTERN = re.compile(r'^\s*,?\s*([+-]?\d+(?:\.\d*)?(:?e[+-]\d+)?)')
SHADER_PATTERN = re.compile(r'url\(#(.*)\)')
MATRIX_PATTERN = re.compile(r'matrix\((.*)\)')
STOP_COLOR_PATTERN = re.compile(r'stop-color:(.*)')
NO_SHADOW = 0
HAS_SHADOW = 1
HAS_BELOW_SHADOW = 2
class _OutputStream(object):
"""Simple wrapper of output stream by by packing value in big endian."""
def __init__(self, output):
self.output = output
def WriteByte(self, value):
if not (0 <= value <= 255):
logging.critical('overflow')
sys.exit(1)
self.output.write(struct.pack('>B', value & 0xFF))
def WriteInt16(self, value):
if not (0 <= value <= 65535):
logging.critical('overflow')
sys.exit(1)
self.output.write(struct.pack('>H', value & 0xFFFF))
def WriteInt32(self, value):
self.output.write(struct.pack('>I', value & 0xFFFFFFFF))
def WriteFloat(self, value):
    # TODO(hidehiko): Because the precision of the original values is a thousandth,
    # we could compress the data by using fixed-precision values.
self.output.write(struct.pack('>f', value))
  def __enter__(self):
    return self.output.__enter__()
  def __exit__(self, exc_type, exc_value, traceback):
    return self.output.__exit__(exc_type, exc_value, traceback)
class MozcDrawableConverter(object):
"""Converter from .svg file to .pic file."""
def __init__(self):
pass
# Definitions of svg parsing utilities.
def _ParseColor(self, color):
"""Parses color attribute and returns int32 value or None."""
if color == 'none':
return None
m = COLOR_PATTERN.match(color)
if not m:
return None
c = int(m.group(1), 16)
if c < 0 or 0x1000000 <= c:
logging.critical('Out of color range: %s', color)
sys.exit(1)
# Set alpha.
return c | 0xFF000000
def _ParseShader(self, color, shader_map):
"""Parses shader attribute and returns a shader name from the given map."""
if color == 'none':
return None
m = SHADER_PATTERN.match(color)
if not m:
return None
return shader_map[m.group(1)]
def _ParsePixel(self, s):
"""Parses pixel size from the given attribute value."""
m = PIXEL_PATTERN.match(s)
if not m:
return None
return int(m.group(1))
def _ConsumeFloat(self, s):
"""Returns one floating value from string.
Args:
s: the target of parsing.
Return:
A tuple of the parsed floating value and the remaining string.
"""
m = FLOAT_PATTERN.search(s)
if not m:
logging.critical('failed to consume float: %s', s)
sys.exit(1)
return float(m.group(1)), s[m.end():]
def _ConsumeFloatList(self, s, num):
"""Parses num floating values from s."""
result = []
for _ in xrange(num):
value, s = self._ConsumeFloat(s)
result.append(value)
return result, s
def _ParseFloatList(self, s):
"""Parses floating list from string."""
s = s.strip()
result = []
while s:
value, s = self._ConsumeFloat(s)
result.append(value)
return result
def _ParseShaderMap(self, node):
if node.tag in ['{http://www.w3.org/2000/svg}g',
'{http://www.w3.org/2000/svg}svg']:
result = {}
for child in node:
result.update(self._ParseShaderMap(child))
return result
if node.tag == '{http://www.w3.org/2000/svg}linearGradient':
element_id = node.get('id')
x1 = float(node.get('x1'))
y1 = float(node.get('y1'))
x2 = float(node.get('x2'))
y2 = float(node.get('y2'))
gradientTransform = node.get('gradientTransform')
if gradientTransform:
m = MATRIX_PATTERN.match(gradientTransform)
(m11, m21, m12, m22, m13, m23) = self._ParseFloatList(m.group(1))
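        # gradientTransform is an affine matrix(m11 m21 m12 m22 m13 m23);
        # apply it to both gradient end points below.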
(x1, y1) = (m11 * x1 + m12 * y1 + m13, m21 * x1 + m22 * y1 + m23)
(x2, y2) = (m11 * x2 + m12 * y2 + m13, m21 * x2 + m22 * y2 + m23)
color_list = self._ParseStopList(node)
return { element_id: ('linear', x1, y1, x2, y2, color_list) }
if node.tag == '{http://www.w3.org/2000/svg}radialGradient':
element_id = node.get('id')
cx = float(node.get('cx'))
cy = float(node.get('cy'))
r = float(node.get('r'))
gradientTransform = node.get('gradientTransform')
if gradientTransform:
m = MATRIX_PATTERN.match(gradientTransform)
matrix = self._ParseFloatList(m.group(1))
else:
matrix = None
color_list = self._ParseStopList(node)
return { element_id: ('radial', cx, cy, r, matrix, color_list) }
return {}
def _ParseStyle(self, node, has_shadow, shader_map):
"""Parses style attribute of the given node."""
result = {}
for attr in node.get('style', '').split(';'):
attr = attr.strip()
if not attr:
continue
command, arg = attr.split(':')
if command == 'fill' or command == 'stroke':
shader = self._ParseShader(arg, shader_map)
color = self._ParseColor(arg)
if shader is None and color is None:
if arg != 'none':
logging.error('Unknown pattern: %s', arg)
continue
paint_map = {}
paint_map['style'] = command
if shader is not None:
paint_map['shader'] = shader
if color is not None:
paint_map['color'] = color
paint_map['shadow'] = has_shadow
result[command] = paint_map
continue
if command == 'stroke-width':
paint_map = result['stroke']
paint_map['stroke-width'] = float(arg)
continue
if command == 'stroke-linecap':
paint_map = result['stroke']
paint_map['stroke-linecap'] = arg
continue
if command == 'stroke-linejoin':
paint_map = result['stroke']
paint_map['stroke-linejoin'] = arg
continue
return sorted(result.values(), key=lambda e: e['style'])
def _ParseStopList(self, parent_node):
result = []
for stop in parent_node:
if stop.tag != '{http://www.w3.org/2000/svg}stop':
logging.critical('unknown elem: %s', stop.tag)
sys.exit(1)
offset = float(stop.get('offset'))
color = self._ParseColor(
STOP_COLOR_PATTERN.match(stop.get('style')).group(1))
result.append((offset, color))
return result
# Definition of conversion utilities.
def _ConvertSize(self, tree, output):
node = tree.getroot()
width = self._ParsePixel(node.get('width'))
height = self._ParsePixel(node.get('height'))
if width is None or height is None:
logging.critical('Unknown size')
sys.exit(1)
output.WriteInt16(width)
output.WriteInt16(height)
def _MaybeConvertShadow(self, has_shadow, output):
if has_shadow == HAS_SHADOW:
output.WriteByte(CMD_PICTURE_PAINT_SHADOW)
output.WriteFloat(2.)
output.WriteFloat(0.)
output.WriteFloat(-1.)
output.WriteInt32(0xFF404040)
return
if has_shadow == HAS_BELOW_SHADOW:
output.WriteByte(CMD_PICTURE_PAINT_SHADOW)
output.WriteFloat(4.)
output.WriteFloat(0.)
output.WriteFloat(2.)
output.WriteInt32(0xFF292929)
def _ConvertStyle(self, style, output):
if style == 'fill':
output.WriteByte(CMD_PICTURE_PAINT_STYLE)
output.WriteByte(0)
return
if style == 'stroke':
output.WriteByte(CMD_PICTURE_PAINT_STYLE)
output.WriteByte(1)
return
logging.critical('unknown style: %s', style)
sys.exit(1)
def _MaybeConvertColor(self, color, output):
if color is None:
return
output.WriteByte(CMD_PICTURE_PAINT_COLOR)
output.WriteInt32(color)
def _MaybeConvertShader(self, shader, output):
if shader is None:
return
output.WriteByte(CMD_PICTURE_PAINT_SHADER)
if shader[0] == 'linear':
output.WriteByte(CMD_PICTURE_SHADER_LINEAR_GRADIENT)
for coord in shader[1:5]:
output.WriteFloat(coord)
output.WriteByte(len(shader[5]))
for _, color in shader[5]:
output.WriteInt32(color)
for offset, _ in shader[5]:
output.WriteFloat(offset)
return
if shader[0] == 'radial':
output.WriteByte(CMD_PICTURE_SHADER_RADIAL_GRADIENT)
for coord in shader[1:4]:
output.WriteFloat(coord)
# Output matrix.
if shader[4] is None:
output.WriteByte(0)
else:
output.WriteByte(1)
for coord in shader[4]:
output.WriteFloat(coord)
output.WriteByte(len(shader[5]))
for _, color in shader[5]:
output.WriteInt32(color)
for offset, _ in shader[5]:
output.WriteFloat(offset)
return
logging.critical('unknown shader: %s', shader[0])
sys.exit(1)
def _MaybeConvertStrokeWidth(self, stroke_width, output):
if stroke_width is None:
return
output.WriteByte(CMD_PICTURE_PAINT_STROKE_WIDTH)
output.WriteFloat(stroke_width)
def _MaybeConvertStrokeLinecap(self, stroke_linecap, output):
if stroke_linecap is None:
return
if stroke_linecap == 'round':
output.WriteByte(CMD_PICTURE_PAINT_STROKE_CAP)
output.WriteByte(1)
return
if stroke_linecap == 'square':
output.WriteByte(CMD_PICTURE_PAINT_STROKE_CAP)
output.WriteByte(2)
return
logging.critical('unknown stroke-linecap: %s', stroke_linecap)
sys.exit(1)
def _MaybeConvertStrokeLinejoin(self, stroke_linejoin, output):
if stroke_linejoin is None:
return
if stroke_linejoin == 'round':
output.WriteByte(CMD_PICTURE_PAINT_STROKE_JOIN)
output.WriteByte(1)
return
if stroke_linejoin == 'bevel':
output.WriteByte(CMD_PICTURE_PAINT_STROKE_JOIN)
output.WriteByte(2)
return
logging.critical('unknown stroke-linejoin: %s', stroke_linejoin)
sys.exit(1)
def _ConvertStyleMap(self, style_map, output):
self._ConvertStyle(style_map['style'], output)
self._MaybeConvertColor(style_map.get('color'), output)
self._MaybeConvertShader(style_map.get('shader'), output)
self._MaybeConvertShadow(style_map['shadow'], output)
self._MaybeConvertStrokeWidth(style_map.get('stroke-width'), output)
self._MaybeConvertStrokeLinecap(style_map.get('stroke-linecap'), output)
self._MaybeConvertStrokeLinejoin(style_map.get('stroke-linejoin'), output)
output.WriteByte(CMD_PICTURE_PAINT_EOP)
def _ConvertStyleList(self, style_list, output):
output.WriteByte(len(style_list))
for style_map in style_list:
self._ConvertStyleMap(style_map, output)
def _ConvertStyleCategory(self, style_category, output):
output.WriteByte(1)
for id_prefix, category in STYLE_CATEGORY_MAP:
if style_category.startswith(id_prefix):
output.WriteByte(STYLE_CATEGORY_TAG + category)
return
logging.critical('unknown style_category: "%s"', style_category)
sys.exit(1)
def _ConvertPath(self, node, output):
path = node.get('d')
if path is None:
logging.critical('Unknown path')
sys.exit(1)
# TODO support continuous commands.
prev_control = None
prev = None
command_list = []
while True:
path = path.strip()
if not path:
break
command = path[0]
if command == 'm' or command == 'M':
# Move command.
(x, y), path = self._ConsumeFloatList(path[1:], 2)
if command == 'm' and prev is not None:
x += prev[0]
y += prev[1]
command_list.append((CMD_PICTURE_PATH_MOVE, x, y))
prev = (x, y)
prev_control = None
start = (x, y)
continue
if command == 'c' or command == 'C':
# Cubic curve.
(x1, y1, x2, y2, x, y), path = self._ConsumeFloatList(path[1:], 6)
if command == 'c':
x1 += prev[0]
y1 += prev[1]
x2 += prev[0]
y2 += prev[1]
x += prev[0]
y += prev[1]
command_list.append((CMD_PICTURE_PATH_CURVE, x1, y1, x2, y2, x, y))
prev = (x, y)
prev_control = (x2, y2)
continue
if command == 's' or command == 'S':
# Continued cubic curve.
(x2, y2, x, y), path = self._ConsumeFloatList(path[1:], 4)
if command == 's':
x2 += prev[0]
y2 += prev[1]
x += prev[0]
y += prev[1]
# if prev_control is not None:
# x1 = 2 * prev[0] - prev_control[0]
# y1 = 2 * prev[1] - prev_control[1]
# else:
# x1, y1 = prev
command_list.append((CMD_PICTURE_PATH_CONTINUED_CURVE, x2, y2, x, y))
prev = (x, y)
prev_control = (x2, y2)
continue
if command == 'h' or command == 'H':
# Horizontal line.
x, path = self._ConsumeFloat(path[1:])
if command == 'h':
x += prev[0]
y = prev[1]
command_list.append((CMD_PICTURE_PATH_HORIZONTAL_LINE, x))
prev = (x, y)
prev_control = None
continue
if command == 'v' or command == 'V':
# Vertical line.
y, path = self._ConsumeFloat(path[1:])
if command == 'v':
y += prev[1]
x = prev[0]
command_list.append((CMD_PICTURE_PATH_VERTICAL_LINE, y))
prev = (x, y)
prev_control = None
continue
if command == 'l' or command == 'L':
# Line.
(x, y), path = self._ConsumeFloatList(path[1:], 2)
if command == 'l':
x += prev[0]
y += prev[1]
command_list.append((CMD_PICTURE_PATH_LINE, x, y))
prev = (x, y)
prev_control = None
continue
if command == 'z' or command == 'Z':
# Close the path.
command_list.append((CMD_PICTURE_PATH_CLOSE,))
path = path[1:]
prev = start
prev_control = None
continue
logging.critical('Unknown command: %s', path)
sys.exit(1)
command_list.append((CMD_PICTURE_PATH_EOP,))
# Output.
output.WriteByte(CMD_PICTURE_DRAW_PATH)
for command in command_list:
output.WriteByte(command[0])
for coord in command[1:]:
output.WriteFloat(coord)
def _ConvertPathElement(
self, node, style_category, has_shadow, shader_map, output):
style_list = self._ParseStyle(node, has_shadow, shader_map)
self._ConvertPath(node, output)
if style_category is not None:
self._ConvertStyleCategory(style_category, output)
else:
self._ConvertStyleList(style_list, output)
def _ConvertPolylineElement(
self, node, style_category, has_shadow, shader_map, output):
point_list = self._ParseFloatList(node.get('points'))
if len(point_list) < 2:
logging.critical('Invalid point number.')
sys.exit(1)
style_list = self._ParseStyle(node, has_shadow, shader_map)
output.WriteByte(CMD_PICTURE_DRAW_POLYLINE)
output.WriteByte(len(point_list))
for coord in point_list:
output.WriteFloat(coord)
if style_category is not None:
self._ConvertStyleCategory(style_category, output)
else:
self._ConvertStyleList(style_list, output)
def _ConvertPolygonElement(
self, node, style_category, has_shadow, shader_map, output):
style_list = self._ParseStyle(node, has_shadow, shader_map)
point_list = self._ParseFloatList(node.get('points'))
output.WriteByte(CMD_PICTURE_DRAW_POLYGON)
output.WriteByte(len(point_list))
for coord in point_list:
output.WriteFloat(coord)
if style_category is not None:
self._ConvertStyleCategory(style_category, output)
else:
self._ConvertStyleList(style_list, output)
def _ConvertLineElement(
self, node, style_category, has_shadow, shader_map, output):
style_list = self._ParseStyle(node, has_shadow, shader_map)
x1 = float(node.get('x1'))
y1 = float(node.get('y1'))
x2 = float(node.get('x2'))
y2 = float(node.get('y2'))
output.WriteByte(CMD_PICTURE_DRAW_LINE)
output.WriteFloat(x1)
output.WriteFloat(y1)
output.WriteFloat(x2)
output.WriteFloat(y2)
if style_category is not None:
self._ConvertStyleCategory(style_category, output)
else:
self._ConvertStyleList(style_list, output)
def _ConvertCircleElement(
self, node, style_category, has_shadow, shader_map, output):
style_list = self._ParseStyle(node, has_shadow, shader_map)
cx = float(node.get('cx'))
cy = float(node.get('cy'))
r = float(node.get('r'))
output.WriteByte(CMD_PICTURE_DRAW_CIRCLE)
output.WriteFloat(cx)
output.WriteFloat(cy)
output.WriteFloat(r)
if style_category is not None:
self._ConvertStyleCategory(style_category, output)
else:
self._ConvertStyleList(style_list, output)
def _ConvertEllipseElement(
self, node, style_category, has_shadow, shader_map, output):
style_list = self._ParseStyle(node, has_shadow, shader_map)
cx = float(node.get('cx'))
cy = float(node.get('cy'))
rx = float(node.get('rx'))
ry = float(node.get('ry'))
output.WriteByte(CMD_PICTURE_DRAW_ELLIPSE)
output.WriteFloat(cx)
output.WriteFloat(cy)
output.WriteFloat(rx)
output.WriteFloat(ry)
if style_category is not None:
self._ConvertStyleCategory(style_category, output)
else:
self._ConvertStyleList(style_list, output)
def _ConvertRectElement(
self, node, style_category, has_shadow, shader_map, output):
style_list = self._ParseStyle(node, has_shadow, shader_map)
x = float(node.get('x', 0))
y = float(node.get('y', 0))
w = float(node.get('width'))
h = float(node.get('height'))
output.WriteByte(CMD_PICTURE_DRAW_RECT)
output.WriteFloat(x)
output.WriteFloat(y)
output.WriteFloat(w)
output.WriteFloat(h)
if style_category is not None:
self._ConvertStyleCategory(style_category, output)
else:
self._ConvertStyleList(style_list, output)
def _ConvertPictureSequence(
self, node, style_category, has_shadow, shader_map, output):
# Hack. To support shadow, we use 'id' attribute.
    # If the 'id' starts with 'shadow', it means the element and its children
    # have a shadow.
nodeid = node.get('id', '')
if nodeid.startswith('shadow'):
has_shadow = HAS_SHADOW
elif nodeid.startswith('below_x5F_shadow'):
has_shadow = HAS_BELOW_SHADOW
if nodeid.startswith('style-'):
style_category = nodeid
if node.tag == '{http://www.w3.org/2000/svg}path':
self._ConvertPathElement(
node, style_category, has_shadow, shader_map, output)
return
if node.tag == '{http://www.w3.org/2000/svg}polyline':
self._ConvertPolylineElement(
node, style_category, has_shadow, shader_map, output)
return
if node.tag == '{http://www.w3.org/2000/svg}polygon':
self._ConvertPolygonElement(
node, style_category, has_shadow, shader_map, output)
return
if node.tag == '{http://www.w3.org/2000/svg}line':
self._ConvertLineElement(
node, style_category, has_shadow, shader_map, output)
return
if node.tag == '{http://www.w3.org/2000/svg}circle':
self._ConvertCircleElement(
node, style_category, has_shadow, shader_map, output)
return
if node.tag == '{http://www.w3.org/2000/svg}ellipse':
self._ConvertEllipseElement(
node, style_category, has_shadow, shader_map, output)
return
if node.tag == '{http://www.w3.org/2000/svg}rect':
self._ConvertRectElement(
node, style_category, has_shadow, shader_map, output)
return
if node.tag in ['{http://www.w3.org/2000/svg}g',
'{http://www.w3.org/2000/svg}svg']:
# Flatten child nodes.
for child in node:
self._ConvertPictureSequence(
child, style_category, has_shadow, shader_map, output)
return
if node.tag in ['{http://www.w3.org/2000/svg}linearGradient',
'{http://www.w3.org/2000/svg}radialGradient']:
return
logging.warning('Unknown element: %s', node.tag)
def _OutputEOP(self, output):
output.WriteByte(CMD_PICTURE_EOP)
def _ConvertPictureDrawableInternal(self, tree, output):
output.WriteByte(DRAWABLE_PICTURE)
shader_map = self._ParseShaderMap(tree.getroot())
self._ConvertSize(tree, output)
self._ConvertPictureSequence(
tree.getroot(), None, NO_SHADOW, shader_map, output)
self._OutputEOP(output)
# Interface for drawable conversion.
def ConvertPictureDrawable(self, path):
output = _OutputStream(StringIO.StringIO())
self._ConvertPictureDrawableInternal(ElementTree.parse(path), output)
return output.output.getvalue()
def ConvertStateListDrawable(self, drawable_source_list):
output = _OutputStream(StringIO.StringIO())
output.WriteByte(DRAWABLE_STATE_LIST)
output.WriteByte(len(drawable_source_list))
for (state_list, path) in drawable_source_list:
# Output state.
output.WriteByte(len(state_list))
for state in state_list:
output.WriteInt32(state)
# Output drawable.
self._ConvertPictureDrawableInternal(ElementTree.parse(path), output)
return output.output.getvalue()
# This method is actually not used, but we can use it to create animation
# drawables.
def ConvertAnimationDrawable(self, drawable_source_list):
output = _OutputStream(StringIO.StringIO())
output.WriteByte(DRAWABLE_ANIMATION)
output.WriteByte(len(drawable_source_list))
for (duration, path) in drawable_source_list:
# Output duration and corresponding picture drawable.
output.WriteInt16(duration)
self._ConvertPictureDrawableInternal(ElementTree.parse(path), output)
return output.output.getvalue()
def ConvertFiles(svg_dir, output_dir):
# Ensure that the output directory exists.
if not os.path.exists(output_dir):
os.makedirs(output_dir)
converter = MozcDrawableConverter()
for dirpath, dirnames, filenames in os.walk(svg_dir):
for filename in filenames:
basename, ext = os.path.splitext(filename)
if ext != '.svg':
# Do nothing for files other than svg.
continue
# Filename hack to generate stateful .pic files.
if basename.endswith('_release') or basename.endswith('_selected'):
# 'XXX_release.svg' files will be processed with corresponding
# '_center.svg' files. So just skip them.
# As similar to it, '_selected.svg' files will be processed with
# corresponding non-selected .svg files.
continue
if basename == 'keyboard_fold_tab_up':
# 'keyboard_fold_tab_up.svg' file will be processed with
# 'keyboard_fold_tab_down.svg.' Just skip it, too.
continue
logging.info('Converting %s...', filename)
if basename.endswith('_center'):
# Process '_center.svg' file with '_release.svg' file to make
# stateful drawable.
center_svg_file = os.path.join(dirpath, filename)
release_svg_file = os.path.join(
dirpath, basename[:-7] + '_release.svg')
pic_file = os.path.join(output_dir, basename + '.pic')
pic_data = converter.ConvertStateListDrawable(
[([STATE_PRESSED], center_svg_file), ([], release_svg_file)])
elif os.path.exists(os.path.join(dirpath, basename + '_selected.svg')):
# Process '_selected.svg' file at the same time if necessary.
unselected_svg_file = os.path.join(dirpath, filename)
selected_svg_file = os.path.join(dirpath, basename + '_selected.svg')
pic_file = os.path.join(output_dir, basename + '.pic')
pic_data = converter.ConvertStateListDrawable(
[([STATE_SELECTED], selected_svg_file), ([], unselected_svg_file)])
elif basename == 'keyboard_fold_tab_down':
# Special hack for keyboard__fold__tab.pic.
down_svg_file = os.path.join(dirpath, filename)
up_svg_file = os.path.join(dirpath, 'keyboard_fold_tab_up.svg')
pic_file = os.path.join(output_dir, 'keyboard__fold__tab.pic')
pic_data = converter.ConvertStateListDrawable(
[([STATE_CHECKED], up_svg_file), ([], down_svg_file)])
else:
# Normal .svg file.
svg_file = os.path.join(dirpath, filename)
pic_file = os.path.join(output_dir, basename + '.pic')
pic_data = converter.ConvertPictureDrawable(svg_file)
with open(pic_file, 'wb') as stream:
stream.write(pic_data)
def ParseOptions():
parser = optparse.OptionParser()
parser.add_option('--svg_dir', dest='svg_dir',
help='Path to a directory containing .svg files.')
parser.add_option('--output_dir', dest='output_dir',
                    help='Path to the output directory.')
return parser.parse_args()[0]
def main():
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
logging.getLogger().addFilter(util.ColoredLoggingFilter())
options = ParseOptions()
ConvertFiles(options.svg_dir, options.output_dir)
if __name__ == '__main__':
main()
|
[
"matsu@users.sourceforge.net"
] |
matsu@users.sourceforge.net
|
f137c72c221aea441a62da38d458a08d6c236a23
|
ffb61ac9d593cf61c372a40909dfcbfb58842dc3
|
/gui.py
|
f5c0be93e41bbcc3d4a8b7eb0e11db6c728c2f6e
|
[] |
no_license
|
vasujain00/3D-Audio-For-Museum-Exhibits
|
6797f003b681266f3b55e7dec777b7d32f29eea6
|
9f4777faa00a2bcb3d3de855d1d85becc3189122
|
refs/heads/master
| 2021-04-15T04:55:12.990265
| 2018-03-14T03:17:09
| 2018-03-14T03:17:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,045
|
py
|
import sys
import hrtf
from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QRadioButton,
QToolTip, QPushButton, QDesktopWidget, QMessageBox, QGridLayout)
from PyQt5.QtGui import QIcon, QFont, QPixmap
class window(QWidget):
def __init__(self):
super().__init__()
self.radio_buttons = []
self.initUI()
def hrtfWrapper(self):
"""
Sets the proper arguments to be passed in to the hrtf function
"""
radio_selected = 0
for i in range(len(self.radio_buttons)):
if self.radio_buttons[i].isChecked():
radio_selected = i + 1
fileName = ''
aIndex = 0
eIndex = 8 # about line of sight elevation
if radio_selected == 1:
fileName = 'audio/rice_pouring_mono.wav'
aIndex = 12
elif radio_selected == 2:
fileName = 'audio/steak_searing_mono.wav'
aIndex = 24
elif radio_selected == 3:
fileName = 'audio/plantain_frying_mono.wav'
aIndex = 0
hrtf.hrtf(fileName, aIndex, eIndex)
def initUI(self):
"""
Sets up the GUI for the application
"""
# Set initial size of window and placement of window
self.resize(800,400)
# Set title and icon for window
self.setWindowTitle('3D Audio For Museum Exhibits')
self.setWindowIcon(QIcon('images/icon.png'))
# Center the window
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
# Set a grid for widgets to fall in line with
grid = QGridLayout()
self.setLayout(grid)
# Customize ToolTip
QToolTip.setFont(QFont('SansSerif', 10))
# Customize play button
play_btn = QPushButton('Play')
play_btn.setToolTip('Begin playback of 3D audio')
play_btn.resize(100,33)
play_btn.clicked.connect(self.hrtfWrapper)
# Customize stop button
stop_btn = QPushButton('Stop')
stop_btn.setToolTip('Stop playback of 3D audio')
stop_btn.resize(100,33)
# Customize canvas
canvas = QPixmap('images/canvas.jpg')
canvas_lbl = QLabel(self)
canvas_lbl.setPixmap(canvas)
# Customize radio buttons
radio_one = QRadioButton("1 - Rice Cooking")
radio_one.setChecked(True)
self.radio_buttons.append(radio_one)
radio_two = QRadioButton("2 - Steak Searing")
self.radio_buttons.append(radio_two)
radio_three = QRadioButton("3 - Plantains Frying")
self.radio_buttons.append(radio_three)
# Add button widgets to grid
grid.addWidget(QWidget(), 1, 1, 1, 3) # empty space
grid.addWidget(canvas_lbl, 2, 1, 3, 3) # <---canvas
grid.addWidget(QWidget(), 5, 1, 1, 3) # empty space
grid.addWidget(play_btn, 5, 4) # play button
grid.addWidget(stop_btn, 5, 5) # stop button
grid.addWidget(radio_one, 1, 4) # first radio button
grid.addWidget(radio_two, 2, 4) # second radio button
grid.addWidget(radio_three, 3, 4) # third radio button
grid.addWidget(QWidget(), 1, 5, 4, 1) # empty space
# Show the window
self.show()
def closeEvent(self, event):
"""
Overrides the closeEvent function to ask if the user really wants to quit
the real-time application
"""
reply = QMessageBox.question(self, '3D Audio for Museum Exhibits',
"Are you sure you wish to quit?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
if __name__ == '__main__':
# Represents our application and any command-line arguments
app = QApplication(sys.argv)
# Create window widget
w = window()
# Begin the mainloop execution and assure graceful exiting
sys.exit(app.exec_())
|
[
"shandysulen@gmail.com"
] |
shandysulen@gmail.com
|
b4fb587bbd2e9430ada729748ff9465c11b842d1
|
efde933d7ccfc1976c6e2f90c4051aedc7569d25
|
/josephus_problem_1.py
|
167d8fffc137da1f442913ccc53fd33e1d82c5ad
|
[] |
no_license
|
Hhzzj/python_homework
|
e986021cb572dbfe852b02797acc98fe7114f974
|
6dd054f5483744722d12ed555bfc5d5a14b34308
|
refs/heads/main
| 2023-07-03T08:06:02.336029
| 2021-07-25T08:20:43
| 2021-07-25T08:20:43
| 382,084,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,763
|
py
|
import random
CHARS1 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
CHARS2 = "abcdefghijklmnopqrstuvwxyz"
class people:
def __init__(self, last_name, first_name, people_id):
self.last_name = last_name
self.first_name = first_name
self.people_id = people_id
def get_name(CHARS):  # pick two random uppercase/lowercase letters as a last/first name
assert CHARS != ''
name = ''
for i in range(2):
name += random.choice(CHARS)
return name
def get_people_list(people_nums):
assert people_nums > 0
people_list = []
for i in range(people_nums):
        last_name = get_name(CHARS1)  # two random uppercase letters as the last name
        first_name = get_name(CHARS2)  # two random lowercase letters as the first name
        people_id = i+1  # number each person starting from 1
people_list.append(people(last_name, first_name, people_id))
return people_list
def josephus_circle(people_nums, interval, start_people_id):
assert people_nums > 0
assert interval > 0
assert 0 < start_people_id <= people_nums
people_list = get_people_list(people_nums)
josephus_circle_list = []
    for i in range(len(people_list)):  # find the position index of the person whose people_id equals start_people_id
if(start_people_id == people_list[i].people_id):
people_index = i
break
while(len(people_list)):
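        # Step `interval` people forward (the person at people_index counts as 1)
        # and wrap around the remaining circle with the modulo.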
people_index = (people_index+interval-1) % len(people_list)
josephus_circle_list.append(people_list.pop(people_index))
return josephus_circle_list
if __name__ == '__main__':
josephus_circle_list = josephus_circle(11, 3, 4)
for people in josephus_circle_list:
print(people.last_name, people.first_name, people.people_id)
|
[
"noreply@github.com"
] |
Hhzzj.noreply@github.com
|
b8e2120fcd66ff56ce5658f05e466269e248c642
|
99459cd11263f721155316164afddd1accf6419f
|
/stack.py
|
7dd3b5ad53c0b158b87031a28ec838fc68eca0de
|
[] |
no_license
|
dvmazuera/cs-data-structures-assessment
|
5dc767241bb8a1821726c5b13a96140a59d0babf
|
21082045955fa23cf26dd9dd52fdf9c22c0db31b
|
refs/heads/master
| 2021-01-22T11:29:32.020412
| 2017-05-29T03:55:31
| 2017-05-29T03:55:31
| 92,704,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,950
|
py
|
class StackEmptyError(IndexError):
"""Attempt to pop an empty stack."""
class Stack(object):
"""LIFO stack.
Implemented using a Python list; since stacks just need
to pop and push, a list is a good implementation, as
these are O(1) for native Python lists. However, in cases
where performance really matters, it might be best to
use a Python list directly, as it avoids the overhead
of a custom class.
Or, for even better performance (& typically smaller
memory footprint), you can use the `collections.deque`
object, which can act like a stack.
(We could also write our own LinkedList class for a
stack, where we push things onto the head and pop things
off the head (effectively reversing it), but that would be less
efficient than using a built-in Python list or a
`collections.deque` object)
"""
def __init__(self):
self._list = []
def __repr__(self):
if not self._list:
return "<Stack (empty)>"
else:
return "<Stack tail=%s length=%d>" % (
self._list[-1], len(self._list))
def push(self, item):
"""Add item to end of stack."""
self._list.append(item)
def pop(self):
"""Remove item from end of stack and return it."""
if not self._list:
raise StackEmptyError()
return self._list.pop()
def __iter__(self):
"""Allow iteration over list.
__iter__ is a special method that, when defined,
allows you to loop over a list, so you can say things
like "for item in my_stack", and it will pop
successive items off.
"""
while True:
try:
yield self.pop()
except StackEmptyError:
raise StopIteration
def length(self):
"""Return length of stack::
>>> s = Stack()
>>> s.length()
0
>>> s.push("dog")
>>> s.push("cat")
>>> s.push("fish")
>>> s.length()
3
"""
return len(self._list)
def empty(self):
"""Empty stack::
>>> s = Stack()
>>> s.push("dog")
>>> s.push("cat")
>>> s.push("fish")
>>> s.length()
3
>>> s.empty()
>>> s.length()
0
"""
self._list = []
def is_empty(self):
"""Is stack empty?
>>> s = Stack()
>>> s.is_empty()
True
>>> s.push("dog")
>>> s.push("cat")
>>> s.push("fish")
>>> s.is_empty()
False
"""
return not bool(self._list)
if __name__ == "__main__":
import doctest
print
result = doctest.testmod()
if not result.failed:
print "ALL TESTS PASSED. GOOD WORK!"
print
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
588200b38ff9087f4a8f7ccd550934f464c836ad
|
7ae1665ada0bf1a266dcf6352a19b8c19181dc3a
|
/hw7/NN_music.py
|
b547e64a691b6adcecbbe757fb6635163f5e042b
|
[] |
no_license
|
markmty/601-IML
|
d71e3aaba6079178470f74c3239ad48fc905bda4
|
140c736ee459a66ffc8ead1412224c5747a10109
|
refs/heads/master
| 2021-01-11T14:19:26.244311
| 2017-02-08T16:07:35
| 2017-02-08T16:07:35
| 81,347,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,713
|
py
|
import sys, math, random
import numpy as np
file_name1 = sys.argv[1]
fd1=open(file_name1)
data=fd1.read()
raw_list1=data.split("\n")
fd1.close()
raw_list1.pop(0)
raw_list1.pop(-1)
list1=[[]] * len(raw_list1)
for i in xrange(len(list1)):
list1[i] = raw_list1[i].split(",")
#print list1
if file_name1=="music_train.csv":
num_attr = 4
else:
num_attr = 5
#initialize input from string into numbers
for i in xrange(len(list1)):
for j in xrange(num_attr+1):
if (list1[i][j] == "yes") or (list1[i][j] == "yes\r"):
list1[i][j] = 1.0
elif (list1[i][j] == "no") :
list1[i][j] = -1.0
elif (list1[i][j] == "no\r") :
list1[i][j] = 0.0
else:
if file_name1=="education_train.csv":
list1[i][j]= float(list1[i][j])/100
list1[i][j]= float(list1[i][j])
if list1[i][j]>1899:
list1[i][j] = (list1[i][j]-1900.0)/100.0
#print list1
truths=[0.0]*len(list1)
for i in xrange(len(list1)):
truths[i]=list1[i][num_attr]
#print truths
inputs=[[]]*len(list1)
for i in xrange(len(list1)):
inputs[i]=list1[i][0:(num_attr)]
#print inputs
file_name2= sys.argv[2]
fd2=open(file_name2)
data=fd2.read()
raw_list2=data.split("\n")
fd2.close()
raw_list2.pop(0)
raw_list2.pop(-1)
#print raw_list2
list2=[[]] * len(raw_list2)
for i in xrange(len(list2)):
list2[i] = raw_list2[i].split(",")
#print list2
for i in xrange(len(list2)):
for j in xrange(num_attr):
if (list2[i][j] == "yes") or (list2[i][j] == "yes\r"):
list2[i][j] = 1.0
elif (list2[i][j] == "no") :
list2[i][j] = -1.0
elif (list2[i][j] == "no\r") :
list2[i][j] = 0.0
else:
if file_name2=="education_dev.csv":
list2[i][j]= float(list2[i][j])/100
elif list2[i][j]>1899:
list2[i][j] = (float(list2[i][j])-1900.0)/100.0
else:
list2[i][j]= float(list2[i][j])
tests=[[]]*len(list2)
for i in xrange(len(list2)):
tests[i]=list2[i][0:(num_attr)]
#print tests
def normalize(x):
if file_name1=="music_train.csv":
if x>0.5:
return "yes"
else:
return "no"
else:
return round(x*100)
def sigmoid(x):
return 1.0/(1.0+math.exp(-x))
class BP_NN:
def __init__(self, num_input, num_hiden, num_output):
self.num_input = num_input+1
self.num_hiden = num_hiden+1
self.num_output = num_output
#initialize
self.input = np.ones((1, self.num_input))
self.hiden = np.ones((1, self.num_hiden))
self.output = np.ones((1, self.num_output))
#create weights
self.w_ih = np.zeros((self.num_hiden, self.num_input))
self.w_ho = np.zeros((self.num_output, self.num_hiden))
for j in xrange(self.num_hiden):
for i in xrange(self.num_input):
self.w_ih[j][i] = random.uniform(-0.8,0.8)
#print self.w_ih
for k in xrange(self.num_output):
for j in xrange(self.num_hiden):
self.w_ho[k][j] = random.uniform(-0.8,0.8)
#print self.w_ho
def calculate(self, input):
for i in xrange(self.num_input-1):
self.input[0][i] = input[i]
for j in xrange(self.num_hiden-1):
temp=np.dot(self.w_ih, np.transpose(self.input))
self.hiden[0][j]=sigmoid(temp[j][0])
#print self.hiden
for k in xrange(self.num_output):
temp=np.dot(self.w_ho, np.transpose(self.hiden))
self.output[0][k] = sigmoid(temp[k][0])
#print self.output
return self.output[0][0]
def backpropagation(self, truth, learn_rate):
#calculate error of outputs
err_output = [0.0]* self.num_output
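        # Delta rule for a sigmoid output unit: (target - output) * output * (1 - output).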
for k in xrange(self.num_output):
err = truth[k] - self.output[0][0]
err_output[k] = err*(self.output[0][k])*(1-(self.output[0][k]))
#calculate error of hidden layer
err_hiden = [0.0] * self.num_hiden
for j in xrange(self.num_hiden):
err = 0.0
for k in xrange(self.num_output):
err += err_output[k]*self.w_ho[k][j]
err_hiden[j]=err*(self.hiden[0][j])*(1-(self.hiden[0][j]))
# update weights
for j in xrange(self.num_hiden):
for k in xrange(self.num_output):
self.w_ho[k][j] += learn_rate*err_output[k]*self.hiden[0][j]
for i in xrange(self.num_input):
for j in xrange(self.num_hiden):
self.w_ih[j][i] += learn_rate*err_hiden[j]*self.input[0][i]
#print self.w_ih
def cal_error(self, trainings, truths ):
predictions = [0.0] * len(truths)
for i in xrange(len(predictions)):
predictions[i] = self.calculate(trainings[i])
error = 0.0
for k in xrange(len(truths)):
error += 0.5*(predictions[k]-truths[k])**2
return error
def train(self, inputs, truths):
for i in xrange(len(inputs)):
self.calculate(inputs[i])
self.backpropagation([truths[i]], 0.2)
#print self.w_ih
print self.cal_error(inputs,truths)
def predict(self, inputs):
for i in xrange(len(inputs)):
temp= self.calculate(inputs[i])
print normalize(temp)
n=BP_NN(num_attr,num_attr,1)
#n.calculate(inputs[0])
#n.backpropagation([truths[0]],0.1)
#n.calculate(inputs[1])
#n.backpropagation([truths[1]],0.1)
for i in xrange(1000):
n.train(inputs,truths)
print "TRAINING COMPLETED! NOW PREDICTING."
n.predict(tests)
|
[
"mitianyu@loginsight.cn"
] |
mitianyu@loginsight.cn
|
0a81afd1bccfde119b3571c2a5ba4395ebb7b44f
|
e5cd01fd620e8e746a20b883de7ac32bec4feb5c
|
/Ejercicios python/PE4/PE4E3.py
|
53f599b8b515986576a6731ce8932f4c3575fac2
|
[] |
no_license
|
eazapata/python
|
0f6a422032d8fb70d26f1055dc97eed83fcdc572
|
559aa4151093a120527c459a406abd8f2ff6a7d8
|
refs/heads/master
| 2020-08-27T09:19:29.395109
| 2019-11-23T20:11:14
| 2019-11-23T20:11:14
| 217,314,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
#PE4E3 Eduardo Antonio Zapata Valero
#Ask the user whether they want to compute the area of a triangle or a square,
#request the data for the chosen case and display the result.
fig=(input("Quieres calcular el área de un triángulo (t) o de un cuadrado (c) "))
if (fig=="t"):
print ("Ha elegido triángulo, introduce base y altura del triángulo\n")
b=float(input())
h=float(input())
print("El área del triángulo es ",(b*h)/2)
elif(fig=="c"):
l=float(input("Has elegido cuadrado, introduce el valor del lado\n"))
print("El área del cudrado es ",(l*l))
else:
print("No se reconoce la figura que de la que quieres sacar el área")
|
[
"you@example.com"
] |
you@example.com
|
6e6da79ce5b90732e27a6125bc590e5dcfb3864e
|
9ecf3f508998e2ca1008adce29351fa0192d96b8
|
/KNN识别海绵宝宝和派大星.py
|
d07d8af5554f520fdb3a11f5e4d5fb930c17d55a
|
[] |
no_license
|
Suspect-wu/carton
|
5dfff9cd3d05a6f1902b7f260f0a512302fc804d
|
7ebcec49968db2e43e89a1819e6bdaf18c567894
|
refs/heads/master
| 2022-11-01T10:32:23.643037
| 2022-10-31T06:59:44
| 2022-10-31T06:59:44
| 271,221,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,343
|
py
|
import numpy as np
import os
from sklearn.neighbors import KNeighborsClassifier
import csv
from sklearn.preprocessing import scale
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from sklearn.metrics import classification_report
from PIL import Image
plt.rcParams['font.sans-serif']=['simhei']
plt.rcParams['axes.unicode_minus']=False
myfont = fm.FontProperties(fname=r'D:\Fonts\simkai.ttf')
csv_name = 'location.csv'
def read_csv(csv_name):
photos = []
labels = []
with open(os.path.join('D:', csv_name), encoding='utf-8-sig') as f:
reader = csv.reader(f)
for row in reader:
photo_loc, label= row
photos.append(photo_loc)
labels.append(int(label))
print(labels)
print(photos)
return photos, labels
def get_photo(photos, labels):
photo_filenames = photos
photo_list = []
labels_list =labels
for photo in photo_filenames:
im = Image.open(photo)
im = np.array(im)
photo_list.append(im)
photo_list = np.array(photo_list, dtype='int32')
labels_list = np.array(labels_list, dtype='int32')
pro_photo_list = np.array(photo_list, dtype='float64')
pro_photo_list = np.reshape(pro_photo_list, (pro_photo_list.shape[0], 256*256*3))
labels_list = np.array(labels_list, dtype=np.int)
return pro_photo_list, labels_list, photo_list
if __name__ == '__main__' :
photos, labels = read_csv(csv_name)
photo_list, labels, list = get_photo(photos, labels)
photo_list = scale(photo_list)
tr_photo = photo_list[:1000, :]
tr_labels = labels[:1000]
te_photo = photo_list[1000:, :]
te_labels = labels[1000:]
n = len(te_labels)
class_name = ['海绵宝宝', '皮卡丘', '香蕉', '小黄人', '柯南']
plt.figure(figsize=(10, 10))
for i in range(16):
plt.subplot(4, 4, 1+i)
plt.xticks([])
plt.yticks([])
plt.xlabel(class_name[int(labels[i])],fontsize=12)
plt.imshow(list[i], cmap=plt.cm.binary)
plt.grid(False)
plt.show()
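    # Sweep k from 1 to 10 and print per-class precision/recall for each value.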
for i in range(1, 11):
knn = KNeighborsClassifier(n_neighbors=i, n_jobs=-1)
knn.fit(tr_photo, tr_labels)
predict_labels = knn.predict(te_photo)
print('k={}时'.format(i), classification_report(te_labels, predict_labels, target_names=class_name))
|
[
"1021513495@qq.com"
] |
1021513495@qq.com
|
7a3c32699a35792ebbc6e0e18450ac35ad6de42d
|
cbb1c237ce552ad22d4c9874de333aba4158a5bb
|
/2937.py
|
ac823d5e0d362b2cfeadf2fb3ae32710027113a8
|
[] |
no_license
|
Juvora/Lesson_2
|
ab27b566e2568b19fc84ff2761d7aa7343ee3214
|
2c5a7cd70e97e76b4df8c7bef127017fb427deb8
|
refs/heads/master
| 2020-06-30T03:08:57.449354
| 2019-08-05T21:28:37
| 2019-08-05T21:28:37
| 200,704,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
num1 = int(input())
print("The next number for the number ", num1," is ",num1 + 1, ".", sep='')
print("The previous number for the number ", num1," is ",num1 -1, ".", sep='')
|
[
"12340987@rambler.ru"
] |
12340987@rambler.ru
|
c21b1ffdec708ffd7c485a3f60057cad62c5445c
|
ae033c4d58ec0c0c4b0c55550d7fbdabf919a21f
|
/BucketList/migrations/0008_auto__del_field_bucketlistitem_south_test.py
|
be55bc3669bc084b2f48faca07bf41f2326b6860
|
[] |
no_license
|
Cmlsltnl/BucketListCalculator
|
9079fe72cf895778d4fae09c628bdbdf3cd1e5df
|
cb6bd6475e0fb3f8854f65abbaf883a1069cdb50
|
refs/heads/master
| 2021-01-13T03:08:45.214543
| 2015-01-21T21:58:49
| 2015-01-21T21:58:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,480
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'BucketListItem.south_test'
db.delete_column(u'BucketList_bucketlistitem', 'south_test')
def backwards(self, orm):
# Adding field 'BucketListItem.south_test'
db.add_column(u'BucketList_bucketlistitem', 'south_test',
self.gf('django.db.models.fields.CharField')(default=0, max_length=20),
keep_default=False)
models = {
u'BucketList.bucketlistitem': {
'Meta': {'object_name': 'BucketListItem'},
'cost': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'crossed_off': ('django.db.models.fields.BooleanField', [], {}),
'goal_type': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'hours': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pub_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'time': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'BucketList.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'age': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '3'}),
'hourly_wage': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'life_expectancy': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '3'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'yearly_earnings': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '8'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['BucketList']
|
[
"michaelrsullivan1@gmail.com"
] |
michaelrsullivan1@gmail.com
|
c8f6fa99fa12ecc4e374395ccc33f4453288533c
|
c0f33647cb25f108d1516877eb4179296c51ade0
|
/scraper.py
|
4d55e666e6d70d76f5e3dba61f133601e315ca64
|
[] |
no_license
|
huiwamwang/web_scraper
|
301b1d622f62cfa5d545940dfd546deb2095f5b3
|
6520f02a1130d2f877f766d3f9aa0e0d796a7c37
|
refs/heads/master
| 2023-03-16T00:02:18.015179
| 2021-03-07T20:15:59
| 2021-03-07T20:15:59
| 345,443,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,152
|
py
|
"""A script to save articles from https://www.nature.com/nature/articles
as separated files at local directories.
User inputs number of pages to look at from nature website and article type to look for.
Article would be saved in separated folders, named 'Page_N', where N is from user input.
"""
import requests
import os
from bs4 import BeautifulSoup
from string import punctuation
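# Two values are read from stdin: the number of pages to scrape, then the article type to filter on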
n_pages, article_type = [input() for i in range(2)]
saved_articles = []
for page in range(1, int(n_pages) + 1):
os.mkdir(f'Page_{page}')
request = requests.get(f'https://www.nature.com/nature/articles?searchType=journalSearch&sort=PubDate&page={page}')
soup = BeautifulSoup(request.content, 'html.parser')
links = soup.find_all('article')
for link in links:
topic = link.find('span', {'class': 'c-meta__type'})
href = link.a.get('href')
if topic.text == article_type:
title = link.a.text
for char in title:
if char in punctuation:
title = title.replace(char, '')
if char == " ":
title = title.replace(char, '_')
saved_articles.append(title)
article_request = requests.get(f"https://www.nature.com{href}")
article_soup = BeautifulSoup(article_request.content, 'html.parser')
article = article_soup.find('div', {'class': 'article-item__body'})
if not article:
article = article_soup.find('div', {'class': 'article__body cleared'})
if article:
article = article.find_all('p')
with open(f'Page_{page}/{title}.txt', 'wb') as file:
for string in article:
file.write(string.text.encode())
else:
article = article_soup.text.replace('\n', '')
with open(f'Page_{page}/{title}.txt', 'wb') as file:
file.write(article.encode())
"""This part for testing purposes"""
print(page, title)
print(f"https://www.nature.com{href}")
print(f"Saved articles: ", saved_articles)
|
[
"huiwamwang@google.com"
] |
huiwamwang@google.com
|
a148b2ef374d9aab9a2884d1787f6cefe2b61bff
|
0e86dddccefd61089658695d17d7289fd2e81005
|
/aula12.py
|
cc9c9b5b6c6ac139bfad179df13f281f5d4a0f43
|
[] |
no_license
|
cauegonzalez/cursoPython
|
76279692b03e7d55610768db03475d6cb1ab2292
|
d7e50d7882aa81ccbcfa5e2503151a6281711d48
|
refs/heads/master
| 2021-01-01T04:52:18.594691
| 2020-03-02T03:56:21
| 2020-03-02T03:56:21
| 97,258,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
# coding=UTF-8
# Create a program that uses colors in the output
print('====Exercício Aula 12====')
nome = raw_input('Qual é seu nome? ')
if nome == 'Cauê':
print('Que nome bonito!')
elif nome == 'Pedro' or nome == 'Maria' or nome == 'Paulo':
print('Seu nome é bem popular no Brasil')
elif nome in 'Ana Cláudia Mariana Juliana':
print('Belo nome feminino')
else:
print('Seu nome é bem normal.')
print('Tenha um bom dia, {}!'.format(nome))
|
[
"cauegonzalez@gmail.com"
] |
cauegonzalez@gmail.com
|
ac30a5209ae983f55392b30fbe675c86263eedb6
|
c862dfef55eece86db110eb84d61f73a3f772551
|
/smartcontract/venv/lib/python3.6/site-packages/boa_test/example/demo/ICO_Template.py
|
ec53f9fd41350b3dcd464a9a7b8cfe2955bbc86e
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
vedanova/neoinvoice
|
682b8e1f83e8b92a70545560abd7c56b2d6e7d0c
|
bc9a0217858938b49f99fef13b3439f4a537a5f5
|
refs/heads/master
| 2020-03-31T09:22:56.662230
| 2018-05-11T14:19:32
| 2018-05-11T14:19:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,868
|
py
|
"""
NEX ICO Template
===================================
Author: Thomas Saunders
Email: tom@neonexchange.org
Date: Dec 11 2017
"""
from boa_test.example.demo.nex.txio import get_asset_attachments
from boa_test.example.demo.nex.token import *
from boa_test.example.demo.nex.crowdsale import *
from boa_test.example.demo.nex.nep5 import *
from boa.interop.Neo.Runtime import GetTrigger, CheckWitness
from boa.interop.Neo.TriggerType import Application, Verification
from boa.interop.Neo.Storage import *
ctx = GetContext()
NEP5_METHODS = ['name', 'symbol', 'decimals', 'totalSupply', 'balanceOf', 'transfer', 'transferFrom', 'approve', 'allowance']
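# Calls whose operation name matches one of these standard NEP-5 token methods are dispatched to the shared NEP-5 handler in Main()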
def Main(operation, args):
"""
:param operation: str The name of the operation to perform
:param args: list A list of arguments along with the operation
:return:
bytearray: The result of the operation
"""
trigger = GetTrigger()
# This is used in the Verification portion of the contract
# To determine whether a transfer of system assets ( NEO/Gas) involving
# This contract's address can proceed
if trigger == Verification():
# check if the invoker is the owner of this contract
is_owner = CheckWitness(TOKEN_OWNER)
# If owner, proceed
if is_owner:
return True
# Otherwise, we need to lookup the assets and determine
# If attachments of assets is ok
attachments = get_asset_attachments()
return False
# return can_exchange(ctx,attachments, True)
elif trigger == Application():
for op in NEP5_METHODS:
if operation == op:
return handle_nep51(ctx, operation, args)
if operation == 'deploy':
return deploy()
elif operation == 'circulation':
return get_circulation(ctx)
# the following are handled by crowdsale
elif operation == 'mintTokens':
return perform_exchange(ctx)
elif operation == 'crowdsale_register':
return kyc_register(ctx, args)
elif operation == 'crowdsale_status':
return kyc_status(ctx, args)
elif operation == 'crowdsale_available':
return crowdsale_available_amount(ctx)
elif operation == 'get_attachments':
return get_asset_attachments()
return 'unknown operation'
return False
def deploy():
"""
:param token: Token The token to deploy
:return:
bool: Whether the operation was successful
"""
if not CheckWitness(TOKEN_OWNER):
print("Must be owner to deploy")
return False
if not Get(ctx, 'initialized'):
# do deploy logic
Put(ctx, 'initialized', 1)
Put(ctx, TOKEN_OWNER, TOKEN_INITIAL_AMOUNT)
return add_to_circulation(ctx, TOKEN_INITIAL_AMOUNT)
return False
|
[
"gil.bueno@martinlabs.com.br"
] |
gil.bueno@martinlabs.com.br
|
781f3a6e160c379ff6d954df386de30bf152e432
|
86afce3d1e07c6580f9448f120a5150b44e0eb96
|
/Python/result_tree.py
|
c6d27d7409b88687e3c9f5f0c577b30f48943387
|
[] |
no_license
|
GrayXue/AutomationWithCANalyzer
|
1a5f3b458e9e216eb65edfedce5f578900ba2478
|
34e7bd4af72edad414969d3a0cee74f5322b273c
|
refs/heads/master
| 2020-04-16T02:56:21.199075
| 2018-11-30T14:31:28
| 2018-11-30T14:31:28
| 165,214,185
| 0
| 1
| null | 2019-01-11T09:12:09
| 2019-01-11T09:12:09
| null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
import ttk
import Tkinter as tk
class ResultTreeList(ttk.Treeview):
def __init__(self, parent=None, **kw):
ttk.Treeview.__init__(self, parent, **kw)
self.make()
def make(self):
#self.column("#0", width=90)
list_columns = ["pass", "partial", "fail", "total"]
self['columns'] = ("pass", "partial", "fail", "total")
for column in list_columns:
self.column(column, width=40)
self.heading(column, text=column.capitalize())
def insert_row(self, item_name, test_desc, res):
# res has format: ("", "", "", "")
self.insert("", 'end', item_name, text=test_desc, values=res)
def update_result(self, item_name, column_name, value):
self.set(item_name, column_name, value)
if __name__ == "__main__":
import ttk
import Tkinter as tk
root = tk.Tk()
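    # Minimal demo completion (assumed, not part of the original file): build the widget and start the event loop
    tree = ResultTreeList(root)
    tree.insert_row("test_1", "Sample test", ("1", "0", "0", "1"))
    tree.pack(fill=tk.BOTH, expand=True)
    root.mainloop()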
|
[
"noreply@github.com"
] |
GrayXue.noreply@github.com
|
aaa5cbfaf84c4bccfcde4ebbe5c529ec97c0fcb0
|
a5636dd0de705a31f090a8914be9d1ad6b19fbb4
|
/tag_app.py
|
ca13ee351e8caa1e234f5eacc708253f82824044
|
[] |
no_license
|
maryannv/tag-o-matic
|
95544311942480a56c716241d0fdc29bf6765f34
|
b7614f60821f05e5255464e424d64803d503b5c8
|
refs/heads/master
| 2021-01-10T07:17:11.060397
| 2015-05-22T19:55:19
| 2015-05-22T19:55:19
| 36,057,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,222
|
py
|
import os, requests
from flask import Flask, jsonify, render_template, request
from TagHTMLParser import TagHTMLParser
app = Flask(__name__)
#app.debug = True
#### Routing ####
@app.route('/')
def home():
return render_template('index.html')
@app.route('/_parse_url')
def parse_url():
try:
urlinput = request.args.get('urlinput')
r = requests.get(urlinput)
content_headers = r.headers.get('Content-Type', '')
# if the "content-type" header exists, verify this is an html response
# Note: sometimes the content-type header is missing, but response is still html
if not ("content-type" in r.headers and "text/html" in content_headers):
raise ValueError("Content is not html")
# If the assumed encoding (r.encoding) is not specified in the headers,
# replace it with more accurate chardet based detection (r.apparent_encoding)
# Note: r.apparent_encoding is expensive!
if r.encoding not in content_headers:
r.encoding = r.apparent_encoding
html = r.text
if not html:
html = ""
rawstrings = html.split("\n")
parser = TagHTMLParser()
parser.feed(html)
return jsonify(result=render_template('summary.html', rawstrings=rawstrings, tagcounts=parser.tagcounts, lines_info=parser.lines_info))
except ValueError, err:
return jsonify_error("Invalid Input: "+ str(err))
except Exception, err:
return jsonify_error("Could not process url: " + str(err))
def jsonify_error(err_msg):
return jsonify(error=err_msg)
##### Template Filters #####
@app.template_filter('string_slice')
def string_slice(s, start, end):
if s and start >= 0 and end <= len(s):
return s[start:end]
return s
@app.template_filter('get_tag_uid')
def get_tag_uid(tag, tagdict):
# return unique id for tag, based on position in tagdict.keys()
if tagdict and (tag in tagdict):
return "tagid-"+str(tagdict.keys().index(tag))
return tag
@app.template_filter('get_taginfo_for_line')
def get_taginfo_for_line(lines_info, lindex):
if lines_info and (lindex in lines_info):
return lines_info[lindex]
return []
|
[
"vellanikaran@gmail.com"
] |
vellanikaran@gmail.com
|
b85d7944f883d5fc1dae7e069f5d5cb234104815
|
0df124c41cbaa94750df79fc70bf911d298610a7
|
/train_kFold.py
|
af272569fc2d9f5c6934814ab1624fffa7f18f92
|
[] |
no_license
|
bcaitech1/p2-klue-HYLee1008
|
7093a9245fe3ad9bf29251a4c12f12a801b9f4f5
|
c22d1a1ba8e3aa89198d786845a0ad6efc69e27c
|
refs/heads/main
| 2023-04-10T11:18:11.500052
| 2021-04-22T11:23:23
| 2021-04-22T11:23:23
| 360,466,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,318
|
py
|
import argparse
import pickle as pickle
import os
import pandas as pd
import numpy as np
import torch
import random
import transformers
import glob
import re  # used by increment_path() below
import time
import json
import wandb
from sklearn.metrics import accuracy_score
from transformers import AutoTokenizer, BertForSequenceClassification, Trainer, TrainingArguments, BertConfig, BertTokenizerFast, BertModel, XLMRobertaTokenizer
from pathlib import Path
from sklearn.model_selection import KFold
from load_data import *
from model import BERTClassifier, XLMRoBERTAClassifier, BERTLarge, KoElectraClassifier, mbart
from loss import LabelSmoothingLoss
from torch.utils.tensorboard import SummaryWriter
def increment_path(path, exist_ok=False):
""" Automatically increment path, i.e. runs/exp --> runs/exp0, runs/exp1 etc.
Args:
path (str or pathlib.Path): f"{model_dir}/{args.name}".
exist_ok (bool): whether increment path (increment if False).
"""
path = Path(path)
if (path.exists() and exist_ok) or (not path.exists()):
return str(path)
else:
dirs = glob.glob(f"{path}*")
matches = [re.search(rf"%s(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m]
n = max(i) + 1 if i else 2
return f"{path}{n}"
# fix the random seed
def seed_everything(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
def train(args):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# model save path
save_dir = increment_path(os.path.join(args.model_dir, args.bert_model))
os.makedirs(save_dir, exist_ok=True)
# save args on .json file
with open(os.path.join(save_dir, 'config.json'), 'w', encoding='utf-8') as f:
json.dump(vars(args), f, ensure_ascii=False, indent=4)
# set random seed
seed_everything(args.seed)
# load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(args.bert_model)
# load dataset
train_dataset = load_data("/opt/ml/input/data/train/train.tsv")
train_label = train_dataset['label'].values
# tokenizing dataset
tokenized_train = tokenized_dataset(train_dataset, tokenizer)
# make dataset for pytorch.
RE_train_dataset = RE_Dataset(tokenized_train, train_label)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
kfold = KFold(n_splits=5)
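    # 5-fold cross-validation: each fold trains on 4/5 of the data and validates on the remaining 1/5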
for fold, (train_index, valid_index) in enumerate(kfold.split(train_dataset), 1):
train_sub = torch.utils.data.Subset(RE_train_dataset, train_index)
valid_sub = torch.utils.data.Subset(RE_train_dataset, valid_index)
train_loader = torch.utils.data.DataLoader(
train_sub,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers
)
valid_loader = torch.utils.data.DataLoader(
valid_sub,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers
)
# load model
model = XLMRoBERTAClassifier(args.bert_model).to(device)
model = mbart(args.bert_model).to(device)
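        # Note: this assignment overrides the XLMRoBERTAClassifier created on the previous line,
        # so only the mbart model is actually trained here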
# load optimizer & criterion
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
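        # Weight decay (0.01) is applied only to the first parameter group; biases and LayerNorm weights are excluded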
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
criterion = LabelSmoothingLoss(smoothing=args.smoothing)
best_acc, last_epoch = 0, 0
for epoch in range(1, args.epochs + 1):
model.train()
loss_value = 0
start_time = time.time()
for batch_id, item in enumerate(train_loader):
input_ids = item['input_ids'].to(device)
# token_type_ids = item['token_type_ids'].to(device)
attention_mask = item['attention_mask'].to(device)
labels = item['labels'].to(device)
optimizer.zero_grad()
output = model(input_ids, attention_mask)
loss = criterion(output, labels)
loss_value += loss.item()
loss.backward()
optimizer.step()
# scheduler.step()
train_loss = loss_value / (batch_id + 1)
# evaluate model on dev set
with torch.no_grad():
model.eval()
acc_vals = 0
for batch_id, item in enumerate(valid_loader):
input_ids = item['input_ids'].to(device)
# token_type_ids = item['token_type_ids'].to(device)
attention_mask = item['attention_mask'].to(device)
labels = item['labels'].to(device)
output = model(input_ids, attention_mask)
pred = torch.argmax(output, dim=-1)
acc_item = (labels == pred).sum().item()
acc_vals += acc_item
val_acc = acc_vals / len(valid_sub)
time_taken = time.time() - start_time
# metric = {'val_acc': val_acc}
# wandb.log(metric)
print("fold: {} epoch: {}, loss: {}, val_acc: {}, time taken: {}".format(fold, epoch, train_loss, val_acc, time_taken))
if best_acc < val_acc:
print(f'best model! saved at fold {fold} epoch {epoch}')
if os.path.isfile(f"{save_dir}/{fold}_best_{last_epoch}.pth"):
os.remove(f"{save_dir}/{fold}_best_{last_epoch}.pth")
torch.save(model.state_dict(), f"{save_dir}/{fold}_best_{epoch}.pth")
best_acc = val_acc
last_epoch = epoch
# save model
torch.save(model.state_dict(), f"{save_dir}/{fold}_last_{epoch}.pth")
def main(args):
train(args)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Data and model checkpoints directories
parser.add_argument('--seed', type=int, default=1024, help='random seed (default: 1024)')
    parser.add_argument('--epochs', type=int, default=10, help='number of epochs for train (default: 10)')
    parser.add_argument('--batch_size', type=int, default=16, help='input batch size for training (default: 16)')
    parser.add_argument('--num_workers', type=int, default=4, help='number of workers for dataloader (default: 4)')
    parser.add_argument('--smoothing', type=float, default=0.2, help='label smoothing factor for label smoothing loss (default: 0.2)')
parser.add_argument('--learning_rate', type=float, default=1e-5, help='learning rate for training (default: 1e-5)')
parser.add_argument('--weight_decay', type=float, default=0.01, help='weight decay (default: 0.01)')
parser.add_argument('--model_dir', type=str, default='./results/kfold', help='directory where model would be saved (default: ./results)')
# xlm-roberta-large
# joeddav/xlm-roberta-large-xnli
# monologg/koelectra-base-v3-discriminator
# facebook/mbart-large-cc25
parser.add_argument('--bert_model', type=str, default='xlm-roberta-large', help='backbone bert model for training (default: xlm-roberta-large)')
args = parser.parse_args()
main(args)
|
[
"discone1008@gmail.com"
] |
discone1008@gmail.com
|
88f3a978e1ccdf33914b845f1988779d03433a82
|
3a2af7b4b801d9ba8d78713dcd1ed57ee35c0992
|
/zerver/migrations/0051_realmalias_add_allow_subdomains.py
|
dec9cce79560fb47f11fae6a6962e964cc2a4a00
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
timabbott/zulip
|
2b69bd3bb63539adbfc4c732a3ff9d52657f40ac
|
42f239915526180a1a0cd6c3761c0efcd13ffe6f
|
refs/heads/master
| 2023-08-30T21:45:39.197724
| 2020-02-13T23:09:22
| 2020-06-25T21:46:33
| 43,171,533
| 6
| 9
|
Apache-2.0
| 2020-02-24T20:12:52
| 2015-09-25T19:34:16
|
Python
|
UTF-8
|
Python
| false
| false
| 541
|
py
|
# Generated by Django 1.10.5 on 2017-01-25 20:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0050_userprofile_avatar_version'),
]
operations = [
migrations.AddField(
model_name='realmalias',
name='allow_subdomains',
field=models.BooleanField(default=False),
),
migrations.AlterUniqueTogether(
name='realmalias',
unique_together={('realm', 'domain')},
),
]
|
[
"tabbott@zulipchat.com"
] |
tabbott@zulipchat.com
|
60330c9e9345b3a0b3454a501e4ac549027af252
|
00869cee40a2fa1ca3f82b77103874d68d8a8583
|
/Language_Detection/adbclassify.py
|
2caa9231b89056e1f6b5a2ab0536543d3a6eeae8
|
[] |
no_license
|
Keshav-Patil/EEG_based_classification
|
4de56f1b3f1d3f937c154a0078c1c01f58d62ad1
|
3ad4ff75db307d254d86fb0e8f51a97da7c94357
|
refs/heads/master
| 2021-03-13T03:20:26.843230
| 2017-05-16T18:44:32
| 2017-05-16T18:44:32
| 91,491,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,150
|
py
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix
#from sklearn.model_selection import cross_val_score
import numpy as np
import itertools
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from sklearn import svm, datasets
#from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Accent):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=0)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
C = 1
kernel = 'rbf'
iterations = 1000
m_accuracysvm = []
m_accuracyrf = []
m_accuracyab = []
X = pd.read_csv('inputvalrand_lan.txt',sep=',',header = None)
Y = np.loadtxt("outputvalrand_lan.txt", delimiter='\n')
#X = normalize(X)
pca = PCA(n_components=15,copy=True,whiten=True)
pca.fit(X)
X = pca.transform(X)
fig = plt.figure()
#ax = fig.gca(projection='3d')
ax = fig.add_subplot(111, projection='3d')
cxaxis=[]
cyaxis=[]
czaxis=[]
target = open("adb_lanscores.txt", 'w')
line="l_rate \t| 0.50 \t 0.6 \t 0.70 \t 0.80 \t 0.9\n"
target.write(line)
line=" \t______________________________\n"
target.write(line)
X_train,X_test,y_train,y_test = train_test_split(X,Y,test_size=0.33,random_state=7)
estim = 50
l_rate = 0.5
max =0
maxe=0
maxl=0
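# Grid search: sweep n_estimators over 50-90 in steps of 10 and learning_rate over 0.5-0.9 in steps of 0.1,
# recording the AdaBoost test accuracy for every combination and keeping track of the best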
while estim<100:
l_rate=0.5
line=""
print "Estimators: ",estim, "\t| ",
line+="Estim: "+str(estim)+"\t| "
while l_rate < 0.91 :
cxaxis.append(estim)
cyaxis.append(l_rate)
ab = AdaBoostClassifier(n_estimators = estim,learning_rate=l_rate)
ab.fit(X_train,y_train)
score = ab.score(X_test,y_test)
czaxis.append(score)
if max < score:
max = score
maxe = estim
maxl = l_rate
print ("%.4f" % score), " ",
line+=("%.4f" % score)+" "
l_rate+=0.1
estim+=10
line+="\n"
target.write(str(line))
print
print "Score: ",max," Estimator: ",maxe," learning rate: ",maxl
line="MaxScore: "+str(max)+" Estimator: "+str(maxe)+" Learning rate: "+str(maxl)
target.write(line)
target.close()
xaxis=np.array(cxaxis)
yaxis=np.array(cyaxis)
hist, xedges, yedges = np.histogram2d(xaxis, yaxis,bins=4)
xpos, ypos = np.meshgrid(xedges-5, yedges-0.05)
xpos = xaxis.flatten('F')-5
ypos = yaxis.flatten('F')-0.05
zpos= np.zeros_like(xpos)+0.65
print xpos.shape
print ypos.shape
print zpos.shape
# Construct arrays with the dimensions for the 16 bars.
dx = 10 * np.ones_like(zpos)
dy = 0.1 * np.ones_like(zpos)
dz = np.array(czaxis).flatten()-0.65
colors = ['r','g','b']
surf = ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color='r', zsort='average')
ax.set_xlim3d(45, 100)
ax.set_ylim3d(0.40,1)
ax.set_zlim3d(0.65,0.85)
plt.show()
"""
xaxis, yaxis = np.meshgrid(xaxis, yaxis)
zaxis=np.array(czaxis)
surf = ax.plot_wireframe(xaxis, yaxis, zaxis)
#fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
"""
"""
y_pred = rf.predict(X_test)
predictions = rf.predict(X)
# round predictions
rounded = [round(x) for x in predictions]
print rounded
# m_accuracyab.append(svm.score(X_test,y_test))
#m_accuracyrf.append(rf.score(X_test,y_test))
# m_accuracyab.append(ab.score(X_test,y_test))
#print "Accuracies:"
#print "SVM:", np.mean(m_accuracysvm)
print "RF: ", np.mean(m_accuracyrf)
#print "ADAB: ", np.mean(m_accuracyab)
yint =[]
tp=1
fn=1
tn=1
fp=1
for i in Y:
yint.append(i)
print yint
for i in range(len(yint)):
if yint[i]==rounded[i]:
if yint[i]==1:
tp+=1
else:
tn+=1
else:
if yint[i]==1:
fn+=1
else:
fp+=1
print "true positive: ",tp
print "false positive: ",fp
print "false negative: ",fn
print "true negative: ",tn
conf = confusion_matrix(y_test, y_pred,labels=[1,0])
class_names=[0,1]
print conf
plt.figure()
plot_confusion_matrix(conf, classes=class_names,title='Confusion matrix, without normalization')
plt.show()
"""
|
[
"keshavdpatil370@gmail.com"
] |
keshavdpatil370@gmail.com
|
321d7fac24ec2ab49e677d41746168cfef37cb36
|
7c14b4caa46e39f13a21c3582f7840d4cfa6dd2f
|
/logger.py
|
1f7a40c0b8b0fd4145330c69132daafda7ca7a2a
|
[
"MIT"
] |
permissive
|
hrnoh/autovc
|
fccbd5bd390204afc1db84cdb45024e3c7e7e74b
|
c434b1939338e1fe034e2f8cde473d4dd83e3c1f
|
refs/heads/master
| 2020-11-28T19:09:10.178784
| 2020-01-13T11:58:18
| 2020-01-13T11:58:18
| 229,899,398
| 0
| 0
|
MIT
| 2019-12-24T08:04:58
| 2019-12-24T08:04:57
| null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# import tensorflow as tf
from tensorboardX import SummaryWriter
class Logger(object):
"""Using tensorboardX such that need no dependency on tensorflow."""
def __init__(self, log_dir):
"""Initialize summary writer."""
self.writer = SummaryWriter(log_dir)
def scalar_summary(self, tag, value, step):
self.writer.add_scalar(tag, value, step)
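# Illustrative usage (assumed, not part of the original file):
#   logger = Logger('./logs')
#   logger.scalar_summary('train/loss', 0.42, step=100)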
|
[
"hr_noh@korea.ac.kr"
] |
hr_noh@korea.ac.kr
|
01fa61b61414d147b0eea7f2609800fd9d303acb
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/codeabbey/_Python_Problem_Solving-master/Greatest Common Divisor.py
|
a5d6275ab69ff29ca8c3202f4e265872f942f71d
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 363
|
py
|
a = int(input())
string = ''
for i in range(a):
temp1,temp2 = num1, num2 = [int(ele) for ele in input().split()]
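    # Subtraction-based Euclidean algorithm: when the loop ends, num1 == num2 == gcd of the pair,
    # and the lcm is then recovered as (a * b) / gcd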
while num1 != num2:
if num1 > num2:
num1 = num1 - num2
else:
num2 = num2 - num1
lcm = temp1 * temp2 / num1
string += '('+str(num1)+' '+str(int(lcm))+')'
string += ' '
print(string)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
9f46d730ed7df2609c03ef721ca77d732286573c
|
001014c8a5f7fb9d91deac4ea8392d8980917476
|
/main.py
|
17f66fe9dd873fbf79e821a1c9278993f4a714da
|
[] |
no_license
|
lnfrnkln/blogz
|
0ecb95054f6b6e4a9639af245de111a895a61acd
|
c5c0ff7d684b588db4536452d7d2bdce6df41153
|
refs/heads/master
| 2020-05-23T11:20:20.114914
| 2019-05-15T23:40:43
| 2019-05-15T23:40:43
| 186,734,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,107
|
py
|
from flask import Flask, request, redirect, render_template, session, flash
from flask_sqlalchemy import SQLAlchemy
from hashutils import make_pw_hash, check_pw_hash
from datetime import datetime
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://blogz:Blogz1111@localhost:8889/blogz'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
app.secret_key = '\x0c\xb9\x1b}\x9a\x1al\xf9\x04\x95\xe8.\xa2G\xedF\xc4m\xa1\x87\xc7\x88\x9c\xce'
class Blog(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(120), unique=True)
body = db.Column(db.String(120))
pub_date=db.Column(db.DateTime,nullable=False,default=datetime.utcnow)
owner_id=db.Column(db.Integer,db.ForeignKey('user.id'))
def __init__(self, title,body, owner):
self.title = title
self.body = body
self.owner=owner
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(120), unique=True)
pw_hash = db.Column(db.String(120))
blogs = db.relationship('Blog', backref='owner')
def __init__(self, username, password):
self.username = username
self.pw_hash = make_pw_hash(password)
@app.before_request
def require_login():
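    # Every request except the whitelisted routes requires a logged-in user in the session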
allowed_routes = ['login','blog','signup', 'index']
if request.endpoint not in allowed_routes and 'username' not in session:
return redirect('/login')
@app.route('/login', methods=['POST', 'GET'])
def login():
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
user = User.query.filter_by(username=username).first()
if user and check_pw_hash(password, user.pw_hash):
session['username'] = username
flash("Logged in")
return redirect('/newpost')
elif user and not check_pw_hash(password, user.pw_hash):
flash('user password is incorrect', 'error')
elif not user:
flash('user does not exist', 'error')
return render_template('login.html')
@app.route('/signup', methods=['POST', 'GET'])
def signup():
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
verify = request.form.get('verify')
if username=='' or password=='' or verify == '':
flash('one or more fields are empty', 'error')
elif verify != password:
flash('password does not match', 'error')
elif len(username)<3 or len(password)<3:
flash('username or password length must be longer than 3', 'error')
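        # Note: the validation branches above only flash messages; execution still falls through
        # to the account-creation check below even when validation fails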
existing_user = User.query.filter_by(username=username).first()
if not existing_user:
new_user = User(username, password)
db.session.add(new_user)
db.session.commit()
session['username'] = username
flash('successfully signed up!')
return redirect('/newpost')
else:
flash('username is already taken', 'error')
return render_template('signup.html')
@app.route('/logout')
def logout():
del session['username']
return redirect('/blog')
@app.route('/', methods=['POST','GET'])
def index():
users=User.query.all()
return render_template('index.html', users=users)
@app.route('/newpost', methods=['POST', 'GET'])
def newpost():
owner = User.query.filter_by(username=session['username']).first()
if request.method == 'POST':
new_blog_title = request.form.get('title')
new_blog_body = request.form.get('body')
if new_blog_title == '':
flash('Please fill in the title', 'error')
elif new_blog_body=='':
flash('Please fill in the body', 'error')
else:
new_blog=Blog(new_blog_title,new_blog_body, owner)
db.session.add(new_blog)
db.session.commit()
return render_template('individual.html', blog=new_blog)
return render_template ('newpost.html')
@app.route('/blog', methods=['POST','GET'])
def blog():
page_num=request.args.get('page',1, type=int)
blogs = Blog.query.order_by(Blog.pub_date.desc()).paginate(page=page_num, per_page=3)
users = User.query.all()
blog_id = request.args.get("id")
user_username = request.args.get("user")
if blog_id:
blog = Blog.query.filter_by(id=blog_id).first()
return render_template('individual.html', blog=blog)
if user_username:
page=request.args.get('page',1,type=int)
user = User.query.filter_by(username=user_username).first()
user_blogs = Blog.query.filter_by(owner_id=user.id).order_by(Blog.pub_date.desc()).paginate(page=page, per_page=4)
return render_template('singleUser.html', blogs=user_blogs, user=user)
else:
return render_template('blog.html', blogs=blogs)
if __name__ == '__main__':
app.run()
|
[
"lnfrnkln@gmail.com"
] |
lnfrnkln@gmail.com
|
002f0f637f88023d65540b178cd521580a962c12
|
f8862067042e61bd360802eac050477781985f89
|
/stg/gaussian_elimination.py
|
d20ab1b19b9645694016553fc775fbefc1d36f2d
|
[] |
no_license
|
sina33/heft
|
12d6536b2b716ea2d8bcfaa98b6c2676a7d140c6
|
c2d8d9c1f66921134fe3f1bbe0bd9563dd5c32a9
|
refs/heads/master
| 2020-04-03T02:18:25.603260
| 2018-12-06T21:11:23
| 2018-12-06T21:11:23
| 154,952,511
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,553
|
py
|
total_cores = 4
low_perf_multiplier = 2
dag={1:(2,3,4,5,6,7),
2:(19,),
3:(7,8),
4:(9,),
5:(10,),
6:(11,),
7:(8,9,10,11,12),
8:(19,),
9:(12,13),
10:(14,),
11:(15,),
12:(13,14,15,16),
13:(19,),
14:(16,17),
15:(18,),
16:(17,18),
17:(19,),
18:(19,),
19:()}
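# dag maps each task id to the tuple of its immediate successors; task 19 has no successors (sink task)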
def compcost(job, agent):
if(job==0):
return 0
if(job==1):
if agent == 'a' or agent == 'b':
return 80 * low_perf_multiplier
else:
return 80
if(job==2):
if agent == 'a' or agent == 'b':
return 40 * low_perf_multiplier
else:
return 40
if(job==3):
if agent == 'a' or agent == 'b':
return 40 * low_perf_multiplier
else:
return 40
if(job==4):
if agent == 'a' or agent == 'b':
return 40 * low_perf_multiplier
else:
return 40
if(job==5):
if agent == 'a' or agent == 'b':
return 40 * low_perf_multiplier
else:
return 40
if(job==6):
if agent == 'a' or agent == 'b':
return 40 * low_perf_multiplier
else:
return 40
if(job==7):
if agent == 'a' or agent == 'b':
return 60 * low_perf_multiplier
else:
return 60
if(job==8):
if agent == 'a' or agent == 'b':
return 30 * low_perf_multiplier
else:
return 30
if(job==9):
if agent == 'a' or agent == 'b':
return 30 * low_perf_multiplier
else:
return 30
if(job==10):
if agent == 'a' or agent == 'b':
return 30 * low_perf_multiplier
else:
return 30
if(job==11):
if agent == 'a' or agent == 'b':
return 30 * low_perf_multiplier
else:
return 30
if(job==12):
if agent == 'a' or agent == 'b':
return 40 * low_perf_multiplier
else:
return 40
if(job==13):
if agent == 'a' or agent == 'b':
return 20 * low_perf_multiplier
else:
return 20
if(job==14):
if agent == 'a' or agent == 'b':
return 20 * low_perf_multiplier
else:
return 20
if(job==15):
if agent == 'a' or agent == 'b':
return 20 * low_perf_multiplier
else:
return 20
if(job==16):
if agent == 'a' or agent == 'b':
return 20 * low_perf_multiplier
else:
return 20
if(job==17):
if agent == 'a' or agent == 'b':
return 10 * low_perf_multiplier
else:
return 10
if(job==18):
if agent == 'a' or agent == 'b':
return 10 * low_perf_multiplier
else:
return 10
if(job==19):
return 0
def commcost(ni, nj, A, B):
return 0
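    # Note: the unconditional return above zeroes out all communication costs; the pairwise table below is unreachable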
if(A==B):
return 0
else:
if(ni==1 and nj==2):
return 60
if(ni==1 and nj==3):
return 60
if(ni==1 and nj==4):
return 60
if(ni==1 and nj==5):
return 60
if(ni==1 and nj==6):
return 60
if(ni==1 and nj==7):
return 60
if(ni==3 and nj==7):
return 40
if(ni==3 and nj==8):
return 40
if(ni==4 and nj==9):
return 40
if(ni==5 and nj==10):
return 40
if(ni==6 and nj==11):
return 40
if(ni==7 and nj==8):
return 60
if(ni==7 and nj==9):
return 60
if(ni==7 and nj==10):
return 60
if(ni==7 and nj==11):
return 60
if(ni==7 and nj==12):
return 60
if(ni==9 and nj==12):
return 40
if(ni==9 and nj==13):
return 40
if(ni==10 and nj==14):
return 40
if(ni==11 and nj==15):
return 40
if(ni==12 and nj==13):
return 60
if(ni==12 and nj==14):
return 60
if(ni==12 and nj==15):
return 60
if(ni==12 and nj==16):
return 60
if(ni==14 and nj==16):
return 40
if(ni==14 and nj==17):
return 40
if(ni==15 and nj==18):
return 40
if(ni==16 and nj==17):
return 60
if(ni==16 and nj==18):
return 60
else:
return 0
|
[
"sina.saeedi.edu@gmail.com"
] |
sina.saeedi.edu@gmail.com
|
8bc5e1adedaef3838104de40f4b1a42ff9eacbbb
|
7645871c891fa84ba70689ff4b466d6e1bde36b0
|
/blog/migrations/0001_initial.py
|
95acf82171b107f67f89c303fe1564100b7680b9
|
[] |
no_license
|
NagaSrinivasulu/my_blog_django
|
adf087a884e95b59e2ff7b5e1cbc0144315a6ba6
|
c0c9f9b0f989b6d66a532e5c7b969869284451de
|
refs/heads/master
| 2020-03-29T21:21:28.362410
| 2018-09-27T09:16:26
| 2018-09-27T09:16:26
| 150,362,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Generated by Django 2.1.1 on 2018-09-25 07:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"nagasrinivasdaggumilli@gmail.com"
] |
nagasrinivasdaggumilli@gmail.com
|
c5b75b969940a862b60325eb6fdc193c6654cbfc
|
72ff62840eb781451dec907b58010c39a5e46af1
|
/brain/MCTS.py
|
59e48df31be89ab5d3b9df4118f4d3b9c80a0ffa
|
[] |
no_license
|
xiyanggudao/AIChineseChess
|
1999c3d87e0757b5613d5d79364d1803f00af502
|
a31a5d5f5399c0775c0a61224bc2505e061cb734
|
refs/heads/master
| 2021-01-01T19:54:00.425167
| 2018-01-13T16:16:56
| 2018-01-13T16:16:56
| 98,715,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,262
|
py
|
import numpy as np
from chess.MoveGenerator import MoveGenerator
from chess.Chessgame import Chessgame
from chess.ChessData import Move
from chess.Chessman import Chessman
class MCTS:
def __init__(self, brain, maxNodes):
self.brain = brain
maxEdges = maxNodes*128
self.maxNodes = maxNodes
self.maxEdges = maxEdges
self.W = np.empty(maxEdges, np.float32)
self.N = np.empty(maxEdges, np.int32)
self.Q = np.empty(maxEdges, np.float32)
self.P = np.empty(maxEdges, np.float32)
self.linkTo = np.empty(maxEdges, np.int32)
self.move = np.empty((maxEdges, 4), np.float32)
self.IsEdgeUsed = np.empty(maxEdges, np.bool_)
self.edgeUseCnt = 0
self.edgeCurrent = 0
self.nodesToEdge = np.empty((maxNodes, 128), np.int32)
self.nodesToEdgeCnt = np.empty(maxNodes, np.int32)
self.IsNodeUsed = np.empty(maxNodes, np.bool_)
self.nodeBoard = [None for i in range(maxNodes)]
self.nodeUseCnt = 0
self.root = None
self.nodeCurrent = 0
def newEdge(self):
assert self.edgeUseCnt < self.maxEdges
while self.IsEdgeUsed[self.edgeCurrent]:
self.edgeCurrent = (self.edgeCurrent + 1)%self.maxEdges
self.edgeUseCnt += 1
self.IsEdgeUsed[self.edgeCurrent] = True
return self.edgeCurrent
def newNode(self):
assert self.nodeUseCnt < self.maxNodes
while self.IsNodeUsed[self.nodeCurrent]:
self.nodeCurrent = (self.nodeCurrent + 1)%self.maxNodes
self.nodeUseCnt += 1
self.IsNodeUsed[self.nodeCurrent] = True
return self.nodeCurrent
def releaseNode(self, node):
assert self.IsNodeUsed[node]
for i in range(self.nodesToEdgeCnt[node]):
			edge = self.nodesToEdge[node, i]
self.releaseEdge(edge)
self.nodeUseCnt -= 1
self.IsNodeUsed[node] = False
def releaseEdge(self, edge):
assert self.IsEdgeUsed[edge]
if self.linkTo[edge] != -1:
self.releaseNode(self.linkTo[edge])
self.edgeUseCnt -= 1
self.IsEdgeUsed[edge] = False
def createNode(self, game):
moves = MoveGenerator(game).generateLegalMoves()
newNode = self.newNode()
self.nodeBoard[newNode] = game.ucciFen()
self.nodesToEdgeCnt[newNode] = len(moves)
if len(moves) < 1:
return (-1, -1)
p, v = self.brain.generate(game, moves)
for i in range(len(moves)):
newEdge = self.newEdge()
self.nodesToEdge[newNode, i] = newEdge
self.linkTo[newEdge] = -1
self.move[newEdge, 0] = moves[i].fromPos[0]
self.move[newEdge, 1] = moves[i].fromPos[1]
self.move[newEdge, 2] = moves[i].toPos[0]
self.move[newEdge, 3] = moves[i].toPos[1]
self.W[newEdge] = 0
self.N[newEdge] = 0
self.Q[newEdge] = 0
self.P[newEdge] = p[i]
return (newNode, v)
def clear(self):
self.IsEdgeUsed.fill(False)
self.edgeUseCnt = 0
self.IsNodeUsed.fill(False)
self.nodeUseCnt = 0
def setRoot(self, game):
self.clear()
self.root, v = self.createNode(game)
def moveRoot(self, edge):
newRoot = None
assert self.root != None
for i in range(self.nodesToEdgeCnt[self.root]):
e = self.nodesToEdge[self.root, i]
if e == edge:
assert newRoot == None
assert self.linkTo[edge] != -1
newRoot = self.linkTo[edge]
self.linkTo[edge] = -1
assert newRoot != None
self.releaseNode(self.root)
self.root = newRoot
def PUCT(self, q, p, sqrtSumN, n):
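		# Upper-confidence score: exploitation term q plus exploration term p * sqrt(total sibling visits) / (1 + n)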
return q + p*sqrtSumN/(1+n)
def select(self, node):
if self.nodesToEdgeCnt[node] < 1:
return -1
sumN = 0
for i in range(self.nodesToEdgeCnt[node]):
edge = self.nodesToEdge[node, i]
sumN += self.N[edge]
sqrtSumN = sumN ** 0.5
selected = self.nodesToEdge[node, 0]
max = self.PUCT(self.Q[selected], self.P[selected], sqrtSumN, self.N[selected])
for i in range(1, self.nodesToEdgeCnt[node]):
edge = self.nodesToEdge[node, i]
puct = self.PUCT(self.Q[edge], self.P[edge], sqrtSumN, self.N[edge])
if puct > max:
selected = edge
max = puct
return selected
def backup(self, edge, value):
self.N[edge] += 1
self.W[edge] += value
self.Q[edge] = self.W[edge]/self.N[edge]
def expandNode(self, node):
edge = self.select(node)
if edge == -1:
return -1
if self.linkTo[edge] == -1:
game = Chessgame()
game.setWithUcciFen(self.nodeBoard[node])
fx = self.move[edge, 0]
fy = self.move[edge, 1]
tx = self.move[edge, 2]
ty = self.move[edge, 3]
game.makeMove((fx, fy), (tx, ty))
self.linkTo[edge], v = self.createNode(game)
ret = -v
else:
ret = -self.expandNode(self.linkTo[edge])
self.backup(edge, ret)
return ret
def expand(self):
self.expandNode(self.root)
def pi(self, n, sumN):
return n/sumN
def selectToMove(self, node):
if self.nodesToEdgeCnt[node] < 1:
return -1
sumN = 0
for i in range(self.nodesToEdgeCnt[node]):
edge = self.nodesToEdge[node, i]
sumN += self.N[edge]
selected = self.nodesToEdge[node, 0]
max = self.pi(self.N[selected], sumN)
for i in range(1, self.nodesToEdgeCnt[node]):
edge = self.nodesToEdge[node, i]
pi = self.pi(self.N[edge], sumN)
if pi > max:
selected = edge
max = pi
return selected
def play(self):
edge = self.selectToMove(self.root)
move = None
if edge != -1:
self.moveRoot(edge)
fx = self.move[edge, 0]
fy = self.move[edge, 1]
tx = self.move[edge, 2]
ty = self.move[edge, 3]
move = Move((fx, fy), (tx, ty), Chessman.invalid(), Chessman.invalid())
return move
|
[
"1766500102@qq.com"
] |
1766500102@qq.com
|
a8dd63d37641bf5d2dbd04c750a68c9fc500b906
|
d1fffb8ab83561b4dc588b7d87ed0a16d440a8a3
|
/HW09_ReSub_Rahul_Kampati.py
|
e02a97d758f77db7ae22cd29f731d4be4db9128d
|
[] |
no_license
|
rahulkris1/SSW810
|
6cba2892211f2b34d3e00982770fd4a9fbe6173e
|
a88634aabe82e2439cee0d7eb6bacabe0a22b78f
|
refs/heads/master
| 2020-09-07T17:19:23.889631
| 2019-11-29T18:34:11
| 2019-11-29T18:34:11
| 220,858,008
| 0
| 0
| null | 2019-11-29T18:34:12
| 2019-11-10T22:10:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,695
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 26 November 2019, 08:00
@author: Kampati Rahul
Creation of a data repository for students and instructors to keep track of data
"""
import os
from collections import defaultdict
from prettytable import PrettyTable
class Student:
def __init__(self, cwid, name, major):
""" Students class to hold students data"""
self.cwid = cwid
self.name = name
self.major = major
self.course_grade_dict = defaultdict(str)
def course_grade_student(self, course, grade):
""" Assign grade of each course"""
self.course_grade_dict[course] = grade
def prettyTable_student(self):
""" Structuring data for pretty table for students"""
return [self.cwid, self.name, sorted(self.course_grade_dict.keys())]
class Instructor:
def __init__(self, cwid, name, dept):
""" instructors class to hold students data"""
self.cwid = cwid
self.name = name
self.dept = dept
self.course_inst_dict = defaultdict(int)
def num_course_students(self, course):
""" Assign number of students under each professor"""
self.course_inst_dict[course] += 1
def prettyTable_instructor(self):
""" Structuring data for pretty table for students"""
for course in self.course_inst_dict:
yield [self.cwid, self.name, self.dept, course, self.course_inst_dict[course]]
class Repository:
def __init__(self, directory):
""" repository class to hold the students, instructors and grades data"""
self.directory = directory
self.student_dict = {}
self.instructor_dict = {}
self.student_analyser()
self.instructor_analyser()
self.grades_analyser()
self.students_summary()
self.instructors_summary()
def student_analyser(self):
""" Analyse Students.txt data file"""
if not os.path.exists(self.directory):
raise FileNotFoundError("Directory not found")
file_students = os.path.join(self.directory, 'students.txt')
for cwid, name, major in self.file_reading_gen(file_students, 3, "\t", False):
self.student_dict[cwid] = Student(cwid, name, major)
def instructor_analyser(self):
""" Analyse Instructors.txt data file"""
if not os.path.exists(self.directory):
raise FileNotFoundError("Directory not found")
file_instructors = os.path.join(self.directory, 'instructors.txt')
for cwid, name, dept in self.file_reading_gen(file_instructors, 3, "\t", False):
self.instructor_dict[cwid] = Instructor(cwid, name, dept)
def grades_analyser(self):
""" Analyse grades.txt data file"""
if not os.path.exists(self.directory):
raise FileNotFoundError("Directory not found")
file_grades = os.path.join(self.directory, 'grades.txt')
for studentCwid, course, grade, instructorCwid in self.file_reading_gen(file_grades, 4, "\t", False):
if studentCwid in self.student_dict.keys():
self.student_dict[studentCwid].course_grade_student(course, grade)
else:
print(f"Invalid student cwid {studentCwid}")
if instructorCwid in self.instructor_dict.keys():
self.instructor_dict[instructorCwid].num_course_students(course)
else:
print(f"Invalid Instructor id {instructorCwid}")
def file_reading_gen(self, path, fields, sep, header=False):
"""Generator function that reads a flie and returns one line at a time."""
try:
fp = open(path, 'r')
except FileNotFoundError:
raise FileNotFoundError("Unable to open the file path provided")
else:
with fp:
if header:
header_info = next(fp)
if len(header_info.split(sep)) != fields:
raise ValueError(f"File path has {len(header_info.split(sep))} invalid number of fields instead of {fields}")
for line in fp:
if len(line.split(sep)) != fields:
raise ValueError(f" file has {len(next(fp.split(sep)))} fields instead of {fields} ")
else:
line = line.strip().split(sep)
yield tuple(line)
def students_summary(self):
""" Summarising the students data"""
tb_student = PrettyTable(field_names = ["CWID", "Name", "Completed Courses"])
for inst_student in self.student_dict.values():
tb_student.add_row(inst_student.prettyTable_student())
print("Student Summary")
print(tb_student)
def instructors_summary(self):
""" Summarising the Instructors data"""
tb_instructor = PrettyTable(field_names = ["CWID", "Name", "Dept", "Course", "Students"])
for inst_instructor in self.instructor_dict.values():
for instructor_data in inst_instructor.prettyTable_instructor():
tb_instructor.add_row(instructor_data)
print("Instructor Summary")
print(tb_instructor)
def main():
try:
Repository("C:/Users/HP/Desktop/redo/file_09")
except Exception as e:
print(e)
if __name__ == "__main__":
main()
|
[
"rahulkrishkampati@gmail.com"
] |
rahulkrishkampati@gmail.com
|
72adbd0109a8bea3523886155b79efd08bf30fe3
|
f4f2f8f85da06dda03435ad225c11f1a5dceeeec
|
/UI_UX/ui_app/utils/datasets.py
|
6bf51815f5c762e74aa9e157836aa752d43c9d7f
|
[] |
no_license
|
Djangojuniors/Sandeep_STW
|
f1ca6a244129a1bf8104e132be7948d42d557a9a
|
194e328920fab98d14bae3ac7dd87abcca09aadf
|
refs/heads/master
| 2023-08-25T15:00:08.289651
| 2021-10-23T08:10:47
| 2021-10-23T08:10:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,361
|
py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from .utils import xyxy2xywh, xywh2xyxy
from django.core.files.storage import default_storage
from django.conf import settings
help_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
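# After this loop, 'orientation' holds the numeric EXIF tag id that exif_size() reads to detect rotation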
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
class LoadImages: # for inference
def __init__(self, path, img_size=416):
path = str(Path(path)) # os-agnostic
print("PATH", path)
files = []
if os.path.isdir(os.path.join(settings.MEDIA_ROOT, path)):
print("IF DIR")
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(os.path.join(settings.MEDIA_ROOT, path)):
print("IS file")
files = [path]
print("FILES", files)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
print(nI, nV)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
print(self.nF)
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in ' + path
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(os.path.join(settings.MEDIA_ROOT, path)) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=416):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=416):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(os.path.join(settings.MEDIA_ROOT, sources), 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect, interp=cv2.INTER_LINEAR)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
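# Illustrative note (an assumption based on the constructor above): `sources`
# may be a single camera index/URL or a text file such as streams.txt listing
# one source per line, e.g.
#   0
#   rtsp://username:password@192.168.1.64/1
# Each iteration then returns (sources, batched CHW RGB frames, raw frames, None),
# with one entry per camera.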
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_labels=True, cache_images=False, single_cls=False):
path = str(Path(path)) # os-agnostic
assert os.path.isfile(path), 'File not found %s. See %s' % (path, help_url)
with open(os.path.join(settings.MEDIA_ROOT, path), 'r') as f:
self.img_files = [x.replace('/', os.sep) for x in f.read().splitlines() # os-agnostic
if os.path.splitext(x)[-1].lower() in img_formats]
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Read image shapes (wh)
sp = path.replace('.txt', '.shapes') # shapefile path
try:
with open(os.path.join(settings.MEDIA_ROOT, sp), 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
# Sort by aspect ratio
s = np.array(s, dtype=np.float64)
ar = s[:, 1] / s[:, 0] # aspect ratio
i = ar.argsort()
            self.img_files = [self.img_files[j] for j in i]
            self.label_files = [self.label_files[j] for j in i]
self.shapes = s[i] # wh
ar = ar[i]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32.).astype(np.int) * 32
# Preload labels (required for weighted CE training)
self.imgs = [None] * n
self.labels = [None] * n
if cache_labels or image_weights: # cache labels for faster training
self.labels = [np.zeros((0, 5))] * n
extract_bounding_boxes = False
create_datasubset = False
pbar = tqdm(self.label_files, desc='Caching labels')
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
for i, file in enumerate(pbar):
try:
with open(os.path.join(settings.MEDIA_ROOT, file), 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open(os.path.join(settings.MEDIA_ROOT, './datasubset/images.txt'), 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Caching labels (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
nf, nm, ne, nd, n)
assert nf > 0, 'No labels found. See %s' % help_url
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
img_path = self.img_files[index]
label_path = self.label_files[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
if os.path.isfile(label_path):
x = self.labels[index]
if x is None: # labels not preloaded
with open(os.path.join(settings.MEDIA_ROOT, label_path), 'r') as f:
x = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, img_path, shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
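# Minimal sketch (assumption, not part of the original file) of how collate_fn is
# wired into a PyTorch DataLoader; after collation, targets is an (n, 6) tensor of
# [batch_image_index, class, x, y, w, h] rows, which is what column 0 above is for.
#
#   dataset = LoadImagesAndLabels('data/train.txt', img_size=416, batch_size=16,
#                                 augment=True, hyp=hyp)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True,
#                                        collate_fn=LoadImagesAndLabels.collate_fn)
#   for imgs, targets, paths, shapes in loader:
#       pass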
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
img_path = self.img_files[index]
img = cv2.imread(img_path) # BGR
assert img is not None, 'Image Not Found ' + img_path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r < 1 or (self.augment and (r != 1)): # always resize down, only resize up if training with augmentation
interp = cv2.INTER_LINEAR if self.augment else cv2.INTER_AREA # LINEAR for training, AREA for testing
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
x = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
img_hsv = (cv2.cvtColor(img, cv2.COLOR_BGR2HSV) * x).clip(None, 255).astype(np.uint8)
np.clip(img_hsv[:, :, 0], None, 179, out=img_hsv[:, :, 0]) # inplace hue clip (0 - 179 deg)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
img4 = np.zeros((s * 2, s * 2, 3), dtype=np.uint8) + 128 # base image with 4 tiles
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Load labels
label_path = self.label_files[index]
if os.path.isfile(label_path):
x = self.labels[index]
if x is None: # labels not preloaded
with open(os.path.join(settings.MEDIA_ROOT, label_path), 'r') as f:
x = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
else:
labels = np.zeros((0, 5), dtype=np.float32)
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'] * 1,
translate=self.hyp['translate'] * 1,
scale=self.hyp['scale'] * 1,
shear=self.hyp['shear'] * 1,
border=-s // 2) # border to remove
return img4, labels4
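# Worked example of the tile placement above (illustration only): with s = 416,
# a mosaic centre at xc = yc = 416 and a 416x416 source image, tile i == 0 gives
#   x1a, y1a, x2a, y2a = 0, 0, 416, 416   (destination on the 832x832 canvas)
#   x1b, y1b, x2b, y2b = 0, 0, 416, 416   (source crop)
# so the image fills the top-left quadrant and padw = padh = 0, leaving its
# label coordinates unchanged before random_affine is applied.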
def letterbox(img, new_shape=(416, 416), color=(128, 128, 128),
auto=True, scaleFill=False, scaleup=True, interp=cv2.INTER_AREA):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = max(new_shape) / max(shape)
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=interp) # INTER_AREA is better, INTER_LINEAR is faster
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
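# Worked example (illustration only): letterbox(img, 416) on a 720x1280 frame
# with the default auto=True gives
#   r = 416 / 1280 = 0.325  ->  new_unpad = (416, 234)
#   dw, dh = 0, 182  ->  mod 32  ->  0, 22  ->  0.0, 11.0 per side
# so the output is 256 px high by 416 px wide (234 rows of content plus 11 px of
# grey padding top and bottom), with ratio = (0.325, 0.325) and (dw, dh) = (0.0, 11.0).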
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
if targets is None: # targets = [cls, xyxy]
targets = []
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
changed = (border != 0) or (M != np.eye(3)).any()
if changed:
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_AREA, borderValue=(128, 128, 128))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.2) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
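# Worked example of bbox_ioa above (illustration only): for a cutout mask
# box1 = [0, 0, 50, 50] and a label box2 = [[25, 25, 75, 75]], the intersection
# is 25 * 25 = 625 and the box2 area is 50 * 50 = 2500, so ioa = 0.25; since
# 0.25 < 0.60 that label would be kept after the mask is applied.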
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(os.path.join(settings.MEDIA_ROOT, file), 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(os.path.join(settings.MEDIA_ROOT, file.replace('.txt', 'bmp.txt')), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(os.path.join(settings.MEDIA_ROOT, p), 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(os.path.join(settings.MEDIA_ROOT, p), 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(os.path.join(settings.MEDIA_ROOT, path), 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
# ---- next file: /temp.py (repo: hq20051252/HTML-widget-Parser) ----
import MySQLdb
import sql_spec
# Dump every player_id from the playerinfo table into record.txt, one per line.
q = sql_spec.connect('localhost', 'root', '88888888', 'qiud')
qcur = q.cursor()
qcur.execute('''select player_id from playerinfo''')
result = qcur.fetchall()
fd = open('record.txt', 'w')
for r in result:
    fd.write(str(r[0]) + '\n')  # cast to str in case player_id comes back numeric
fd.close()
# ---- next file: /pedidos/migrations/0002_auto_20200804_0002.py (repo: alevarisco/Pyzzeria) ----
# Generated by Django 3.0.8 on 2020-08-04 04:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pedidos', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='pizza_ingrediente',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
]
# ---- next file: /Python/maximum-average-subarray-i.py (repo: black-shadows/LeetCode-Topicwise-Solutions) ----
# Time: O(n)
# Space: O(1)
class Solution(object):
def findMaxAverage(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: float
"""
result = total = sum(nums[:k])
        for i in range(k, len(nums)):  # range works on both Python 2 and 3 (xrange is Python 2 only)
total += nums[i] - nums[i-k]
result = max(result, total)
return float(result) / k
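# Illustrative trace of the sliding window above: nums = [1, 12, -5, -6, 50, 3], k = 4
#   initial window sum = 1 + 12 - 5 - 6 = 2
#   slide: 2 - 1 + 50 = 51, then 51 - 12 + 3 = 42
# the best window sum is 51, so the method returns 51 / 4.0 = 12.75.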
# ---- next file: /tanamanhias/store/views.py (repo: AnandaThalia/Website_RPL) ----
# Views for the store app
from django.shortcuts import render
from .models import *
from django.http import JsonResponse
import json
import datetime
from .utils import cookieCart,cartData,guestOrder
def store(request):
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
products = Product.objects.all()
context = {'products':products, 'cartItems':cartItems}
return render(request, 'store/store.html', context)
def cart(request):
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
context = {'items':items, 'order':order, 'cartItems':cartItems}
return render(request, 'store/cart.html', context)
def checkout(request):
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
context = {'items':items, 'order':order, 'cartItems':cartItems}
return render(request, 'store/checkout.html', context)
def index(request):
return render(request,'store/index.html')
def updateItem(request):
data = json.loads(request.body)
productId = data['productId']
action = data['action']
print('Action:', action)
print('Product:', productId)
customer = request.user.customer
product = Product.objects.get(id=productId)
order, created = Order.objects.get_or_create(customer=customer, complete=False)
orderItem, created = OrderItem.objects.get_or_create(order=order, product=product)
if action == 'add':
orderItem.quantity = (orderItem.quantity + 1)
elif action == 'remove':
orderItem.quantity = (orderItem.quantity - 1)
orderItem.save()
if orderItem.quantity <= 0:
orderItem.delete()
return JsonResponse('Item was added', safe=False)
# Final order-processing view: mark the order complete when the submitted total
# matches the cart total, and store a shipping address for physical items.
def processOrder(request):
transaction_id = datetime.datetime.now().timestamp()
data = json.loads(request.body)
if request.user.is_authenticated:
customer = request.user.customer
order, created = Order.objects.get_or_create(customer=customer, complete=False)
else:
customer,order = guestOrder(request,data)
total = float(data['form']['total'])
order.transaction_id = transaction_id
if total == order.get_cart_total:
order.complete = True
order.save()
if order.shipping == True:
ShippingAddress.objects.create(
customer=customer,
order=order,
address=data['shipping']['address'],
city=data['shipping']['city'],
state=data['shipping']['state'],
zipcode=data['shipping']['zipcode'],
)
return JsonResponse('Payment submitted..', safe=False)
# NOTE: this second cart() definition shadows the cartData-based version above;
# it rebuilds a guest's cart manually from the 'cart' cookie.
def cart(request):
if request.user.is_authenticated:
customer = request.user.customer
order, created = Order.objects.get_or_create(customer=customer, complete=False)
items = order.orderitem_set.all()
cartItems = order.get_cart_items
else:
#Create empty cart for now for non-logged in user
try:
cart = json.loads(request.COOKIES['cart'])
except:
cart = {}
print('CART:', cart)
items = []
order = {'get_cart_total':0, 'get_cart_items':0, 'shipping':False}
cartItems = order['get_cart_items']
for i in cart:
#We use try block to prevent items in cart that may have been removed from causing error
try:
cartItems += cart[i]['quantity']
product = Product.objects.get(id=i)
total = (product.price * cart[i]['quantity'])
order['get_cart_total'] += total
order['get_cart_items'] += cart[i]['quantity']
item = {
'id':product.id,
'product':{'id':product.id,'name':product.name, 'price':product.price,
'imageURL':product.imageURL}, 'quantity':cart[i]['quantity'],
'digital':product.digital,'get_total':total,
}
items.append(item)
if product.digital == False:
order['shipping'] = True
except:
pass
context = {'items':items, 'order':order, 'cartItems':cartItems}
return render(request, 'store/cart.html', context)
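# Illustrative note (an assumption inferred from the loop above): the guest
# 'cart' cookie is JSON keyed by product id, e.g. {"3": {"quantity": 2}}, and
# each entry is re-priced by looking the product up in the database.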
# ---- next file: /javsdt/JavbusYouma.py (repo: wdcew/javsdt, MIT license) ----
# -*- coding:utf-8 -*-
import os, re
from shutil import copyfile
from traceback import format_exc
########################################################################################################################
from Class.Settings import Settings
from Class.JavFile import JavFile
from Functions.Status import judge_exist_nfo, judge_exist_extra_folders, count_num_videos
from Functions.User import choose_directory
from Functions.Record import record_start, record_fail, record_warn
from Functions.Process import perfect_dict_data
from Functions.Standard import rename_mp4, rename_folder, classify_files, classify_folder
from Functions.XML import replace_xml, replace_xml_win
from Functions.Process import judge_exist_subtitle
from Functions.Picture import check_picture, add_watermark_subtitle
from Functions.Requests.Download import download_pic
from Functions.Genre import better_dict_genre
# ################################################## 不同 ##########################################################
from Functions.Process import judge_exist_divulge
from Functions.Status import check_actors
from Functions.Car import find_car_bus, list_suren_car
from Functions.Standard import collect_sculpture
from Functions.Baidu import translate
from Functions.Picture import add_watermark_divulge, crop_poster_youma
from Functions.Requests.JavbusReq import get_bus_html
from Functions.Requests.ArzonReq import steal_arzon_cookies, find_plot_arzon
# main开始
print('1、避开21:00-1:00,访问javbus和arzon很慢。\n'
'2、若一直打不开javbus,请在ini中更新防屏蔽网址\n')
# 读取配置文件,这个ini文件用来给用户设置
print('正在读取ini中的设置...', end='')
try:
settings = Settings('有码')
except:
settings = None
print(format_exc())
print('\n无法读取ini文件,请修改它为正确格式,或者打开“【ini】重新创建ini.exe”创建全新的ini!')
os.system('pause')
print('\n读取ini文件成功!\n')
# 路径分隔符:当前系统的路径分隔符 windows是“\”,linux和mac是“/”
sep = os.sep
# 检查头像:如果需要为kodi整理头像,先检查演员头像ini、头像文件夹是否存在。
check_actors(settings.bool_sculpture)
# 局部代理:哪些站点需要代理。
proxy_library, proxy_bus, proxy_321, proxy_db, proxy_arzon, proxy_dmm = settings.get_proxy()
# arzon通行证:如果需要在nfo中写入日语简介,需要先获得合法的arzon网站的cookie,用于通过成人验证。
cookie_arzon = steal_arzon_cookies(proxy_arzon) if settings.bool_plot and settings.bool_nfo else {}
# javbus网址 https://www.buscdn.work/
url_bus = settings.get_url_bus()
# 选择简繁中文以及百度翻译账户:需要简体中文还是繁体中文,影响影片特征和简介。
to_language, tran_id, tran_sk = settings.get_translate_account()
# 信息字典:存放影片信息,用于给用户自定义各种命名。
dict_data = {'车牌': 'ABC-123',
'车牌前缀': 'ABC',
'标题': '有码标题',
'完整标题': '完整有码标题',
'导演': '有码导演',
'片商': '有码片商',
'评分': '0',
'片长': '0',
'系列': '有码系列',
'发行年月日': '1970-01-01', '发行年份': '1970', '月': '01', '日': '01',
'首个演员': '有码演员', '全部演员': '有码演员',
'空格': ' ',
'\\': sep, '/': sep, # 文件路径分隔符
'是否中字': '',
'是否流出': '',
'影片类型': settings.av_type(),
'视频': 'ABC-123', # 当前及未来的视频文件名,不带ext
'原文件名': 'ABC-123', '原文件夹名': 'ABC-123', }
# nfo中title的写法。
list_name_nfo_title = settings.formula_name_nfo_title()
# 额外将哪些元素放入特征中
list_extra_genres = settings.list_extra_genre()
# 重命名视频的格式
list_name_video = settings.formula_rename_video()
# 重命名文件夹的格式
list_name_folder = settings.formula_rename_folder()
# fanart的格式
list_name_fanart = settings.formula_name_fanart()
# poster的格式
list_name_poster = settings.formula_name_poster()
# 视频文件名包含哪些多余的字母数字,需要无视
list_surplus_words_in_filename = settings.list_surplus_word_in_filename('有码')
# 文件名包含哪些特殊含义的文字,判断是否中字
list_subtitle_words_in_filename = settings.list_subtitle_word_in_filename()
# 文件名包含哪些特殊含义的文字,判断是否是无码流出片
list_divulge_words_in_filename = settings.list_divulge_word_in_filename()
# 素人番号:得到事先设置的素人番号,让程序能跳过它们
list_suren_cars = list_suren_car()
# 需要扫描的文件的类型
tuple_video_types = settings.tuple_video_type()
# 完善dict_data,如果用户自定义了一些文字,不在元素中,需要将它们添加进dict_data;list_classify_basis,归类标准,归类目标文件夹的组成公式。
dict_data, list_classify_basis = perfect_dict_data(list_extra_genres, list_name_video, list_name_folder, list_name_nfo_title, list_name_fanart, list_name_poster, settings.custom_classify_basis(), dict_data)
# 优化特征的字典
dict_genre = better_dict_genre('Javbus有码', to_language)
# 用户输入“回车”就继续选择文件夹整理
input_start_key = ''
while input_start_key == '':
# 用户:选择需要整理的文件夹
print('请选择要整理的文件夹:', end='')
root_choose = choose_directory()
print(root_choose)
# 日志:在txt中记录一下用户的这次操作,在某个时间选择了某个文件夹
record_start(root_choose)
# 归类:用户自定义的归类根目录,如果不需要归类则为空
root_classify = settings.check_classify_root(root_choose, sep)
# 计数:失败次数及进度
num_fail = 0 # 已经或可能导致致命错误,比如整理未完成,同车牌有不同视频
num_warn = 0 # 对整理结果不致命的问题,比如找不到简介
num_all_videos = count_num_videos(root_choose, tuple_video_types) # 所选文件夹总共有多少个视频文件
num_current = 0 # 当前视频的编号
print('...文件扫描开始...如果时间过长...请避开夜晚高峰期...\n')
# root【当前根目录】 dirs【子文件夹】 files【文件】,root是str,后两个是list
for root, dirs, files in os.walk(root_choose):
# 什么文件都没有
if not files:
continue
# 当前root是已归类的目录,无需处理
if '归类完成' in root.replace(root_choose, ''):
continue
# 跳过已存在nfo的文件夹,判断这一层文件夹中有没有nfo
if settings.bool_skip and judge_exist_nfo(files):
continue
# 对这一层文件夹进行评估,有多少视频,有多少同车牌视频,是不是独立文件夹
list_jav_struct = [] # 存放:需要整理的jav的结构体
dict_car_pref = {} # 存放:每一车牌的集数, 例如{'abp-123': 1, avop-789': 2}是指 abp-123只有一集,avop-789有cd1、cd2
num_videos_include = 0 # 计数:当前文件夹中视频的数量,可能有视频不是jav
dict_subtitle_files = {} # 存放:jav的字幕文件和车牌对应关系 {'c:\a\abc_123.srt': 'abc-123'}
# 判断文件是不是字幕文件,放入dict_subtitle_files中
for file_raw in files:
file_temp = file_raw.upper()
if file_temp.endswith(('.SRT', '.VTT', '.ASS', '.SSA', '.SUB', '.SMI',)):
# 当前模式不处理FC2
if 'FC2' in file_temp:
continue
# 去除用户设置的、干扰车牌的文字
for word in list_surplus_words_in_filename:
file_temp = file_temp.replace(word, '')
# 得到字幕文件名中的车牌
subtitle_car = find_car_bus(file_temp, list_suren_cars)
# 将该字幕文件和其中的车牌对应到dict_subtitle_files中
if subtitle_car:
dict_subtitle_files[file_raw] = subtitle_car
# print(dict_subtitle_files)
# 判断文件是不是视频,放入list_jav_struct中
for file_raw in files:
file_temp = file_raw.upper()
if file_temp.endswith(tuple_video_types) and not file_temp.startswith('.'):
num_videos_include += 1
num_current += 1
if 'FC2' in file_temp:
continue
for word in list_surplus_words_in_filename:
file_temp = file_temp.replace(word, '')
# 得到视频中的车牌
car = find_car_bus(file_temp, list_suren_cars)
if car:
try:
dict_car_pref[car] += 1 # 已经有这个车牌了,加一集cd
except KeyError:
dict_car_pref[car] = 1 # 这个新车牌有了第一集
# 这个车牌在dict_subtitle_files中,有它的字幕。
if car in dict_subtitle_files.values():
subtitle_file = list(dict_subtitle_files.keys())[list(dict_subtitle_files.values()).index(car)]
del dict_subtitle_files[subtitle_file]
else:
subtitle_file = ''
# 将该jav的各种属性打包好,包括原文件名带扩展名、所在文件夹路径、第几集、所属字幕文件名
jav_struct = JavFile(file_raw, root, car, dict_car_pref[car], subtitle_file, num_current)
list_jav_struct.append(jav_struct)
else:
print('>>无法处理:', root.replace(root_choose, '') + sep + file_raw)
# 判定影片所在文件夹是否是独立文件夹,独立文件夹是指该文件夹仅用来存放该影片,而不是大杂烩文件夹
# 这一层文件夹下有jav
if dict_car_pref:
# 当前文件夹下,车牌不止一个;还有其他非jav视频;有其他文件夹,除了演员头像文件夹“.actors”和额外剧照文件夹“extrafanart”;
if len(dict_car_pref) > 1 or num_videos_include > len(list_jav_struct) or judge_exist_extra_folders(dirs):
bool_separate_folder = False # 不是独立的文件夹
else:
bool_separate_folder = True # 这一层文件夹是这部jav的独立文件夹
else:
continue
# 开始处理每一部jav
for jav in list_jav_struct:
# 告诉用户进度
print('>> [' + str(jav.number) + '/' + str(num_all_videos) + ']:', jav.name)
print(' >发现车牌:', jav.car)
# 判断是否有中字的特征,条件有三满足其一即可:1有外挂字幕 2文件名中含有“-C”之类的字眼 3旧的nfo中已经记录了它的中字特征
if jav.subtitle:
bool_subtitle = True # 判定成功
dict_data['是否中字'] = settings.custom_subtitle_expression # '是否中字'这一命名元素被激活
else:
bool_subtitle = judge_exist_subtitle(root, jav.name_no_ext, list_subtitle_words_in_filename)
dict_data['是否中字'] = settings.custom_subtitle_expression if bool_subtitle else ''
# 判断是否是无码流出的作品,同理
bool_divulge = judge_exist_divulge(root, jav.name_no_ext, list_divulge_words_in_filename)
dict_data['是否流出'] = settings.custom_divulge_expression if bool_divulge else ''
# 影片的相对于所选文件夹的路径,用于报错
path_relative = sep + jav.path.replace(root_choose, '')
# 获取nfo信息的javbus网页
try:
# 用户指定了网址,则直接得到jav所在网址
if '公交车' in jav.name:
url_appointg = re.search(r'公交车(.+?)\.', jav.name)
if str(url_appointg) != 'None':
url_on_web = url_bus + url_appointg.group(1)
else:
num_fail += 1
record_fail(' >第' + str(num_fail) + '个失败!你指定的javbus网址有错误:' + path_relative + '\n')
continue # 【退出对该jav的整理】
# 用户没有指定网址,则去搜索
else:
url_search_web = url_bus + 'search/' + jav.car + '&type=1&parent=ce'
print(' >搜索车牌:', url_search_web)
# 得到javbus搜索网页html
html_web = get_bus_html(url_search_web, proxy_bus)
# 尝试找movie-box
list_search_results = re.findall(r'movie-box" href="(.+?)">', html_web) # 匹配处理“标题”
if list_search_results: # 搜索页面有结果
# print(list_search_results)
# print(' >正在核查搜索结果...')
jav_pref = jav.car.split('-')[0] # 匹配车牌的前缀字母
jav_suf = jav.car.split('-')[-1].lstrip('0') # 当前车牌的后缀数字 去除多余的0
list_fit_results = [] # 存放,车牌符合的结果
for i in list_search_results:
url_end = i.split('/')[-1].upper()
url_suf = re.search(r'[-_](\d+)', url_end).group(1).lstrip('0') # 匹配box上影片url,车牌的后缀数字,去除多余的0
if jav_suf == url_suf: # 数字相同
url_pref = re.search(r'([A-Z0-9]+)[-_]', url_end).group(1).upper() # 匹配处理url所带车牌前面的字母“n”
if jav_pref == url_pref: # 数字相同的基础下,字母也相同,即可能车牌相同
list_fit_results.append(i)
# 有码搜索的结果一个都匹配不上
if not list_fit_results:
num_fail += 1
record_fail(' >第' + str(
num_fail) + '个失败!javbus有码找不到该车牌的信息:' + jav.car + ',' + path_relative + '\n')
continue # 【跳出对该jav的整理】
# 默认用第一个搜索结果
url_on_web = list_fit_results[0]
if len(list_fit_results) > 1:
num_fail += 1
record_fail(' >第' + str(
num_fail) + '个警告!javbus搜索到同车牌的不同视频:' + jav.car + ',' + path_relative + '\n')
# 找不到box
else:
num_fail += 1
record_fail(' >第' + str(
num_fail) + '个失败!javbus有码找不到该车牌的信息:' + jav.car + ',' + path_relative + '\n')
continue # 【跳出对该jav的整理】
# 经过上面的三种情况,可能找到了jav在bus上的网页链接url_on_web
print(' >获取信息:', url_on_web)
# 得到最终的jav所在网页
html_web = get_bus_html(url_on_web, proxy_bus)
# 开始匹配信息
# 有大部分信息的html_web
html_web = re.search(r'(h3>[\s\S]*?)磁力連結投稿', html_web, re.DOTALL).group(1)
# 标题
title = re.search(r'h3>(.+?)</h3', html_web, re.DOTALL).group(1) # javbus上的标题可能占两行
# 去除xml文档和windows路径不允许的特殊字符 &<> \/:*?"<>|
title = replace_xml_win(title)
print(' >影片标题:', title)
# 正则匹配 影片信息 开始!
# title的开头是车牌号,想要后面的纯标题
car_titleg = re.search(r'(.+?) (.+)', title)
# 车牌号
dict_data['车牌'] = car = car_titleg.group(1)
dict_data['车牌前缀'] = car.split('-')[0]
# 给用户重命名用的标题是“短标题”,nfo中是“完整标题”,但用户在ini中只用写“标题”
title_only = car_titleg.group(2)
# DVD封面cover
coverg = re.search(r'bigImage" href="(.+?)">', html_web) # 封面图片的正则对象
if str(coverg) != 'None':
url_cover = coverg.group(1)
else:
url_cover = ''
# 发行日期
premieredg = re.search(r'發行日期:</span> (.+?)</p>', html_web)
if str(premieredg) != 'None':
dict_data['发行年月日'] = time_premiered = premieredg.group(1)
dict_data['发行年份'] = time_premiered[0:4]
dict_data['月'] = time_premiered[5:7]
dict_data['日'] = time_premiered[8:10]
else:
dict_data['发行年月日'] = time_premiered = '1970-01-01'
dict_data['发行年份'] = '1970'
dict_data['月'] = '01'
dict_data['日'] = '01'
# 片长 <td><span class="text">150</span> 分钟</td>
runtimeg = re.search(r'長度:</span> (.+?)分鐘</p>', html_web)
if str(runtimeg) != 'None':
dict_data['片长'] = runtimeg.group(1)
else:
dict_data['片长'] = '0'
# 导演
directorg = re.search(r'導演:</span> <a href=".+?">(.+?)<', html_web)
if str(directorg) != 'None':
dict_data['导演'] = replace_xml_win(directorg.group(1))
else:
dict_data['导演'] = '有码导演'
# 片商 制作商
studiog = re.search(r'製作商:</span> <a href=".+?">(.+?)</a>', html_web)
if str(studiog) != 'None':
dict_data['片商'] = studio = replace_xml_win(studiog.group(1))
else:
dict_data['片商'] = '有码片商'
studio = ''
# 系列:</span> <a href="https://www.cdnbus.work/series/kpl">悪質シロウトナンパ</a>
seriesg = re.search(r'系列:</span> <a href=".+?">(.+?)</a>', html_web) # 封面图片的正则对象
if str(seriesg) != 'None':
dict_data['系列'] = series = seriesg.group(1).replace(sep, '#')
else:
dict_data['系列'] = '有码系列'
series = ''
# 演员们 和 # 第一个演员
actors = re.findall(r'star/.+?"><img src=.+?" title="(.+?)">', html_web)
if actors:
if len(actors) > 7:
dict_data['全部演员'] = ' '.join(actors[:7])
else:
dict_data['全部演员'] = ' '.join(actors)
dict_data['首个演员'] = actors[0]
# 有些用户需要删去 标题 末尾可能存在的 演员姓名
if settings.bool_strip_actors and title_only.endswith(dict_data['全部演员']):
title_only = title_only[:-len(dict_data['全部演员'])].rstrip()
else:
actors = ['有码演员']
dict_data['首个演员'] = dict_data['全部演员'] = '有码演员'
# 处理影片的标题过长
dict_data['完整标题'] = title_only
if len(title_only) > settings.int_title_len:
dict_data['标题'] = title_only[:settings.int_title_len]
else:
dict_data['标题'] = title_only
# 特点
genres = re.findall(r'genre"><a href=".+?">(.+?)</a></span>', html_web)
if bool_subtitle: # 有“中字“,加上特征”中文字幕”
genres.append('中文字幕')
if bool_divulge: # 是流出无码片,加上特征'无码流出'
genres.append('无码流出')
try:
genres = [dict_genre[i] for i in genres if dict_genre[i] != '删除']
except KeyError as error:
num_fail += 1
record_fail(' >第' + str(num_fail) + '个失败!发现新的特征需要添加至【特征对照表】:' + str(error) + '\n')
continue
# print(genres)
# arzon的简介 #########################################################
# 去arzon找简介
if settings.bool_nfo and settings.bool_plot and jav.episode == 1:
plot, status_arzon, acook = find_plot_arzon(car, cookie_arzon, proxy_arzon)
if status_arzon == 0:
pass
elif status_arzon == 1:
num_warn += 1
record_warn(' >第' + str(num_warn) + '个失败!找不到简介,尽管arzon上有搜索结果:' + path_relative + '\n')
else:
num_warn += 1
record_warn(' >第' + str(num_warn) + '个失败!找不到简介,影片被arzon下架:' + path_relative + '\n')
# 需要翻译简介
if settings.bool_tran:
plot = translate(tran_id, tran_sk, plot, to_language)
if plot.startswith('【百度'):
num_fail += 1
record_fail(' >第' + str(num_fail) + '个失败!翻译简介失败:' + path_relative + '\n')
# 去除xml文档不允许的特殊字符 &<> \/:*?"<>|
plot = replace_xml(plot)
# print(plot)
else:
plot = ''
#######################################################################
dict_data['视频'] = dict_data['原文件名'] = jav.name_no_ext # dict_data['视频'],先定义为原文件名,即将发生变化。
dict_data['原文件夹名'] = jav.folder
# 是CD1还是CDn?
num_all_episodes = dict_car_pref[jav.car] # 该车牌总共多少集
if num_all_episodes > 1:
str_cd = '-cd' + str(jav.episode)
else:
str_cd = ''
# 1重命名视频【相同】
try:
dict_data, jav, num_temp = rename_mp4(jav, num_fail, settings, dict_data, list_name_video,
path_relative, str_cd)
num_fail = num_temp
except FileExistsError:
num_fail += 1
continue
# 2 归类影片【相同】只针对视频文件和字幕文件。注意:第2操作和下面(第3操作+第7操作)互斥,只能执行第2操作或(第3操作+第7操作),归类影片是针对“文件”还是“文件夹”。
try:
jav, num_temp = classify_files(jav, num_fail, settings, dict_data, list_classify_basis,
root_classify)
num_fail = num_temp
except FileExistsError:
num_fail += 1
continue
# 3重命名文件夹【相同】如果是针对“文件”归类,这一步会被跳过。 因为用户只需要归类视频文件,不需要管文件夹。
try:
jav, num_temp = rename_folder(jav, num_fail, settings, dict_data, list_name_folder,
bool_separate_folder, num_all_episodes)
num_fail = num_temp
except FileExistsError:
num_fail += 1
continue
# 更新一下path_relative
path_relative = sep + jav.path.replace(root_choose, '') # 影片的相对于所选文件夹的路径,用于报错
# 4写入nfo【独特】
if settings.bool_nfo:
if settings.bool_cd_only:
path_nfo = jav.root + sep + jav.name_no_ext.replace(str_cd, '') + '.nfo'
else:
path_nfo = jav.root + sep + jav.name_no_ext + '.nfo'
title_in_nfo = ''
for i in list_name_nfo_title:
title_in_nfo += dict_data[i] # nfo中tilte的写法
# 开始写入nfo,这nfo格式是参考的kodi的nfo
f = open(path_nfo, 'w', encoding="utf-8")
f.write("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\" ?>\n"
"<movie>\n"
" <plot>" + plot + "</plot>\n"
" <title>" + title_in_nfo + "</title>\n"
" <originaltitle>" + title + "</originaltitle>\n"
" <director>" + dict_data['导演'] + "</director>\n"
" <year>" + dict_data['发行年份'] + "</year>\n"
" <mpaa>NC-17</mpaa>\n"
" <customrating>NC-17</customrating>\n"
" <countrycode>JP</countrycode>\n"
" <premiered>" + time_premiered + "</premiered>\n"
" <release>" + time_premiered + "</release>\n"
" <runtime>" + dict_data['片长'] + "</runtime>\n"
" <country>日本</country>\n"
" <studio>" + studio + "</studio>\n"
" <id>" + car + "</id>\n"
" <num>" + car + "</num>\n"
" <set>" + series + "</set>\n") # emby不管set系列,kodi可以
# 需要将特征写入genre
if settings.bool_genre:
for i in genres:
f.write(" <genre>" + i + "</genre>\n")
if settings.bool_write_series and series:
f.write(" <genre>系列:" + series + "</genre>\n")
if settings.bool_write_studio and studio:
f.write(" <genre>片商:" + studio + "</genre>\n")
if list_extra_genres:
for i in list_extra_genres:
f.write(" <genre>" + dict_data[i] + "</genre>\n")
# 需要将特征写入tag
if settings.bool_tag:
for i in genres:
f.write(" <tag>" + i + "</tag>\n")
if settings.bool_write_series and series:
f.write(" <tag>系列:" + series + "</tag>\n")
if settings.bool_write_studio and studio:
f.write(" <tag>片商:" + studio + "</tag>\n")
if list_extra_genres:
for i in list_extra_genres:
f.write(" <tag>" + dict_data[i] + "</tag>\n")
# 写入演员
for i in actors:
f.write(" <actor>\n <name>" + i + "</name>\n <type>Actor</type>\n </actor>\n")
f.write("</movie>\n")
f.close()
print(' >nfo收集完成')
# 5需要两张封面图片【独特】
if settings.bool_jpg:
# fanart和poster路径
path_fanart = jav.root + sep
path_poster = jav.root + sep
for i in list_name_fanart:
path_fanart += dict_data[i]
for i in list_name_poster:
path_poster += dict_data[i]
# print(path_fanart)
# kodi只需要一份图片,图片路径唯一
if settings.bool_cd_only:
path_fanart = path_fanart.replace(str_cd, '')
path_poster = path_poster.replace(str_cd, '')
# emby需要多份,现在不是第一集,直接复制第一集的图片
elif jav.episode != 1:
try:
copyfile(path_fanart.replace(str_cd, '-cd1'), path_fanart)
print(' >fanart.jpg复制成功')
copyfile(path_poster.replace(str_cd, '-cd1'), path_poster)
print(' >poster.jpg复制成功')
except FileNotFoundError:
pass
# kodi或者emby需要的第一份图片
if check_picture(path_fanart):
# print(' >已有fanart.jpg')
pass
else:
# 下载封面
print(' >从javbus下载封面:', url_cover)
try:
download_pic(url_cover, path_fanart, proxy_bus)
print(' >fanart.jpg下载成功')
except:
num_fail += 1
record_fail(' >第' + str(
num_fail) + '个失败!下载fanart.jpg失败:' + url_cover + ',' + path_relative + '\n')
continue # 退出对该jav的整理
# 裁剪生成 poster
if check_picture(path_poster):
# print(' >已有poster.jpg')
pass
else:
crop_poster_youma(path_fanart, path_poster)
# 需要加上条纹
if settings.bool_watermark_subtitle and bool_subtitle:
add_watermark_subtitle(path_poster)
if settings.bool_watermark_divulge and bool_divulge:
add_watermark_divulge(path_poster)
# 6收集演员头像【相同】
if settings.bool_sculpture and jav.episode == 1:
if actors[0] == '有码演员':
print(' >未知演员,无法收集头像')
else:
collect_sculpture(actors, jav.root)
# 7归类影片,针对文件夹【相同】
try:
num_temp = classify_folder(jav, num_fail, settings, dict_data, list_classify_basis, root_classify,
root, bool_separate_folder, num_all_episodes)
num_fail = num_temp
except FileExistsError:
num_fail += 1
continue
except:
num_fail += 1
record_fail(' >第' + str(num_fail) + '个失败!发生错误,如一直在该影片报错请截图并联系作者:' + path_relative + '\n' + format_exc() + '\n')
continue # 【退出对该jav的整理】
# 完结撒花
print('\n当前文件夹完成,', end='')
if num_fail > 0:
print('失败', num_fail, '个! ', root_choose, '\n')
line = -1
with open('【可删除】失败记录.txt', 'r', encoding="utf-8") as f:
content = list(f)
while 1:
if content[line].startswith('已'):
break
line -= 1
for i in range(line+1, 0):
print(content[i], end='')
print('\n“【可删除】失败记录.txt”已记录错误\n')
else:
print(' “0”失败! ', root_choose, '\n')
if num_warn > 0:
print('“警告信息.txt”还记录了', num_warn, '个警告信息!\n')
# os.system('pause')
input_start_key = input('回车继续选择文件夹整理:')
# ---- next file: /dsClass/mtcnn_detect.py (repo: goolig/dsClass) ----
'''
Tensorflow implementation of the mtcnn face detection algorithm
Credit: DavidSandBerg for implementing this method on tensorflow
'''
from six import string_types, iteritems
import numpy as np
import tensorflow as tf
import cv2
import os
class MTCNNDetect(object):
def __init__(self, face_rec_graph, model_path = "", threshold = [0.6, 0.7, 0.7], factor = 0.709, scale_factor = 1):
'''
:param face_rec_sess: FaceRecSession
:param threshold: detection threshold
:param factor: default 0.709 image pyramid -- magic number
:param model_path:
'''
self.threshold = threshold
self.factor = factor
        self.scale_factor = scale_factor
with face_rec_graph.graph.as_default():
print("Loading MTCNN Face detection model")
self.sess = tf.Session()
if not model_path:
model_path, _ = os.path.split(os.path.realpath(__file__))
with tf.variable_scope('pnet'):
data = tf.placeholder(tf.float32, (None, None, None, 3), 'input')
pnet = PNet({'data': data})
pnet.load(os.path.join(model_path, 'det1.npy'), self.sess)
with tf.variable_scope('rnet'):
data = tf.placeholder(tf.float32, (None, 24, 24, 3), 'input')
rnet = RNet({'data': data})
rnet.load(os.path.join(model_path, 'det2.npy'), self.sess)
with tf.variable_scope('onet'):
data = tf.placeholder(tf.float32, (None, 48, 48, 3), 'input')
onet = ONet({'data': data})
onet.load(os.path.join(model_path, 'det3.npy'), self.sess)
self.pnet = lambda img: self.sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0': img})
self.rnet = lambda img: self.sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0': img})
self.onet = lambda img: self.sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'),
feed_dict={'onet/input:0': img})
print("MTCNN Model loaded")
def detect_face(self, img, minsize):
# im: input image
# minsize: minimum of faces' size
if(self.scale_factor > 1):
img = cv2.resize(img,(int(len(img[0])/self.scale_factor), int(len(img)/self.scale_factor)))
factor_count = 0
total_boxes = np.empty((0, 9))
points = []
h = img.shape[0]
w = img.shape[1]
minl = np.amin([h, w])
m = 12.0 / minsize
minl = minl * m
# creat scale pyramid
scales = []
while minl >= 12:
scales += [m * np.power(self.factor, factor_count)]
minl = minl * self.factor
factor_count += 1
# first stage
for j in range(len(scales)):
scale = scales[j]
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data - 127.5) * 0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
out = self.pnet(img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
boxes, _ = generateBoundingBox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, self.threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox > 0:
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick, :]
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage
tempimg = np.zeros((24, 24, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (24, 24))
else:
return np.empty()
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = self.rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
ipass = np.where(score > self.threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick, :]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48, 48, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = imresample(tmp, (48, 48))
else:
return np.empty()
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = self.onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where(score > self.threshold[2])
points = points[:, ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
w = total_boxes[:, 2] - total_boxes[:, 0] + 1
h = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
if total_boxes.shape[0] > 0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick, :]
points = points[:, pick]
        simple_points = np.transpose(
            points)  # points comes back as a (10, n) array; transpose it so each row holds one face's landmarks
rects = [(max(0,(int(rect[0]))) * self.scale_factor,max(0,int(rect[1])) * self.scale_factor,
int(rect[2] - rect[0]) * self.scale_factor,int(rect[3] - rect[1]) * self.scale_factor) for rect in total_boxes]
return rects, simple_points * self.scale_factor
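# Minimal usage sketch (assumptions: face_rec_graph is any object exposing a
# .graph tf.Graph attribute, and the det1/det2/det3 .npy weights sit next to this file):
#
#   detector = MTCNNDetect(face_rec_graph, scale_factor=2)
#   frame = cv2.imread('people.jpg')
#   rects, landmarks = detector.detect_face(frame, 80)  # 80 = minimum face size in px
#   for (x, y, w, h) in rects:
#       cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)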
def layer(op):
'''Decorator for composable network layers.'''
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, trainable=True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.setup()
def setup(self):
'''Construct the network. '''
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
data_dict = np.load(data_path, encoding='latin1').item() # pylint: disable=no-member
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in iteritems(data_dict[op_name]):
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
'''Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
'''
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, string_types):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
'''Returns the current network output.'''
return self.terminals[-1]
def get_unique_name(self, prefix):
'''Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
'''
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
'''Creates a new TensorFlow variable.'''
return tf.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
'''Verifies that the padding is one of the supported ones.'''
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
inp,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding='SAME',
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = int(inp.get_shape()[-1])
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
# This is the common-case. Convolve the input without any further complications.
output = convolve(inp, kernel)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def prelu(self, inp, name):
with tf.variable_scope(name):
i = int(inp.get_shape()[-1])
alpha = self.make_var('alpha', shape=(i,))
output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
return output
@layer
def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):
self.validate_padding(padding)
return tf.nn.max_pool(inp,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def fc(self, inp, num_out, name, relu=True):
with tf.variable_scope(name):
input_shape = inp.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= int(d)
feed_in = tf.reshape(inp, [-1, dim])
else:
feed_in, dim = (inp, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=name)
return fc
"""
Multi dimensional softmax,
refer to https://github.com/tensorflow/tensorflow/issues/210
compute softmax along the dimension of target
the native softmax only supports batch_size x dimension
"""
@layer
def softmax(self, target, axis, name=None):
max_axis = tf.reduce_max(target, axis, keep_dims=True)
target_exp = tf.exp(target - max_axis)
normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)
softmax = tf.div(target_exp, normalize, name)
return softmax
class PNet(Network):
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='PReLU1')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='PReLU2')
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='PReLU3')
.conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
.softmax(3, name='prob1'))
(self.feed('PReLU3') # pylint: disable=no-value-for-parameter
.conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
class RNet(Network):
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.fc(128, relu=False, name='conv4')
.prelu(name='prelu4')
.fc(2, relu=False, name='conv5-1')
.softmax(1, name='prob1'))
(self.feed('prelu4') # pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv5-2'))
class ONet(Network):
def setup(self):
(self.feed('data') # pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.max_pool(2, 2, 2, 2, name='pool3')
.conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
.prelu(name='prelu4')
.fc(256, relu=False, name='conv5')
.prelu(name='prelu5')
.fc(2, relu=False, name='conv6-1')
.softmax(1, name='prob1'))
(self.feed('prelu5') # pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv6-2'))
(self.feed('prelu5') # pylint: disable=no-value-for-parameter
.fc(10, relu=False, name='conv6-3'))
# function [boundingbox] = bbreg(boundingbox,reg)
def bbreg(boundingbox, reg):
# calibrate bounding boxes
if reg.shape[1] == 1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4]))
return boundingbox
def generateBoundingBox(imap, reg, scale, t):
# use heatmap to generate bounding boxes
stride = 2
cellsize = 12
imap = np.transpose(imap)
dx1 = np.transpose(reg[:, :, 0])
dy1 = np.transpose(reg[:, :, 1])
dx2 = np.transpose(reg[:, :, 2])
dy2 = np.transpose(reg[:, :, 3])
y, x = np.where(imap >= t)
if y.shape[0] == 1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
score = imap[(y, x)]
reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
if reg.size == 0:
reg = np.empty((0, 3))
bb = np.transpose(np.vstack([y, x]))
q1 = np.fix((stride * bb + 1) / scale)
q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
return boundingbox, reg
# function pick = nms(boxes,threshold,type)
def nms(boxes, threshold, method):
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
s = boxes[:, 4]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size > 0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
if method == 'Min':
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o <= threshold)]
pick = pick[0:counter]
return pick
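# Added sanity check (illustrative numbers only): three boxes given as rows of
# [x1, y1, x2, y2, score]; the first two overlap heavily, the third is separate.
# With threshold 0.5 and method 'Union', nms keeps the higher-scoring box of the
# overlapping pair plus the separate box, i.e. it returns the indices [0, 2]:
# demo_boxes = np.array([[0., 0., 10., 10., 0.9],
#                        [1., 1., 11., 11., 0.8],
#                        [50., 50., 60., 60., 0.7]])
# nms(demo_boxes, 0.5, 'Union')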
# function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h)
def pad(total_boxes, w, h):
# compute the padding coordinates (pad the bounding boxes to square)
tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
numbox = total_boxes.shape[0]
dx = np.ones((numbox), dtype=np.int32)
dy = np.ones((numbox), dtype=np.int32)
edx = tmpw.copy().astype(np.int32)
edy = tmph.copy().astype(np.int32)
x = total_boxes[:, 0].copy().astype(np.int32)
y = total_boxes[:, 1].copy().astype(np.int32)
ex = total_boxes[:, 2].copy().astype(np.int32)
ey = total_boxes[:, 3].copy().astype(np.int32)
tmp = np.where(ex > w)
edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1)
ex[tmp] = w
tmp = np.where(ey > h)
edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1)
ey[tmp] = h
tmp = np.where(x < 1)
dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1)
x[tmp] = 1
tmp = np.where(y < 1)
dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1)
y[tmp] = 1
return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
# function [bboxA] = rerec(bboxA)
def rerec(bboxA):
# convert bboxA to square
h = bboxA[:, 3] - bboxA[:, 1]
w = bboxA[:, 2] - bboxA[:, 0]
l = np.maximum(w, h)
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
bboxA[:, 2:4] = bboxA[:, 0:2] + np.transpose(np.tile(l, (2, 1)))
return bboxA
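# Example (added): rerec turns [0, 0, 10, 20] (w=10, h=20) into the 20x20 square
# [-5, 0, 15, 20]; the shorter side is widened symmetrically around its centre.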
def imresample(img, sz):
im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA) # @UndefinedVariable
return im_data
|
[
"shtar@post.bgu.ac.il"
] |
shtar@post.bgu.ac.il
|
70dd995cb028be14737e72a3c58cb07774785acb
|
e9ac72e0736ac17c5229c4b614dea510d643c88b
|
/aoj/alds1_1_d.py
|
3c57f3ecc0e691e8c933b0ebf4bb8066a022cbd3
|
[] |
no_license
|
famasoon/kyopro
|
204021162f9b3b50797e0fc674b4fbb6e806ca97
|
5876ca59cd9ca3c406cdc88ae857656e5e3886d4
|
refs/heads/master
| 2023-09-01T19:22:19.703108
| 2021-10-25T13:05:11
| 2021-10-25T13:05:11
| 213,641,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
n = int(input())
max_value = -2000000000
min_value = int(input())
for i in range(1, n):
new_value = int(input())
if new_value - min_value > max_value:
max_value = new_value - min_value
if min_value > new_value:
min_value = new_value
print(max_value)
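# Example (added): with n = 6 and the values 5, 3, 1, 3, 4, 3 entered one per line,
# the largest later-minus-earlier difference is 4 - 1 = 3, so the program prints 3.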
|
[
"famasoon@gmail.com"
] |
famasoon@gmail.com
|
1176757494ee948beb10dc386770bfbd2a823956
|
a29310948867f5f07109fcd225a84282ad7eea16
|
/design_models/template_method.py
|
c4f800913310ae0c850b9c6b745efc7ed06b179d
|
[] |
no_license
|
likeweilikewei/Python-study-demo
|
09b266c0756b6e340e8b8e3153a7e497be8ee1a9
|
7dd4bc851273a5815d8980f9857828abfa5364a7
|
refs/heads/master
| 2020-06-26T21:17:27.095532
| 2019-07-31T02:17:43
| 2019-07-31T02:17:43
| 199,760,324
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
#!/usr/bin/python
# coding:utf8
'''
Template Method
Template Method pattern:
When to use: the same overall logic is repeated, but the concrete details differ from case to case.
Structure: the shared logic is pulled up into the parent (the skeleton); the concrete details are left to the subclasses. It can be seen as an abstraction over the control flow.
'''
ingredients = "spam eggs apple"
line = '-' * 10
# Skeletons
def iter_elements(getter, action):
"""Template skeleton that iterates items"""
for element in getter():
action(element)
print(line)
def rev_elements(getter, action):
"""Template skeleton that iterates items in reverse order"""
for element in getter()[::-1]:
action(element)
print(line)
# Getters
def get_list():
return ingredients.split()
def get_lists():
return [list(x) for x in ingredients.split()]
# Actions
def print_item(item):
print(item)
def reverse_item(item):
print(item[::-1])
# Makes templates
def make_template(skeleton, getter, action):
"""Instantiate a template method with getter and action"""
def template():
skeleton(getter, action)
return template
# Create our template functions
templates = [make_template(s, g, a)
for g in (get_list, get_lists)
for a in (print_item, reverse_item)
for s in (iter_elements, rev_elements)]
# Execute them
for template in templates:
template()
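# Added illustration: make_template can also be used on its own to build a single
# template; this one prints each ingredient in order, followed by the separator line.
print_in_order = make_template(iter_elements, get_list, print_item)
print_in_order()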
|
[
"1293120583@qq,com"
] |
1293120583@qq,com
|
056c9e4811f80752b17207d170437781ff891727
|
ad553dd718a8df51dabc9ba636040da740db57cf
|
/.history/app_20181213160433.py
|
0cdeeb5a5dad82ce5b51349eae825d57abecf7ae
|
[] |
no_license
|
NergisAktug/E-Commerce-PythonWithFlask-Sqlite3
|
8e67f12c28b11a7a30d13788f8dc991f80ac7696
|
69ff4433aa7ae52ef854d5e25472dbd67fd59106
|
refs/heads/main
| 2023-01-01T14:03:40.897592
| 2020-10-19T20:36:19
| 2020-10-19T20:36:19
| 300,379,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,372
|
py
|
import datetime
from flask import Flask,flash, request, render_template_string, render_template
from flask import Flask, url_for, render_template, request, redirect, session, escape, render_template_string
from flask_babelex import Babel
from flask_sqlalchemy import SQLAlchemy
from flask_user import current_user, login_required, roles_required
from sqlalchemy.sql import table, column, select
from sqlalchemy import MetaData, create_engine
from flask_user import login_required, roles_required, UserManager, UserMixin
from flask_login import login_user
class ConfigClass(object):
SECRET_KEY = 'This is an INSECURE secret!! DO NOT use this in production!!'
SQLALCHEMY_DATABASE_URI = 'sqlite:///eticaret.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TLS = False
MAIL_USERNAME = 'nergis.aktug2014@gmail.com'
MAIL_PASSWORD = '05383896877'
MAIL_DEFAULT_SENDER = '"MyApp" <xyz@gmail.com>'
USER_ENABLE_EMAIL = True
USER_ENABLE_USERNAME = False
USER_EMAIL_SENDER_EMAIL = "noreply@example.com"
def create_app():
app = Flask(__name__)
app.config.from_object(__name__ + '.ConfigClass')
db = SQLAlchemy(app)
class Kullanici(db.Model):
__tablename__ = 'Kullanici'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(80), unique=True)
sifre = db.Column(db.String(80))
rolId = db.Column(db.Integer, db.ForeignKey('rol.rolId', ondelete='CASCADE'))
def __init__(self, email, sifre,rolId):
self.email = email
self.sifre = sifre
self.rolId =rolId
class Roller(db.Model):
__tablename__ = 'rol'
rolId = db.Column(db.Integer, primary_key=True)
rolisim = db.Column(db.String(80))
class urunler(db.Model):
__tablename__ = 'urunler'
urun_id = db.Column(db.Integer, primary_key=True)
urunismi = db.Column(db.String(80))
urunresmi = db.Column(db.String(80))
urunFiyati = db.Column(db.Integer)
markaId = db.Column(db.Integer(), db.ForeignKey('markalar.markaId', ondelete='CASCADE'))
def __init__(self, urunismi, urunresmi, urunFiyati,markaId):
self.urunismi =urunismi
self.urunresmi = urunresmi
self.urunFiyati = urunFiyati
self.markaId=markaId
class markalar(db.Model):
__tablename__ = 'markalar'
markaId = db.Column(db.Integer, primary_key=True)
markaadi = db.Column(db.String(80))
marka_modeli = db.Column(db.String(80))
def __init__(self, markaadi, marka_modeli):
self.markaadi = markaadi
self.marka_modeli = marka_modeli
class musteri(db.Model):
__tablename__ = 'musteri'
musteriId = db.Column(db.Integer, primary_key=True)
musteriadi = db.Column(db.String(80))
musterisoyadi = db.Column(db.String(80))
mail = db.Column(db.String(80), unique=True)
telefon = db.Column(db.Integer)
sifre = db.Column(db.String(80))
il = db.Column(db.String(80))
ilce = db.Column(db.String(80))
kullaniciId = db.Column(db.Integer(), db.ForeignKey('Kullanici.id', ondelete='CASCADE'))
def __init__(self, musteriadi, musterisoyadi, mail, telefon, sifre, il, ilce, kullaniciId):
self.musteriadi = musteriadi
self.musterisoyadi = musterisoyadi
self.mail = mail
self.telefon = telefon
self.sifre = sifre
self.il = il
self.ilce = ilce
self.kullaniciId = kullaniciId
class siparis(db.Model):
__tablename__ = 'siparis'
siparisId = db.Column(db.Integer, primary_key=True)
musteriId = db.Column(db.Integer(), db.ForeignKey('musteri.musteriId', ondelete='CASCADE'))
urunId = db.Column(db.Integer(), db.ForeignKey('urunler.urun_id', ondelete='CASCADE'))
siparisno = db.Column(db.Integer)
siparisTarihi = db.Column(db.Integer)
odemeId = db.Column(db.Integer())
def __init__(self, musteriId, urunId, siparisno, siparisTarihi, odemeId):
self.musteriId = musteriId
self.urunId = urunId
self.siparisno = siparisno
self.siparisTarihi = siparisTarihi
self.odemeId = odemeId
db.create_all()
tumUrunler = urunler.query.all()  # initial snapshot; keeps the 'urunler' model class unshadowed
@app.route('/')
def anasayfa():
tumVeri=urunler.query.all()
return render_template('index.html',tumVeri=tumVeri)
@app.route('/kayit', methods=['GET', 'POST'])
def kayit():
if request.method == 'POST':
mail = request.form['email']
parola = request.form['sifre']
yeniKullanici = Kullanici(email=mail, sifre=parola, rolId=0)  # rolId=0 is an assumed default member role
db.session.add(yeniKullanici)
db.session.commit()
if yeniKullanici is not None:
mesaj = "Kayıt Başarıyla Sağlanmıştır."
return render_template("index.html", mesaj=mesaj)
else:
return render_template('kayit.html')
@app.route('/admin')
def admin():
return render_template("admin.html")
@app.route('/uye', methods=['GET', 'POST'])
def uye():
return render_template("uyeGirisi.html")
@app.route('/giris', methods=['GET', 'POST'])
def giris():
if request.method == 'GET':
return render_template('uyeGiris.html')
else:
email = request.form['email']
sifre = request.form['sifre']
data = Kullanici.query.filter_by(email=email, sifre=sifre).first()
if data is not None:
if Kullanici.query.filter_by(email=email, sifre=sifre, rolId=1).first():  # assumes rolId 1 marks an admin account; the model has no 'yetki' column
session['admin_giris'] = True
return render_template('admin.html', yetki=1, giris=session['admin_giris'], urunler=tumUrunler)
else:
session['uye_giris'] = True
return render_template('index.html', yetki=0, giris=session['uye_giris'], urunler=tumUrunler)
else:
return render_template('uyeGiris.html')
@app.route('/cikis')
def cikis():
session.pop('admin_giris',None)
session.pop('uye_giris',None)
return render_template("index.html")
@app.route('/urunEkle')
def urunGoster():
tumVeri=urunler.query.all()
return render_template("urunEkle.html",tumVeri=tumVeri)
@app.route('/urunEklemeYap',methods=['POST'])
def urunEklemeYap():
urunismi=request.form['urunismi']
urunResmi=request.form['urunresmi']
urunFiyati=request.form['fiyati']
markaId=request.form['markaId']
yeniUrun=urunler(urunismi=urunismi,urunresmi=urunResmi,urunFiyati=urunFiyati,markaId=markaId)
db.session.add(yeniUrun)
db.session.commit()
return redirect(url_for("urunGoster"))
@app.route("/sil/<string:id>")
def sil(id):
urun=urunler.query.filter_by(urun_id=id).first()
db.session.delete(urun)
db.session.commit()
return redirect(url_for('urunGoster'))
@app.route('/guncelle/<string:id>',methods=['POST','GET'])
def guncelle(id):
try:
urunismi = request.form.get("urunİsmi")
urunresmi = request.form.get("urunresmi")
urunFiyati = request.form.get("urunFiyati")
markaId = request.form.get("markaId")
urun = urunler.query.filter_by(urun_id=id).first()
urun.urunismi = urunismi
urun.urunresmi=urunresmi
urun.urunFiyati=urunFiyati
urun.markaId=markaId
db.session.commit()
except Exception as e:
print("güncelleme yapılamadı")
print(e)
return redirect(url_for('urunGoster'))
@app.route('/sepet')
def sepet():
return render_template("sepet.html")
@app.route('/Markalar')
def Markalar():
tumMarka=markalar.query.all()
return render_template("marka.html",tumMarka=tumMarka)
return app
if __name__ == '__main__':
app=create_app()
app.run(host='127.0.0.1', port=5000, debug=True)
|
[
"nergis.aktug2014@gmail.com"
] |
nergis.aktug2014@gmail.com
|
0e10e7efaf7e20b32f5121b6c439c455706e8bcf
|
97287008f36bb0a483def8c88a7fe3ee1a190091
|
/locust_test.py
|
7bf9a14ff5b5cd2fa68b2d2aa4070d373d162b62
|
[] |
no_license
|
olivetree123/miaosha
|
2adb01ebbc1f55aaefa01343c9a981190a60b061
|
aef4b4850863434ed694c774c4e5488af1cfca05
|
refs/heads/master
| 2022-12-17T19:37:13.125470
| 2020-09-25T02:56:58
| 2020-09-25T02:56:58
| 295,648,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,452
|
py
|
import os
import json
from locust import HttpUser, TaskSet, task, between
# class WebsiteTasks(TaskSet):
# def on_start(self):
# self.client.post("/api/login",
# json={
# "account": "gaojian",
# "cipher": "123456"
# })
# @task
# def buy(self):
# r = self.client.post("/api/buy",
# json={
# "goods": [{
# "id": "b9c661c3c5d949ca866512c936bcb6f1",
# "amount": 1,
# }]
# })
# if r.status_code == 200:
# rst = json.loads(r.text, strict=False)
# if rst["code"] == 0:
# r.success()
# else:
# r.failure("code:%s ErrorMsg:%s" %
# (rst["code"], rst["message"]))
# else:
# r.failure("status_code:%s" % r.status_code)
class WebsiteUser(HttpUser):
# task_set = WebsiteTasks
host = "http://localhost:5000"
wait_time = between(1, 2)
token = "ea0d85e817874df9a45c1606de2970eb"
# def on_start(self):
# r = self.client.post("/api/login",
# json={
# "account": "gaojian",
# "cipher": "123456"
# })
# if not r.ok:
# raise Exception("failed to login")
# rst = json.loads(r.text, strict=False)
# if rst["code"] != 0:
# raise Exception("failed to login")
# self.token = rst["data"]["token"]
@task
def buy(self):
r = self.client.post(
"/api/buy",
headers={"Authorization": "Token {}".format(self.token)},
json={
"goods": [{
"id": "b9c661c3c5d949ca866512c936bcb6f1",
"amount": 1,
}]
})
if r.status_code == 200:
rst = json.loads(r.text, strict=False)
if rst["code"] != 0:
print("Error, code = %s, ErrorMsg = %s" %
(rst["code"], rst["message"]))
else:
print("Request Failed, status_code = ", r.status_code)
|
[
"gaojian@g13125g.h3c.huawei-3com.com"
] |
gaojian@g13125g.h3c.huawei-3com.com
|
f6c251cdce11dd0e1e9f351b3b011a956129af9d
|
069b8abf52f678d42cb44da38ac57f1304c230e3
|
/neitwork/__init__.py
|
5619d5ca005491e0da6b5c46d3d2b7c59cab560c
|
[] |
no_license
|
neiteng/neitwork
|
6eea9d0b50bb95c6f9f33a21a806c499a2307b9b
|
fdf0410e7de2fdfcebcd1c3f2c440dfcefa65341
|
refs/heads/master
| 2021-04-28T14:55:11.904366
| 2018-02-26T06:18:08
| 2018-02-26T06:18:08
| 121,978,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
from . import layer
from . import function
from . import optimizer
from . import trainer
|
[
"neiteng.kyoto@gmail.com"
] |
neiteng.kyoto@gmail.com
|
1ece1d4e55f00e4c4abfc4a3a5fb886074f1a93d
|
6e5ae7164c961f7ace769624099c902fe187d8a8
|
/validator.py
|
a10409b0f5e083ab8b834a3743ee327d59c88aa7
|
[] |
no_license
|
RobertoCarnauba/swagger-validator
|
0d8b22b2192aca3c7a2ca92ea0a643f110d36384
|
42e5aa860dd28683aea7dacd0e60a68520cde8ac
|
refs/heads/master
| 2023-07-13T15:46:53.203086
| 2021-08-27T20:08:09
| 2021-08-27T20:08:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,725
|
py
|
import yaml
swagger = yaml.safe_load(open(
'C:/Users/robertod/OneDrive - HDI SEGUROS SA/Área de Trabalho/python-swagger/swagger_validator/cor-pty-person-credit-score-read-be.yaml'))
print('----------------------------------------------------------------------------------------------------------------------')
print(' **** Iniciando a validação do swagger ****')
corporate = ['broker', 'business', 'channel', 'finance', 'humanResources', 'integration',
'legal', 'marketing', 'party', 'product', 'reference', 'reinsurance', 'security']
insurance = ['claim', 'coinsurance-policy', 'coinsurance-proposal',
'coinsurance-quotation', 'policy', 'proposal', 'quotation', 'sales']
marketplace = ['policy', 'proposal', 'quotation', 'sales']
validSuperDomain = ['corporate', 'insurance', 'marketplace']
funcionalidade_tecnicas = ['calculate', 'check', 'create', 'delete',
'insert', 'list', 'pull', 'push', 'read', 'recieve', 'send', 'update']
# prefixo_repositorio_corporate = []
# prefixo_repositorio_insurance = []
# prefixo_repositorio_marketeplace = []
print('===== Validação do basePath =====')
# basePath validation
basePath = swagger['basePath']
try:
'basePath' in locals()
print('Has basePath')
superDominio = basePath.split("/")[1]
dominio = basePath.split("/")[2]
if any(x.isupper() for x in superDominio):
print(' *** O SuperDominio não poder ter letra maiúscula ***')
if superDominio in validSuperDomain:
print('Super Dominio OK: ' + (superDominio))
if superDominio == 'corporate':
swaggerSuperDomain = corporate
elif superDominio == 'insurance':
swaggerSuperDomain = insurance
elif superDominio == 'marketplace':
swaggerSuperDomain = marketplace
else:
print('O Super Dominio está ERRADO! ' + (superDominio))
swaggerSuperDomain = dominio
except yaml.YAMLError as exc:
print(exc)
if dominio in swaggerSuperDomain:
print('Dominio de dados OK: ' + (dominio))
print('basePath OK: ' + (basePath))
else:
print(" *** ATENÇÃO basePath com inconsistência ***")
print('Verificar o Domínio de Dado:' + (dominio))
print('-------')
# Validating the host
print('===== Validação do host =====')
try:
'host' in locals()
print('Has host')
host = swagger['host']
api = host[0:4]
if not api == 'api.':
print('Erro no host add api no inicio')
ponto = host[4:]
indice_ponto = ponto.find('.')
nome_servico = ponto[:indice_ponto]
print('Nome do serviço: ' + (nome_servico))
funcionalidade = nome_servico[::-1]
find_traco = funcionalidade.find('-')
tipo_de_servico = nome_servico[-find_traco:]
if tipo_de_servico == 'be':
print('Tipo de serviço: ' + (tipo_de_servico))
servico = funcionalidade.split("-")[1]
convert_service = servico[::-1]
if convert_service in funcionalidade_tecnicas:
print('Funcionalidade técnica: ' + convert_service)
else:
print('verificar a funcionaldiade de negocio')
else:
servico = funcionalidade.split("-")[1]
if servico[::-1] in funcionalidade_tecnicas:
print('Tipo de serviço: ' + (tipo_de_servico))
print('Funcionalidade técnica: ' + (servico[::-1]))
else:
print('verificar a funcionaldiade de negocio: ' + (servico[::-1]))
except yaml.YAMLError as exc:
print(exc)
print('---')
# securityDefinitions validation
securityDefinitions = swagger.get('securityDefinitions')
if securityDefinitions is not None:
print('Has securityDefinitions')
else:
print('Missing securityDefinitions')
print('---')
# security validation
security = swagger.get('security')
if security is not None:
print('Has security')
else:
print('Missing security')
print('---')
# parameters validation
parameters = swagger.get('parameters')
if parameters is not None:
print('Has parameters')
else:
print('Missing parameters')
print('---')
# responses validation
responses = swagger.get('responses')
if responses is not None:
print('Has responses')
else:
print('Missing responses')
print('---')
# paths validation
paths = swagger['paths']
try:
'paths' in locals()
print('Has paths')
except yaml.YAMLError as exc:
print(exc)
print('---')
# definition/payload validation
# definitions = swagger['definitions']
# try:
# 'definitions' in locals()
# print('Has definitions')
# print(json.dumps(definitions, sort_keys=True, indent=2))
# except yaml.YAMLError as exc:
# print(exc)
print(' **** Fim da validação ****')
|
[
"betocarnauba@hotmail.com"
] |
betocarnauba@hotmail.com
|
df112e7335a865a08acddb35152e374d2f38c333
|
9ace5b6c2380f69db7887774a9ef58a02d893ea6
|
/proyecto/nestor_save_msgs/connection.py
|
3493e35cdd541deade26f1e07e5973a100adae73
|
[] |
no_license
|
heru-nan/slack-bot-and-tutorials
|
72babe351fc1969c3231c80505e8ef39b6d6cc25
|
40ef30b4e5b9557c348c6ea85937b500956b0a17
|
refs/heads/main
| 2023-02-15T13:44:41.067824
| 2021-01-08T17:39:06
| 2021-01-08T17:39:06
| 309,792,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
import os
import mysql.connector
from datetime import datetime, date
host = os.environ["MYSQL_HOST"]
port = int(os.environ["MYSQL_PORT"])
print("host: " + str(host) + " | port: " + str(port))
TABLE_QUERY = (
"CREATE TABLE `messages` ("
" `id` int(11) NOT NULL AUTO_INCREMENT,"
" `user` varchar(60) NOT NULL,"
" `channel` varchar(60) NOT NULL,"
" `text` VARCHAR(255) NOT NULL,"
" `ts` date NOT NULL,"
" PRIMARY KEY (`id`)"
") ENGINE=InnoDB")
INSERT_QUERY = ("INSERT INTO messages "
"(user, channel, text, ts) "
"VALUES (%s, %s, %s, %s)")
def save_msg(obj):
cnx = mysql.connector.connect(host = host, port = port, user="root", password="password", database="db")
if not checkTableExists(cnx, "messages"):
cur = cnx.cursor()
try:
print("Creating table {}: ".format("MESSAGES"), end='')
cur.execute(TABLE_QUERY)
except mysql.connector.Error as err:
print(err.msg)
cur.close()
cur = cnx.cursor()
d_format = datetime.fromtimestamp(float(obj["ts"])).date()
obj["ts"] = d_format
obj["user"] = "<@{}>".format(obj["user"])
obj["channel"] = "<@{}>".format(obj["channel"])
cur.execute(INSERT_QUERY, list(obj.values()))
res = ""
if cur.lastrowid:
res = "last insert id " + str(cur.lastrowid)
else:
res = "last insert id not found"
cnx.commit()
cur.close()
cnx.close()
return res
def checkTableExists(cnx, tablename):
dbcur = cnx.cursor()
dbcur.execute("""
SELECT COUNT(*)
FROM information_schema.tables
WHERE table_name = '{0}'
""".format(tablename.replace('\'', '\'\'')))
if dbcur.fetchone()[0] == 1:
dbcur.close()
return True
return False
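# Example call (added; values are hypothetical and a reachable MySQL server is
# assumed). Note that INSERT_QUERY relies on the dict preserving the key order
# user, channel, text, ts:
# save_msg({"user": "U123", "channel": "C456", "text": "hello", "ts": "1609459200.0002"})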
|
[
"hrwgallardo@gmail.com"
] |
hrwgallardo@gmail.com
|
9e46f87709b383c14f1fc7c392d2d1abab38e158
|
e3013899b69d6bc9922b82f9648e5ada59b6cf7e
|
/project2/static-router.py
|
03ab6239bea80b52b2cd6b7ac168400bf8379ef4
|
[] |
no_license
|
stacybird/CS594
|
24eec43f26045d28b96324b719fe346170f2e025
|
cd261ce17f2b94495bc16eb889f283ebec9f4881
|
refs/heads/master
| 2021-01-01T19:16:24.725138
| 2013-06-08T07:05:18
| 2013-06-08T07:05:18
| 10,066,570
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,998
|
py
|
#!/usr/bin/python
# Copyright (c) 2013 Charles V Wright <cvwright@cs.pdx.edu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The names of the authors and copyright holders may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import time
import heapq
import random
import difflib
import matplotlib
import ipaddr
import struct
import socket
from multiprocessing import Process, Queue
from Queue import Full as QueueFullException
from optparse import OptionParser
# dpkt is still off-limits for the most part
# This is only here to allow reading and
# writing of pcap files.
import dpkt
# But the Ethernet module is fair game!
# Feel free to use this one:
from dpkt.ethernet import Ethernet
from dpkt.udp import UDP
from dpkt.rip import RIP
DEBUG = False
num_interfaces = 0
# our Global definitions
# dictionary for mac address table
mac_tbl = {}
# list of list for interface table
interface_tbl = []
fwd_tbl = []
fwd_tbl_srt = []
# Our new functions
##
##
## Cited Sources May be Needed
# credit: Tyler?
def check_fwd(ck_add):
global fwd_tbl_srt
#Relocate this code to a function called locate add a counter
#Iterate through list from top to bottom returning the Forward address
netwk = ipaddr.IPv4Address(ck_add)
for i in fwd_tbl_srt:
if netwk in i[0]:
#returns the address found in the fwd_tbl
return i[1]
# credit: Tyler?
def check_iface(ck_add):
global interface_tbl
#Relocate this code to a function called locate add a counter
#Iterate through list from top to bottom returning the Forward address
netwk = ipaddr.IPv4Address(ck_add)
for i in interface_tbl:
if netwk in i[1]:
#retuns the interface port for the matched address
return i[2]
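# Example (added): if fwd_tbl_srt holds 10.1.0.0/16 -> 10.1.0.1 ahead of
# 10.0.0.0/8 -> 10.0.0.1 (it is sorted longest prefix first), then
# check_fwd("10.1.2.3") returns the /16 next hop, giving longest-prefix matching.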
# credit: Tyler?
def send_arp_resp(ts, iface, pkt, queues):
global interface_tbl
if DEBUG:
print pkt.encode("hex")
destIP = pkt[28:32]#slice from pkt sender IP
destMac = pkt[6:12]#slice from pkt sender Mac
srcIP = pkt[38:42]#slide from pkt dest IP
#Second Variable for Manipulation
srcIP2 = srcIP
#Construct in Dot Notation for use in function check_iface
srcConst = str(int((srcIP2[0:1].encode("hex")), 16)) + "." + str(int((srcIP2[1:2].encode("hex")), 16)) + "." + str(int((srcIP2[2:3].encode("hex")), 16)) + "." + str(int((srcIP2[3:4].encode("hex")), 16))
catch = check_iface(srcConst)
srcMac = interface_tbl[int(catch)][0]
#split mac address to remove :
srcMac = srcMac.split(":")
#Change to Int then Binary and Concatenate
srcMacT = chr(int(srcMac[0], 16))
srcMacT += chr(int(srcMac[1], 16))
srcMacT += chr(int(srcMac[2], 16))
srcMacT += chr(int(srcMac[3], 16))
srcMacT += chr(int(srcMac[4], 16))
srcMacT += chr(int(srcMac[5], 16))
if DEBUG:
print srcMacT.encode("hex")
#rewrite items pkt
pkt = destMac + pkt[6:]
pkt = pkt[:6] + srcMacT + pkt[12:]
pkt = pkt[:21] + chr(2) + pkt[22:]
pkt = pkt[:22] + srcMacT + pkt[28:]
pkt = pkt[:28] + srcIP + pkt[32:]
pkt = pkt[:32] + destMac + pkt[38:]
pkt = pkt[:38] + destIP
if DEBUG:
print pkt.encode("hex")
#prep for sending
q = queues[iface]
try:
q.put( (ts,pkt) )
except QueueFullException:
drop_count += 1
# Written by Stacy
# Ignore Ethernet frames addressed to other devices on the LAN
# This function should take in an ethernet frame, check the MAC address to
# see if it is us on this interface. If so, returns a 1. If not, return "None".
def ethernet_for_us(packet, arrival_iface):
global interface_tbl
# first, turn the packet into readable ethernet:
eth = dpkt.ethernet.Ethernet(packet)
# now check our interface table to see if the interface it came in on matches
# the MAC address being sent to. If not, we ignore it.
strng = interface_tbl[arrival_iface][0]
newstr = strng.replace(":", "")
if (eth.dst.encode("hex") == newstr):
if DEBUG:
print "Arrival interface MAC %s" %interface_tbl[arrival_iface][0]
return 1
else:
if DEBUG:
print "Source and dest interfaces do not match"
return 0
# Written by Stacy
# Function to forward IP packets to the proper outbound interface based on
# longest-prefix matching. Takes in the ethernet packet, returns the interface
# to send out on.
def forward_IP_packets_to_iface(packet):
# get the destination IP out of the ethernet data payload (the IP packet)
dest_ip = packet[30:34]
dest_ip = dest_ip.encode("hex")
# turn this into the quad address we stored the table as
pretty_ip = str(int(dest_ip[0:2], 16)) + "." + str(int(dest_ip[2:4], 16)) + "." + str(int(dest_ip[4:6], 16)) + "." + str(int(dest_ip[6:8], 16))
if DEBUG:
print "Destination IP: %s" %pretty_ip
#Test Look Up Return IPV4 address object based on longest prefix match
catch = check_fwd(pretty_ip)
if DEBUG:
print "IP of outgoing interface %s" %catch
catch2 = check_iface(catch)
if DEBUG:
print "interface of the ip being sent to: %s \n" %catch2
return catch2
# Written by Stacy
# verify checksum. if invalid can drop the packet.
def valid_checksum(packet):
# pull the expected checksum out of the header
pkt_value = packet[24:26]
packet_header = packet[14:34]
sum = 0
# compute the expected checksum
##### math: (14 +2i) to (14 +2i +2) add the numbers all together in hex
for i in range(0, 10):
if i != 5:
temp = packet_header[2*i:2*i+2].encode("hex")
sum += int(temp, 16)
else:
pass
##### then do the 1's complement: div(result) + mod(result). That's the expected.
sum = (sum % int('ffff', 16))
calc_checksum = hex(sum ^ 0xFFFF)
if DEBUG:
print "Calculated xor checksum: %s" %calc_checksum
print "Original checksum: %s" %pkt_value.encode("hex")
if int(pkt_value.encode("hex"), 16) == int(calc_checksum, 16):
if DEBUG:
print "Checksums matched, value was: %s" %pkt_value.encode("hex")
return 1
else:
if DEBUG:
print "Checksums did not match, packet should drop."
return 0
# Written by Stacy
# Decrement the TTL
def decrement_ttl(packet):
if ord(packet[22]) == 1:  # packet is a byte string; compare the raw TTL byte
# send ICMP "Time Exceeded" message
if DEBUG:
print "Time Exceeded for this packet"
return 0
else:
old_ttl = int(packet[22:23].encode("hex"), 16)
new_ttl = old_ttl - 1
if DEBUG:
print "Original TTL: %s" %old_ttl
print "New TTL: %s" %new_ttl
packet = packet[:22] + chr(new_ttl) + packet[23:]
return packet
# Written by Stacy
# recompute the IP header checksum based on the new header
def recompute_checksum(packet):
# pull the header out for computation
packet_header = packet[14:34]
sum = 0
# compute the checksum
##### math: (14 +2i) to (14 +2i +2) add the numbers all together in hex
for i in range(0, 10):
if i != 5:
temp = packet_header[2*i:2*i+2].encode("hex")
sum += int(temp, 16)
else:
pass
##### then do the 1's complement: div(result) + mod(result). That's the expected.
sum = (sum % int('ffff', 16))
calc_checksum = hex(sum ^ 0xFFFF)
calc_checksum = "{0:#0{1}x}".format(int(calc_checksum, 16),6)
if DEBUG:
print "Calculated xor checksum: %s" %calc_checksum
# to update:::::::: pkt_value = packet[24:26]
packet = packet[:24] + chr(int(calc_checksum[2:4], 16)) + packet[25:]
packet = packet[:25] + chr(int(calc_checksum[4:6], 16)) + packet[26:]
return packet
# credit: Tyler?
def send_ICMP_resp(ts, iface, pkt, queues):
global interface_tbl
if DEBUG:
print pkt.encode("hex")
destIP = pkt[28:32]#slice from pkt sender IP
destMac = pkt[6:12]#slice from pkt sender Mac
srcIP = pkt[38:42]#slide from pkt dest IP
#Second Variable for Manipulation
srcIP2 = srcIP
#Construct in Dot Notation for use in function check_iface
srcConst = str(int((srcIP2[0:1].encode("hex")), 16)) + "." + str(int((srcIP2[1:2].encode("hex")), 16)) + "." + str(int((srcIP2[2:3].encode("hex")), 16)) + "." + str(int((srcIP2[3:4].encode("hex")), 16))
catch = check_iface(srcConst)
srcMac = interface_tbl[int(catch)][0]
#split mac address to remove :
srcMac = srcMac.split(":")
#Change to Int then Binary and Concatenate
srcMacT = chr(int(srcMac[0], 16))
srcMacT += chr(int(srcMac[1], 16))
srcMacT += chr(int(srcMac[2], 16))
srcMacT += chr(int(srcMac[3], 16))
srcMacT += chr(int(srcMac[4], 16))
srcMacT += chr(int(srcMac[5], 16))
if DEBUG:
print srcMacT.encode("hex")
#rewrite items pkt
pkt = destMac + pkt[6:]
pkt = pkt[:6] + srcMacT + pkt[12:]
pkt = pkt[:21] + chr(2) + pkt[22:]
pkt = pkt[:22] + srcMacT + pkt[28:]
pkt = pkt[:28] + srcIP + pkt[32:]
pkt = pkt[:32] + destMac + pkt[38:]
pkt = pkt[:38] + destIP
if DEBUG:
print pkt.encode("hex")
#prep for sending
q = queues[iface]
try:
q.put( (ts,pkt) )
except QueueFullException:
pass  # drop the packet if the output queue is full
# credit: Tyler
def processRip(pkt, iface, queues):
eth = dpkt.ethernet.Ethernet(pkt)
ip = eth.data
udp = ip.data
rip = dpkt.rip.RIP(udp.data)
for rte in rip.rtes:
#Process algorithm
if rte.metric > 0 and rte.metric < 16:
if DEBUG:
print "valid metric %d" % rte.metric
if DEBUG:
print "family %d" % rte.family
print '{0:32b}'.format(rte.addr)
print rte.addr
print '{0:32b}'.format(rte.subnet)
print rte.subnet
print rte.next_hop
def router_init(options, args):
global num_interfaces
global interface_tbl
global mac_tbl
global fwd_tbl
global fwd_tbl_srt
# Open up interfaces config File
# f = open("/Users/tylerfetters/Desktop/CS594/test/Project2/Project2/Project2/interfaces.conf")
f = open("M1-test04/interfaces.conf")
line_cnt = 0
for line in f:
line = line.split()
if line[0] == "#":
pass
else:
interface_tbl.append([])
interface_tbl[line_cnt].append(line[1])
netwk = ipaddr.IPv4Network(line[2])
interface_tbl[line_cnt].append(netwk)
interface_tbl[line_cnt].append(line[0])
line_cnt += 1
# Set Number of interfaces found in file
num_interfaces = line_cnt
#Open up static mac table
# f = open("/Users/tylerfetters/Desktop/CS594/test/Project2/Project2/Project2/MAC-address-table.txt")
f = open("M1-test04/MAC-address-table.txt")
for line in f:
line = line.split()
if line[0] == "#":
pass
else:
mac_tbl[line[0]] = line[1]
#Open up static forwarding table
# f= open("/Users/tylerfetters/Desktop/CS594/test/Project2/Project2/Project2/forwarding.conf")
f= open("M1-test04/forwarding.conf")
line_cnt = 0
for line in f:
line = line.split()
if line[0] == "#":
pass
else:
fwd_tbl.append([])
sub_line = line[0].split("/")
#Also superceed by ipaddr module
#sub_add = sub_line[0].split(".")
netwk = ipaddr.IPv4Network(line[0])
ipAd = ipaddr.IPv4Address(line[1])
fwd_tbl[line_cnt].append(netwk)
fwd_tbl[line_cnt].append(ipAd)
fwd_tbl[line_cnt].append(sub_line[1])
# This code has been commented as the IPAddr module better manages this process
#for s in sub_add:
#s = int(s)
#s = bin(s)
#sub_n = s[2:10]
#if sub_n.__len__() < 8:
#0's in front of base
#zeros = ""
#for i in range(sub_n.__len__(), 8):
#zeros += "0"
#sub_n = zeros + sub_n
#sub_con += sub_n
#entry = sub_con[0:int(sub_line[1])]
#fwd_tbl[entry] = line[1]
line_cnt += 1
fwd_tbl_srt = sorted(fwd_tbl, key=lambda x: int(x[2]), reverse=True)
if DEBUG:
print "Forwarding Table:"
print fwd_tbl_srt
if DEBUG:
print "%d interfaces \n" %num_interfaces
print "\n Interface Table"
print interface_tbl
print "\n Mac Address Table"
print mac_tbl
def callback(ts, pkt, iface, queues):
if DEBUG:
print ""
#The Response must be ignored if it is not from the RIP port. - Do Nothing
#The datagram's IPv4 source address should be checked to see whether the
#datagram is from a valid neighbor; the source of the datagram must be
#on a directly-connected network.
#catch IP then UDP
if pkt[12:14].encode("hex") == "0800" and pkt[23:24].encode("hex") == "11" :
#listen on port 502
if pkt[36:38].encode("hex") == "0208":
processRip(pkt, iface, queues)
#The datagram's IPv4 source address should be checked to see whether the
#datagram is from a valid neighbor; the source of the datagram must be
#on a directly-connected network.
#It is also worth checking to see whether the response is from one of the router's own addresses. - Do Nothing
else:
#verify no need to handle any other type of UDP packages
return
#check if arp, if so send an arp response.
if pkt[12:14].encode("hex") == "0806":
send_arp_resp(ts, iface, pkt, queues)
# check if the packet is for us, otherwise ignore it.
elif ethernet_for_us(pkt, iface) == 1:
# verify checksum. if invalid can drop the packet.
if valid_checksum(pkt) == 1:
outgoing_iface = forward_IP_packets_to_iface(pkt)
if DEBUG:
print "Outgoing Interface: %s" %int(outgoing_iface)
# decrement TTL
pkt = decrement_ttl(pkt)
if pkt != 0:
# recompute the corrected checksum:
pkt = recompute_checksum(pkt)
# then enqueue on the appropriate interface:
q = queues[int(outgoing_iface)]
try:
q.put( (ts,pkt) )
except QueueFullException:
drop_count += 1
def get_packet(g):
packet = None
try:
packet = g.next()
except StopIteration:
packet = None
return packet
def run_output_interface(iface_num, q, trans_delay):
# filename = "/Users/tylerfetters/Desktop/CS594/test/Project2/Project2/Project2/output-%d.pcap" % iface_num
filename = "/Users/stacy/Desktop/CS 594/proj2/output_test/output-%d.pcap" % iface_num
f = open(filename, "wb")
writer = dpkt.pcap.Writer(f)
while True:
p = q.get()
if p is None:
writer.close()
f.close()
break
ts, pkt = p
time.sleep(trans_delay)
writer.writepkt(pkt, ts+trans_delay)
if __name__ == "__main__":
# Seed the random number generator
random.seed()
# Parse command-line arguments
parser = OptionParser()
parser.add_option("-i", "--interfaces", dest="i", help="interface configuration file", default="interfaces.conf")
parser.add_option("-m", "--mac-addr-table", dest="m", help="MAC address table", default="MAC-address-table.txt")
parser.add_option("-f", "--forwarding-table", dest="f", help="forwarding table", default="forwarding.conf")
parser.add_option("-d", "--debug", dest="debug", action="store_true", help="turn on debugging output", default=False)
(options, args) = parser.parse_args()
DEBUG = options.debug
router_init(options, args)
# First, initialize our inputs
generators = {}
input_files = {}
for i in range(num_interfaces):
# f = open("/Users/tylerfetters/Desktop/CS594/test/Project2/Project2/Project2/input-%d.pcap" % i, "rb")
f = open("M1-test04/input-%d.pcap" % i, "rb")
input_files[i] = f
reader = dpkt.pcap.Reader(f)
generator = reader.__iter__()
generators[i] = generator
# Initialize our output interfaces
output_queues = {}
output_interfaces = {}
transmission_delay = 0.10
for i in range(num_interfaces):
output_queues[i] = Queue(10)
output_interfaces[i] = Process(target=run_output_interface, args=(i, output_queues[i], transmission_delay))
output_interfaces[i].start()
# h is a heap-based priority queue containing the next available packet from each interface.
h = []
# We start out by loading the first packet from each interface into h.
# We always use the heapq functions to access h; this way, we preserve the heap invariant.
for iface in generators.keys():
p = get_packet(generators[iface])
if p is not None:
ts, pkt = p
heapq.heappush(h, (ts, pkt, iface))
# Now we're ready to iterate over all the packets from all the input files.
# By using the heapq functions, we guarantee that we process the packets in
# the order of their arrival, even though they come from different input files.
ts = 0.0 # We keep track of the current packet's timestamp
prev_ts = 0.0 # And the previous packet's timestamp
# While there are packets left to process, process them!
while len(h) > 0:
# Pop the next packet off the heap
p = heapq.heappop(h)
# Unwrap the tuple. The heap contains triples of (timestamp, packet contents, interface number)
ts, pkt, iface = p
if DEBUG:
print "Next packet is from interface %d" % iface
# Inject some additional delay here to simulate processing in real time
interarrival_time = ts-prev_ts
if DEBUG:
print "Main driver process sleeping for %1.3fs" % interarrival_time
time.sleep(interarrival_time)
prev_ts = ts
# Call our callback function to handle the input packet
callback(ts, pkt, iface, output_queues)
p = get_packet(generators[iface])
if p is not None:
# The individual input generators provide us with timestamps and packet contents
ts, pkt = p
# We augment this data with the number of the input interface before putting it into the heap
# The input iface number will be helpful in deciding where to send the packet in callback()
heapq.heappush(h, (ts, pkt, iface))
else:
if DEBUG:
print "Interface %d has no more packets" % iface
# Now that we're done reading, we can close all of our input files.
for i in input_files.keys():
input_files[i].close()
# We also let our output interfaces know that it's time to shut down
for i in output_queues.keys():
output_queues[i].put(None)
# And we wait for the output interfaces to finish writing their packets to their pcap files
for i in output_interfaces.keys():
output_interfaces[i].join()
|
[
"dreamsong@gmail.com"
] |
dreamsong@gmail.com
|
04437caab3fc6597ee8e2f51e96b8e163651cd6c
|
beaa201c50588f215b44aae6dc57ad532edd240e
|
/powerof.py
|
2c3c1f9dca9df7887b159742dd63bdf91b39fb50
|
[] |
no_license
|
karu99/python
|
9e82daa48bdcba7ba195abe6bf05d4f31b1e56c0
|
b8ce59eb74bb5b0c6b0ae7a60b620e8d31feae9e
|
refs/heads/master
| 2020-06-14T10:14:09.304338
| 2019-07-16T09:29:06
| 2019-07-16T09:29:06
| 194,978,568
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
n,m=map(int,input().split())
flag=1
for i in range(1,100):
if(pow(m,i)==n):
print("yes")
flag=0
break
if(flag==1):
print("no")
|
[
"noreply@github.com"
] |
karu99.noreply@github.com
|
89b4f1722d80c8df78c45b08ff5fbee768e5c2cc
|
db35c005ee43fd572e8e5cb30addc710d06080d9
|
/escape-pods.py
|
3ab6039a6f759b6ce7efc3de48a70c5be8a4e19c
|
[] |
no_license
|
jamesa3/foo.bar
|
65b6720f69f38cb197616ac47308f14d0953f458
|
e448a222a79e71ac98ad985f922f0e616de86df7
|
refs/heads/master
| 2022-06-07T02:58:59.988039
| 2020-04-17T02:19:07
| 2020-04-17T02:19:07
| 246,315,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,079
|
py
|
def get_capacity_matrix(entrances, exits, m):
for i, row in enumerate(m):
row.insert(0, 0)
row.append(float('inf') if i in exits else 0)
entrances = [x + 1 for x in entrances]
c = list(m)
n = len(c) + 2
c.insert(0, [float('inf') if x in entrances else 0 for x in range(n)])
c.append([0 for x in range(n)])
return c
def get_adjacencies(c):
A = []
for row in c:
a_row = []
for i, cell in enumerate(row):
if cell:
a_row.append(i)
A.append(a_row)
return A
def breadth_first_search(C, A, s, t, F, M, visited):
queue = [s]
visited[s] = -2
while len(queue):
u = queue.pop(0)
for v in A[u]:
if visited[v] == -1 and C[u][v] - F[u][v] > 0:
visited[v] = u
M[v] = min(M[u], C[u][v] - F[u][v])
if v == t:
return M[v], visited
else:
queue.append(v)
return 0, visited
def solution(entrances, exits, path):
source = 0
sink = len(path) + 1
capacity_matrix = get_capacity_matrix(entrances, exits, path)
adjacencies = get_adjacencies(capacity_matrix)
n = len(capacity_matrix)
flow = 0
residual_capacity = [[0 for x in range(n)] for x in range(n)]
while True:
visited = [-1 for x in range(n)]
M = [0 for x in range(n)]
M[source] = float('inf')
path_flow, visited = breadth_first_search(
capacity_matrix,
adjacencies,
source,
sink,
residual_capacity,
M,
visited
)
if path_flow == 0:
break
flow += path_flow
v = sink
while v != source:
u = visited[v]
residual_capacity[u][v] += path_flow
residual_capacity[v][u] -= path_flow
v = u
return flow
def main():
print(solution([0], [3], [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]))
if __name__ == '__main__':
main()
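# Added note: for the sample network in main() (a chain 0 -> 1 -> 2 -> 3 with
# capacities 7, 6 and 8), the single augmenting path is limited by the capacity-6
# edge, so the script prints 6.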
|
[
"james@cnscloud.io"
] |
james@cnscloud.io
|
d01ed61eea9ac24d3da851fa306c831929e379e5
|
e47876a625a039ff75de1e0ec9d13322ee526612
|
/Unidad 13/LISTA_DOBLEMENTE_ENLAZADA.py
|
0fcad1ff9c6ecedbabd22ddf5069592325c529f7
|
[] |
no_license
|
ruizsugliani/Algoritmos-1-Essaya
|
a8a189fd6b7557566ed6c836711f0fa35f035066
|
56553b135f59fa830099fb894c076c8663af978e
|
refs/heads/main
| 2023-07-23T13:19:43.355425
| 2021-09-10T02:28:59
| 2021-09-10T02:28:59
| 404,926,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
class ListaDobleEnlazada:
def append(self, dato):
nuevo_nodo = _Nodo(dato)
if self.prim is None:
self.prim = nuevo_nodo
else:
actual = self.prim
while actual.prox:
actual = actual.prox
actual.prox = nuevo_nodo
nuevo_nodo.ant = actual
self.cant += 1
def pop(self, i=None):
'''
DOC: to be completed.
'''
def __len__(self):
return self.cant
def __init__(self):
# prim is a _Nodo or None
self.prim = None
self.cant = 0
def __str__(self):
res = ""
act = self.prim
while act:
res += str(act.dato)
act = act.prox
res_f = "] -> [".join(res)
return f"[{res_f}]"
class _Nodo:
def __init__(self, dato, prox=None, ant=None):
self.dato = dato
self.prox = prox
self.ant = ant
l = ListaDobleEnlazada()
l.append(1)
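# Quick check (added): appending a few more values and printing shows the nodes in
# insertion order.
l.append(2)
l.append(3)
print(l)  # expected: [1] -> [2] -> [3]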
|
[
"noreply@github.com"
] |
ruizsugliani.noreply@github.com
|
3dbf3e87b4b004b83e913dd989ed2ab900c5eb16
|
b9e9c89567894fd7e5ddfd27fe9068a074a92df7
|
/pyramid_signup/tests/test_init.py
|
d1de398dc5429102f7465cb8ee45667f5212c697
|
[] |
no_license
|
AnneGilles/pyramid_signup
|
8aeea113176dd64a326caa5f7704026e0538c94a
|
0622d951e686f0926291d98559a6b4afa2c81241
|
refs/heads/master
| 2021-01-18T17:48:48.260300
| 2011-12-09T04:56:44
| 2011-12-09T04:56:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,435
|
py
|
from pyramid import testing
from pyramid_signup.models import User
from pyramid_signup.tests import UnitTestBase
from mock import patch
from mock import Mock
class TestInitCase(UnitTestBase):
def test_root_factory(self):
from pyramid_signup import RootFactory
from pyramid.security import Everyone
from pyramid.security import Authenticated
from pyramid.security import Allow
from pyramid.security import ALL_PERMISSIONS
root_factory = RootFactory(testing.DummyRequest())
assert len(root_factory.__acl__) == 2
for ace in root_factory.__acl__:
assert ace[0] == Allow
if ace[1] == 'group:admin':
assert ace[2] == ALL_PERMISSIONS
elif ace[1] == Authenticated:
assert ace[2] == 'view'
def test_request_factory(self):
from pyramid_signup import SignUpRequestFactory
user1 = User(username='sontek', first_name='john')
self.session.add(user1)
self.session.flush()
with patch('pyramid_signup.unauthenticated_userid') as unauth:
unauth.return_value = 1
request = SignUpRequestFactory({})
request.registry = Mock()
getUtility = Mock()
getUtility.return_value = self.session
request.registry.getUtility = getUtility
user = request.user
assert user == user1
|
[
"sontek@gmail.com"
] |
sontek@gmail.com
|
c117cfa407d725ed6b6888c431a45f884438df39
|
895aed7b64768882783e1b5af6c6f09fccff0145
|
/py/demoled.py
|
f320d122393ee082320fb5c0c4fa23dde71adbdd
|
[] |
no_license
|
cghercoias/LPD8806_PI
|
380936a0502b33b73262ffd08fd52ba0fe2117b9
|
1a31ea0b902e2f191b2c927c6a7505d729df6a84
|
refs/heads/master
| 2022-07-07T06:42:05.295834
| 2018-01-28T03:40:14
| 2018-01-28T03:40:14
| 118,633,055
| 2
| 2
| null | 2022-06-17T23:53:40
| 2018-01-23T15:56:44
|
Python
|
UTF-8
|
Python
| false
| false
| 331
|
py
|
#!/usr/bin/python
from bootstrap import *
from raspledstrip.animation import *
colors = [
(255.0, 0.0, 0.0),
(0.0, 255.0, 0.0),
(0.0, 0.0, 255.0),
(255.0, 255.0, 255.0),
]
while (1>0):
print "starting"
anim = RainbowCycle(led)
for i in range(384 * 2):
anim.step()
led.update()
led.fill_off()
|
[
"catalin.ghercoias@gmail.com"
] |
catalin.ghercoias@gmail.com
|
94b05859d11c2a2649a54bca4da3169f4c5a2088
|
905a984414e3b7ecfd7cb2999121606b576ce5cd
|
/flask_app/footballInfo/routesFootball.py
|
85cb29ee69572d996012fd2c5dd845e4f963125d
|
[] |
no_license
|
GuptaSiddhesh/Football-Rating-and-Review
|
2dcd720a99e1b7caff93993eb75eadf279975cf2
|
fb64ea6bf7787b5ace0ea0c597f8d7ad819c8816
|
refs/heads/main
| 2023-04-20T16:55:29.486948
| 2021-05-13T04:05:07
| 2021-05-13T04:05:07
| 366,276,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
from flask import render_template, request, redirect, url_for, flash, Response
from flask import Blueprint
from flask import session
from flask_mongoengine import MongoEngine
from flask_login import LoginManager, current_user, login_user, logout_user, login_required
# stdlib
from datetime import datetime
from .. import app, bcrypt, client
from ..forms import (PlayerCommentForm)
from ..models import User, Comments, load_user
from ..utils import current_time
football = Blueprint('football', __name__)
@football.route('/players/<player_id>', methods=['GET', 'POST'])
def player_detail(player_id):
r = client.getPlayerByID(player_id)
if type(r) == dict:
return render_template('player_detail.html', error_msg=r['error'])
form = PlayerCommentForm()
if form.validate_on_submit():
comment = Comments(
commenter=load_user(current_user.username),
content=form.text.data,
draftRound=form.draftRound.data,
playAgain=form.playAgain.data,
date=current_time(),
player_id=player_id,
player_name=r.fullname
)
comment.save()
return redirect(request.path)
comm= Comments.objects(player_id=player_id)
print(current_user.is_authenticated)
return render_template('player_detail.html', form=form, player=r, reviews=comm)
@football.route('/team-results/<query>', methods=['GET'])
def team_results(query):
if query != 'ALL':
r =client.get_players_by_team(query)
else:
r = client.getAll()
if type(r) == dict:
return render_template('query.html', error_msg=r['error'])
return render_template('query.html', results=r)
|
[
"36742817+pbangera@users.noreply.github.com"
] |
36742817+pbangera@users.noreply.github.com
|
99e4d37b3396f9e02bf0b8bd87a5011fe44a4a26
|
1032053bae3f0d734ff20125cc9bf2c662866e30
|
/algorithms/python/sorting/counting_sort.py
|
742807eeac5c7e60f3188693e214d341c25bb55b
|
[] |
no_license
|
ikvibhav/DSA_Fundamentals
|
331254364c19ff95cfdd50823a94dd7c4c7982d6
|
267071f578bcdf2f307269a4d16ab668443fdb5d
|
refs/heads/main
| 2023-06-26T05:04:44.090942
| 2021-07-20T08:18:57
| 2021-07-20T08:18:57
| 381,833,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,865
|
py
|
'''
#Assuming the starting of the sequence as 0
def counting_sort(lst):
#Extract the range of the lst values. Create an index_count array using this.
index_count = [0 for _ in range(max(lst))]
#Increment the count for each value in the index array
for _,lst_item in enumerate(lst):
index_count[lst_item-1] += 1
#Create a cummulative sum of the index values
for j in range(len(index_count)-1):
index_count[j+1] = index_count[j+1] + index_count[j]
#Create an output list
output_lst = [0 for _ in range(len(lst))]
#Add corresponding values to the output list
for _,lst_item in enumerate(lst):
output_lst[index_count[lst_item-1]-1] = lst_item
index_count[lst_item-1] -= 1
return output_lst
'''
#For negative numbers also
def counting_sort(lst):
#Extract the range of the lst values. Create an index_count array using this.
min_val = min(lst)
max_val = max(lst)
#Extra 1 is to incorporate 0
range_val = max_val - min_val + 1
index_count = [0 for _ in range(range_val)]
#Increment the count for each value in the index array
for _,lst_item in enumerate(lst):
index_count[lst_item - min_val] += 1
    #Create a cumulative sum of the index values
for j in range(len(index_count)-1):
index_count[j+1] = index_count[j+1] + index_count[j]
#Create an output list
output_lst = [0 for _ in range(len(lst))]
#Add corresponding values to the output list
#for i in range(len(lst)):
# output_lst[index_count[lst[i]-min_val]-1] = lst[i]
# index_count[lst[i]-min_val] -= 1
for _,lst_item in enumerate(lst):
output_lst[index_count[lst_item-min_val]-1] = lst_item
index_count[lst_item-min_val] -= 1
return output_lst
a = [-11,6,4,8,6,4,2,3]
print(a)
print(counting_sort(a))
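# (Editor's note) For reference, the call above should print the ascending
# order of `a`: [-11, 2, 3, 4, 4, 6, 6, 8]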
|
[
"ikvibhav@gmail.com"
] |
ikvibhav@gmail.com
|
0b35dcbf119952569f6b72bfb2a263970be81c51
|
92b2914d39142f241b9003b3dd0c36e64ae3dce0
|
/Learning/Courses/Applied Machine Learning in Python/Week2/Classifier+Visualization.py
|
caeb3a55011971016dfa1b33714ef79c22b4e575
|
[] |
no_license
|
akanumur/Data_Science_Everyday
|
1dd492cb9e48fd225ed8516ee79dd668f5b636b0
|
442784fe2c968723cd54d584ca3dfec626238960
|
refs/heads/master
| 2022-12-12T07:53:25.639936
| 2020-08-21T23:19:54
| 2020-08-21T23:19:54
| 268,927,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,124
|
py
|
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
#
# ---
# # Classifier Visualization Playground
#
# The purpose of this notebook is to let you visualize various classifiers' decision boundaries.
#
# The data used in this notebook is based on the [UCI Mushroom Data Set](http://archive.ics.uci.edu/ml/datasets/Mushroom?ref=datanews.io) stored in `mushrooms.csv`.
#
# In order to better visualize the decision boundaries, we'll perform Principal Component Analysis (PCA) on the data to reduce the dimensionality to 2 dimensions. Dimensionality reduction will be covered in a later module of this course.
#
# Play around with different models and parameters to see how they affect the classifier's decision boundary and accuracy!
# In[2]:
get_ipython().magic('matplotlib notebook')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
df = pd.read_csv('readonly/mushrooms.csv')
print(df)
# In[4]:
df2 = pd.get_dummies(df)
print(df2.head())
# In[5]:
df3 = df2.sample(frac=0.08)
X = df3.iloc[:,2:]
y = df3.iloc[:,1]
pca = PCA(n_components=2).fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(pca, y, random_state=0)
plt.figure(dpi=120)
plt.scatter(pca[y.values==0,0], pca[y.values==0,1], alpha=0.5, label='Edible', s=2)
plt.scatter(pca[y.values==1,0], pca[y.values==1,1], alpha=0.5, label='Poisonous', s=2)
plt.legend()
plt.title('Mushroom Data Set\nFirst Two Principal Components')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.gca().set_aspect('equal')
# In[6]:
def plot_mushroom_boundary(X, y, fitted_model):
plt.figure(figsize=(9.8,5), dpi=100)
for i, plot_type in enumerate(['Decision Boundary', 'Decision Probabilities']):
plt.subplot(1,2,i+1)
mesh_step_size = 0.01 # step size in the mesh
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.arange(x_min, x_max, mesh_step_size), np.arange(y_min, y_max, mesh_step_size))
if i == 0:
Z = fitted_model.predict(np.c_[xx.ravel(), yy.ravel()])
else:
try:
Z = fitted_model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,1]
except:
plt.text(0.4, 0.5, 'Probabilities Unavailable', horizontalalignment='center',
verticalalignment='center', transform = plt.gca().transAxes, fontsize=12)
plt.axis('off')
break
Z = Z.reshape(xx.shape)
plt.scatter(X[y.values==0,0], X[y.values==0,1], alpha=0.4, label='Edible', s=5)
        plt.scatter(X[y.values==1,0], X[y.values==1,1], alpha=0.4, label='Poisonous', s=5)
plt.imshow(Z, interpolation='nearest', cmap='RdYlBu_r', alpha=0.15,
extent=(x_min, x_max, y_min, y_max), origin='lower')
plt.title(plot_type + '\n' +
str(fitted_model).split('(')[0]+ ' Test Accuracy: ' + str(np.round(fitted_model.score(X, y), 5)))
plt.gca().set_aspect('equal');
plt.tight_layout()
plt.subplots_adjust(top=0.9, bottom=0.08, wspace=0.02)
# In[7]:
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[8]:
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=20)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[9]:
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(max_depth=3)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[10]:
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[11]:
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[ ]:
from sklearn.svm import SVC
model = SVC(kernel='linear')
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[12]:
from sklearn.svm import SVC
model = SVC(kernel='rbf', C=1)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[13]:
from sklearn.svm import SVC
model = SVC(kernel='rbf', C=10)
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[14]:
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[15]:
from sklearn.neural_network import MLPClassifier
model = MLPClassifier()
model.fit(X_train,y_train)
plot_mushroom_boundary(X_test, y_test, model)
# In[ ]:
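# (Editor's sketch, not part of the original notebook) One more model, following
# the exact pattern of the cells above, since the introduction invites you to
# experiment with different classifiers:
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier()
model.fit(X_train, y_train)
plot_mushroom_boundary(X_test, y_test, model)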
|
[
"akanumur@uncc.edu"
] |
akanumur@uncc.edu
|
763490ed091de4d95628dd13632ec53d95b9087f
|
1e418971e4d491f154b27b4bf1a9827b79a4ecc2
|
/pyBoard/pyBoard/urls.py
|
dbd6a9adda7d0787e34addff4082b473e405fa3f
|
[] |
no_license
|
JaeyongChoi4666/Django_work
|
c2e82b133608cacea1c087b68419ac384ae4d484
|
502ab87b839db50ffc245cb47d12a5d241e72c4b
|
refs/heads/main
| 2023-06-03T01:10:42.095417
| 2021-06-16T07:23:30
| 2021-06-16T07:23:30
| 372,723,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
"""pyBoard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from board import views
urlpatterns = [
path('admin/', admin.site.urls),
path('list/', views.list),
path('write/', views.write_form),
path('insert/', views.insert),
path('download/', views.download),
path('detail/', views.detail),
path('update/', views.update),
path('delete/', views.delete),
path('reply_insert/', views.reply_insert),
path('reply_delete/', views.reply_delete),
path('json_test/', views.json_test),
]
|
[
"jychoi2@naver.com"
] |
jychoi2@naver.com
|
36d1cdb0cf14edfe05793a672c0556d8c5875baa
|
d1e4f29e583ee964d63bc48554eaa73d67d58eb2
|
/zerver/migrations/0222_userprofile_fluid_layout_width.py
|
3b5c232bee7088bb888cc76437ff9bc3df92ee7b
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
hygolei/zulip
|
299f636f9238f50b0d2746f1c371748f182f1f4e
|
39fe66ab0824bc439929debeb9883c3046c6ed70
|
refs/heads/master
| 2023-07-11T22:50:27.434398
| 2021-08-09T10:07:35
| 2021-08-09T10:07:35
| 375,401,165
| 1
| 1
|
Apache-2.0
| 2021-08-09T10:07:36
| 2021-06-09T15:20:09
|
Python
|
UTF-8
|
Python
| false
| false
| 428
|
py
|
# Generated by Django 1.11.20 on 2019-04-15 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0221_subscription_notifications_data_migration"),
]
operations = [
migrations.AddField(
model_name="userprofile",
name="fluid_layout_width",
field=models.BooleanField(default=False),
),
]
|
[
"tabbott@zulipchat.com"
] |
tabbott@zulipchat.com
|
308bff52ce577ba49c9ba46d0fd7277f04669f7f
|
0e94b21a64e01b992cdc0fff274af8d77b2ae430
|
/spider/004_kekeenglish_daysentence.py
|
35df50ab90f89a35b0fc40370d4b4fef02e20b22
|
[] |
no_license
|
yangnaGitHub/LearningProcess
|
1aed2da306fd98f027dcca61309082f42b860975
|
250a8b791f7deda1e716f361a2f847f4d12846d3
|
refs/heads/master
| 2020-04-15T16:49:38.053846
| 2019-09-05T05:52:04
| 2019-09-05T05:52:04
| 164,852,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 13:18:16 2018
@author: Administrator
"""
import re
from lxml import etree
import urllib.request
import xlwt
response=urllib.request.urlopen('http://www.kekenet.com/kouyu/primary/chuji/')
html = response.read().decode("utf-8")
tr = etree.HTML(html)
#//div[@class="tb-btn-wait"]
#//ul[contains(@class,"J_TSaleProp")]
#//div[contains(@class,"tb-btn-buy")]/a[@id="J_LinkBuy"]
#contents = tr.xpath('//ul[@id="menu-list"]/li')
contents = tr.xpath('//div[@class="page th"]/a')
total_pages = 0
for content in contents:
total_pages = max(total_pages, int(content.text))
book = xlwt.Workbook()
sheet = book.add_sheet('translation')
row = 0
contentTexts = {}
errorRecords = {}
for page in range(total_pages, 0, -1):
if total_pages != page:
response=urllib.request.urlopen('http://www.kekenet.com/kouyu/primary/chuji/List_%d.shtml' % page)
html = response.read().decode("utf-8")
tr = etree.HTML(html)
    allTests = tr.xpath("//text()")  # all text nodes in the page
contents = tr.xpath('//ul[@id="menu-list"]/li/h2/a')
prepareTexts = []
for content in contents:
prepareTexts.append(content.text)
for index, allTest in enumerate(allTests):
if allTest in prepareTexts:
needText = allTests[index + 3].replace('\n', '').replace(',', ',').replace('。', '.')
if re.findall('^[a-zA-Z]', needText):
pass
else:
needText = allTests[index + 2].replace('\n', '').replace(',', ',').replace('。', '.')
try:
slicePos = needText.find(re.findall('[\u2E80-\u9FFF]+', needText)[0])
contentTexts[needText[:slicePos].replace('\n', '')] = needText[slicePos:].replace('\n', '').replace(',', ',').replace('。', '.')
firstStr = needText[:slicePos].replace('\n', '')
secondStr = needText[slicePos:].replace('\n', '').replace(',', ',').replace('。', '.')
except IndexError:
print('find error (%d %d %d: %s)' % (page, index, row+1, allTest))
errorRecords[str(page) + str(index) + str(row+1)] = allTests
firstStr = ''
secondStr = ''
sheet.write(row, 0, firstStr)
sheet.write(row, 1, secondStr)
row += 1
book.save('translation.xlsx')
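# (Editor's note) xlwt only writes legacy .xls (BIFF) workbooks, so the file
# saved above contains .xls data under an .xlsx name; saving it as
# 'translation.xls' would avoid the format/extension mismatch.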
|
[
"ityangna0402@163.com"
] |
ityangna0402@163.com
|
f62998c30aabd3f2ae38cf6aa13b33f4456ef7e1
|
d0fe389bae13abfc9d666dc880c50b894b7c212d
|
/software/tool/test_pipeline/move_file.py
|
5fd75b3eebadef6c39cadc438cc9d2d6974eda57
|
[] |
no_license
|
ab3nd/TinyRobo
|
965c060e95ef6446a609b4954dda042d1ff16311
|
b86d2f716fea4bcc420f81e1903484554fb33b51
|
refs/heads/master
| 2020-04-12T08:49:45.086755
| 2019-07-11T01:59:05
| 2019-07-11T01:59:05
| 39,583,602
| 7
| 2
| null | 2018-07-10T20:05:36
| 2015-07-23T18:17:14
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 819
|
py
|
#!/usr/bin/python
#Get a file that starts with "recognizer_test" in the ~/.ros/ directory, and move it to a new directory
import json
import rosbag
import rospy
import os
import fnmatch
import yaml
#From https://stackoverflow.com/questions/1724693/find-a-file-in-python
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
path = "/home/ams/.ros"
pattern = "recognizer_test*"
#Get the files
files = find(pattern, path)
#Because the file names contain dates, this should more or less get the oldest one
oldName = sorted(files)[0]
#Move it to an appropriately named directory
os.renames(oldName, "test_{0}/{0}_{0}.bag".format('foo'))
|
[
"orphrey@gmail.com"
] |
orphrey@gmail.com
|
14147605374f069cff8d2de50567bb9cf4e835a8
|
7949f96ee7feeaa163608dbd256b0b76d1b89258
|
/toontown/building/DistributedDoor.py
|
c8656242ed57b765f03716bc377f99ceb20175d6
|
[] |
no_license
|
xxdecryptionxx/ToontownOnline
|
414619744b4c40588f9a86c8e01cb951ffe53e2d
|
e6c20e6ce56f2320217f2ddde8f632a63848bd6b
|
refs/heads/master
| 2021-01-11T03:08:59.934044
| 2018-07-27T01:26:21
| 2018-07-27T01:26:21
| 71,086,644
| 8
| 10
| null | 2018-06-01T00:13:34
| 2016-10-17T00:39:41
|
Python
|
UTF-8
|
Python
| false
| false
| 28,605
|
py
|
# File: t (Python 2.4)
from toontown.toonbase.ToonBaseGlobal import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.distributed import DistributedObject
from toontown.hood import ZoneUtil
from toontown.suit import Suit
from toontown.distributed import DelayDelete
import FADoorCodes
from direct.task.Task import Task
import DoorTypes
from toontown.toontowngui import TTDialog
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TeaserPanel
from toontown.distributed.DelayDeletable import DelayDeletable
if __debug__:
import pdb
class DistributedDoor(DistributedObject.DistributedObject, DelayDeletable):
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.openSfx = base.loadSfx('phase_3.5/audio/sfx/Door_Open_1.mp3')
self.closeSfx = base.loadSfx('phase_3.5/audio/sfx/Door_Close_1.mp3')
self.nametag = None
self.fsm = ClassicFSM.ClassicFSM('DistributedDoor_right', [
State.State('off', self.enterOff, self.exitOff, [
'closing',
'closed',
'opening',
'open']),
State.State('closing', self.enterClosing, self.exitClosing, [
'closed',
'opening']),
State.State('closed', self.enterClosed, self.exitClosed, [
'opening']),
State.State('opening', self.enterOpening, self.exitOpening, [
'open']),
State.State('open', self.enterOpen, self.exitOpen, [
'closing',
'open'])], 'off', 'off')
self.fsm.enterInitialState()
self.exitDoorFSM = ClassicFSM.ClassicFSM('DistributedDoor_left', [
State.State('off', self.exitDoorEnterOff, self.exitDoorExitOff, [
'closing',
'closed',
'opening',
'open']),
State.State('closing', self.exitDoorEnterClosing, self.exitDoorExitClosing, [
'closed',
'opening']),
State.State('closed', self.exitDoorEnterClosed, self.exitDoorExitClosed, [
'opening']),
State.State('opening', self.exitDoorEnterOpening, self.exitDoorExitOpening, [
'open']),
State.State('open', self.exitDoorEnterOpen, self.exitDoorExitOpen, [
'closing',
'open'])], 'off', 'off')
self.exitDoorFSM.enterInitialState()
self.specialDoorTypes = {
DoorTypes.EXT_HQ: 0,
DoorTypes.EXT_COGHQ: 0,
DoorTypes.INT_COGHQ: 0,
DoorTypes.EXT_KS: 0,
DoorTypes.INT_KS: 0 }
self.doorX = 1.5
def generate(self):
DistributedObject.DistributedObject.generate(self)
self.avatarTracks = []
self.avatarExitTracks = []
self.avatarIDList = []
self.avatarExitIDList = []
self.doorTrack = None
self.doorExitTrack = None
def disable(self):
self.clearNametag()
taskMgr.remove(self.checkIsDoorHitTaskName())
self.ignore(self.getEnterTriggerEvent())
self.ignore(self.getExitTriggerEvent())
self.ignore('clearOutToonInterior')
self.fsm.request('off')
self.exitDoorFSM.request('off')
if self.__dict__.has_key('building'):
del self.building
self.finishAllTracks()
self.avatarIDList = []
self.avatarExitIDList = []
if hasattr(self, 'tempDoorNodePath'):
self.tempDoorNodePath.removeNode()
del self.tempDoorNodePath
DistributedObject.DistributedObject.disable(self)
def delete(self):
del self.fsm
del self.exitDoorFSM
del self.openSfx
del self.closeSfx
DistributedObject.DistributedObject.delete(self)
def wantsNametag(self):
return not ZoneUtil.isInterior(self.zoneId)
def setupNametag(self):
if not self.wantsNametag():
return None
if self.nametag == None:
self.nametag = NametagGroup()
self.nametag.setFont(ToontownGlobals.getBuildingNametagFont())
if TTLocalizer.BuildingNametagShadow:
self.nametag.setShadow(*TTLocalizer.BuildingNametagShadow)
self.nametag.setContents(Nametag.CName)
self.nametag.setColorCode(NametagGroup.CCToonBuilding)
self.nametag.setActive(0)
self.nametag.setAvatar(self.getDoorNodePath())
self.nametag.setObjectCode(self.block)
name = self.cr.playGame.dnaStore.getTitleFromBlockNumber(self.block)
self.nametag.setName(name)
self.nametag.manage(base.marginManager)
def clearNametag(self):
if self.nametag != None:
self.nametag.unmanage(base.marginManager)
self.nametag.setAvatar(NodePath())
self.nametag = None
def getTriggerName(self):
if self.doorType == DoorTypes.INT_HQ or self.specialDoorTypes.has_key(self.doorType):
return 'door_trigger_' + str(self.block) + '_' + str(self.doorIndex)
else:
return 'door_trigger_' + str(self.block)
def getTriggerName_wip(self):
name = 'door_trigger_%d' % (self.doId,)
return name
def getEnterTriggerEvent(self):
return 'enter' + self.getTriggerName()
def getExitTriggerEvent(self):
return 'exit' + self.getTriggerName()
def hideDoorParts(self):
if self.specialDoorTypes.has_key(self.doorType):
self.hideIfHasFlat(self.findDoorNode('rightDoor'))
self.hideIfHasFlat(self.findDoorNode('leftDoor'))
self.findDoorNode('doorFrameHoleRight').hide()
self.findDoorNode('doorFrameHoleLeft').hide()
else:
return None
def setTriggerName(self):
if self.specialDoorTypes.has_key(self.doorType):
building = self.getBuilding()
doorTrigger = building.find('**/door_' + str(self.doorIndex) + '/**/door_trigger*')
doorTrigger.node().setName(self.getTriggerName())
else:
return None
def setTriggerName_wip(self):
building = self.getBuilding()
doorTrigger = building.find('**/door_%d/**/door_trigger_%d' % (self.doorIndex, self.block))
if doorTrigger.isEmpty():
doorTrigger = building.find('**/door_trigger_%d' % (self.block,))
if doorTrigger.isEmpty():
doorTrigger = building.find('**/door_%d/**/door_trigger_*' % (self.doorIndex,))
if doorTrigger.isEmpty():
doorTrigger = building.find('**/door_trigger_*')
doorTrigger.node().setName(self.getTriggerName())
def setZoneIdAndBlock(self, zoneId, block):
self.zoneId = zoneId
self.block = block
def setDoorType(self, doorType):
self.notify.debug('Door type = ' + str(doorType) + ' on door #' + str(self.doId))
self.doorType = doorType
def setDoorIndex(self, doorIndex):
self.doorIndex = doorIndex
def setSwing(self, flags):
self.leftSwing = flags & 1 != 0
self.rightSwing = flags & 2 != 0
def setOtherZoneIdAndDoId(self, zoneId, distributedObjectID):
self.otherZoneId = zoneId
self.otherDoId = distributedObjectID
def setState(self, state, timestamp):
self.fsm.request(state, [
globalClockDelta.localElapsedTime(timestamp)])
def setExitDoorState(self, state, timestamp):
self.exitDoorFSM.request(state, [
globalClockDelta.localElapsedTime(timestamp)])
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
self.doPostAnnounceGenerate()
def doPostAnnounceGenerate(self):
if self.doorType == DoorTypes.INT_STANDARD:
self.bHasFlat = True
else:
self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty()
self.hideDoorParts()
self.setTriggerName()
self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
self.acceptOnce('clearOutToonInterior', self.doorTrigger)
self.setupNametag()
def getBuilding(self):
if not self.__dict__.has_key('building'):
if self.doorType == DoorTypes.INT_STANDARD:
door = render.find('**/leftDoor;+s')
self.building = door.getParent()
elif self.doorType == DoorTypes.INT_HQ:
door = render.find('**/door_0')
self.building = door.getParent()
elif self.doorType == DoorTypes.INT_KS:
self.building = render.find('**/KartShop_Interior*')
elif self.doorType == DoorTypes.EXT_STANDARD and self.doorType == DoorTypes.EXT_HQ or self.doorType == DoorTypes.EXT_KS:
self.building = self.cr.playGame.hood.loader.geom.find('**/??' + str(self.block) + ':*_landmark_*_DNARoot;+s')
if self.building.isEmpty():
self.building = self.cr.playGame.hood.loader.geom.find('**/??' + str(self.block) + ':animated_building_*_DNARoot;+s')
elif self.doorType == DoorTypes.EXT_COGHQ or self.doorType == DoorTypes.INT_COGHQ:
self.building = self.cr.playGame.hood.loader.geom
else:
self.notify.error('No such door type as ' + str(self.doorType))
return self.building
def getBuilding_wip(self):
if not self.__dict__.has_key('building'):
if self.__dict__.has_key('block'):
self.building = self.cr.playGame.hood.loader.geom.find('**/??' + str(self.block) + ':*_landmark_*_DNARoot;+s')
else:
self.building = self.cr.playGame.hood.loader.geom
print '---------------- door is interior -------'
return self.building
def readyToExit(self):
base.transitions.fadeScreen(1.0)
self.sendUpdate('requestExit')
def avatarEnterDoorTrack(self, avatar, duration):
trackName = 'avatarEnterDoor-%d-%d' % (self.doId, avatar.doId)
track = Parallel(name = trackName)
otherNP = self.getDoorNodePath()
if hasattr(avatar, 'stopSmooth'):
avatar.stopSmooth()
if avatar.doId == base.localAvatar.doId:
track.append(LerpPosHprInterval(nodePath = camera, other = avatar, duration = duration, pos = Point3(0, -8, avatar.getHeight()), hpr = VBase3(0, 0, 0), blendType = 'easeInOut'))
finalPos = avatar.getParent().getRelativePoint(otherNP, Point3(self.doorX, 2, ToontownGlobals.FloorOffset))
moveHere = Sequence(self.getAnimStateInterval(avatar, 'walk'), LerpPosInterval(nodePath = avatar, duration = duration, pos = finalPos, blendType = 'easeIn'))
track.append(moveHere)
if avatar.doId == base.localAvatar.doId:
track.append(Sequence(Wait(duration * 0.5), Func(base.transitions.irisOut, duration * 0.5), Wait(duration * 0.5), Func(avatar.b_setParent, ToontownGlobals.SPHidden)))
track.delayDelete = DelayDelete.DelayDelete(avatar, 'avatarEnterDoorTrack')
return track
def avatarEnqueueTrack(self, avatar, duration):
if hasattr(avatar, 'stopSmooth'):
avatar.stopSmooth()
back = -5.0 - 2.0 * len(self.avatarIDList)
if back < -9.0:
back = -9.0
offset = Point3(self.doorX, back, ToontownGlobals.FloorOffset)
otherNP = self.getDoorNodePath()
walkLike = ActorInterval(avatar, 'walk', startTime = 1, duration = duration, endTime = 0.0001)
standHere = Sequence(LerpPosHprInterval(nodePath = avatar, other = otherNP, duration = duration, pos = offset, hpr = VBase3(0, 0, 0), blendType = 'easeInOut'), self.getAnimStateInterval(avatar, 'neutral'))
trackName = 'avatarEnqueueDoor-%d-%d' % (self.doId, avatar.doId)
track = Parallel(walkLike, standHere, name = trackName)
track.delayDelete = DelayDelete.DelayDelete(avatar, 'avatarEnqueueTrack')
return track
def getAnimStateInterval(self, avatar, animName):
isSuit = isinstance(avatar, Suit.Suit)
if isSuit:
return Func(avatar.loop, animName, 0)
else:
return Func(avatar.setAnimState, animName)
def isDoorHit(self):
vec = base.localAvatar.getRelativeVector(self.currentDoorNp, self.currentDoorVec)
netScale = self.currentDoorNp.getNetTransform().getScale()
yToTest = vec.getY() / netScale[1]
return yToTest < -0.5
def enterDoor(self):
if self.allowedToEnter():
messenger.send('DistributedDoor_doorTrigger')
self.sendUpdate('requestEnter')
else:
place = base.cr.playGame.getPlace()
if place:
place.fsm.request('stopped')
self.dialog = TeaserPanel.TeaserPanel(pageName = 'otherHoods', doneFunc = self.handleOkTeaser)
def handleOkTeaser(self):
self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
self.dialog.destroy()
del self.dialog
place = base.cr.playGame.getPlace()
if place:
place.fsm.request('walk')
def allowedToEnter(self, zoneId = None):
allowed = False
if hasattr(base, 'ttAccess') and base.ttAccess:
if zoneId:
allowed = base.ttAccess.canAccess(zoneId)
else:
allowed = base.ttAccess.canAccess()
return allowed
def checkIsDoorHitTaskName(self):
return 'checkIsDoorHit' + self.getTriggerName()
def checkIsDoorHitTask(self, task):
if self.isDoorHit():
self.ignore(self.checkIsDoorHitTaskName())
self.ignore(self.getExitTriggerEvent())
self.enterDoor()
return Task.done
return Task.cont
def cancelCheckIsDoorHitTask(self, args):
taskMgr.remove(self.checkIsDoorHitTaskName())
del self.currentDoorNp
del self.currentDoorVec
self.ignore(self.getExitTriggerEvent())
self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
def doorTrigger(self, args = None):
self.ignore(self.getEnterTriggerEvent())
if args == None:
self.enterDoor()
else:
self.currentDoorNp = NodePath(args.getIntoNodePath())
self.currentDoorVec = Vec3(args.getSurfaceNormal(self.currentDoorNp))
if self.isDoorHit():
self.enterDoor()
else:
self.accept(self.getExitTriggerEvent(), self.cancelCheckIsDoorHitTask)
taskMgr.add(self.checkIsDoorHitTask, self.checkIsDoorHitTaskName())
def avatarEnter(self, avatarID):
avatar = self.cr.doId2do.get(avatarID, None)
if avatar:
avatar.setAnimState('neutral')
track = self.avatarEnqueueTrack(avatar, 0.5)
track.start()
self.avatarTracks.append(track)
self.avatarIDList.append(avatarID)
def rejectEnter(self, reason):
message = FADoorCodes.reasonDict[reason]
if message:
self._DistributedDoor__faRejectEnter(message)
else:
self._DistributedDoor__basicRejectEnter()
def _DistributedDoor__basicRejectEnter(self):
self.accept(self.getEnterTriggerEvent(), self.doorTrigger)
if self.cr.playGame.getPlace():
self.cr.playGame.getPlace().setState('walk')
def _DistributedDoor__faRejectEnter(self, message):
self.rejectDialog = TTDialog.TTGlobalDialog(message = message, doneEvent = 'doorRejectAck', style = TTDialog.Acknowledge)
self.rejectDialog.show()
self.rejectDialog.delayDelete = DelayDelete.DelayDelete(self, '__faRejectEnter')
event = 'clientCleanup'
self.acceptOnce(event, self._DistributedDoor__handleClientCleanup)
base.cr.playGame.getPlace().setState('stopped')
self.acceptOnce('doorRejectAck', self._DistributedDoor__handleRejectAck)
self.acceptOnce('stoppedAsleep', self._DistributedDoor__handleFallAsleepDoor)
def _DistributedDoor__handleClientCleanup(self):
if hasattr(self, 'rejectDialog') and self.rejectDialog:
self.rejectDialog.doneStatus = 'ok'
self._DistributedDoor__handleRejectAck()
def _DistributedDoor__handleFallAsleepDoor(self):
self.rejectDialog.doneStatus = 'ok'
self._DistributedDoor__handleRejectAck()
def _DistributedDoor__handleRejectAck(self):
self.ignore('doorRejectAck')
self.ignore('stoppedAsleep')
self.ignore('clientCleanup')
doneStatus = self.rejectDialog.doneStatus
if doneStatus != 'ok':
self.notify.error('Unrecognized doneStatus: ' + str(doneStatus))
self._DistributedDoor__basicRejectEnter()
self.rejectDialog.delayDelete.destroy()
self.rejectDialog.cleanup()
del self.rejectDialog
def getDoorNodePath(self):
if self.doorType == DoorTypes.INT_STANDARD:
otherNP = render.find('**/door_origin')
elif self.doorType == DoorTypes.EXT_STANDARD:
if hasattr(self, 'tempDoorNodePath'):
return self.tempDoorNodePath
else:
posHpr = self.cr.playGame.dnaStore.getDoorPosHprFromBlockNumber(self.block)
otherNP = NodePath('doorOrigin')
otherNP.setPos(posHpr.getPos())
otherNP.setHpr(posHpr.getHpr())
self.tempDoorNodePath = otherNP
elif self.specialDoorTypes.has_key(self.doorType):
building = self.getBuilding()
otherNP = building.find('**/door_origin_' + str(self.doorIndex))
elif self.doorType == DoorTypes.INT_HQ:
otherNP = render.find('**/door_origin_' + str(self.doorIndex))
else:
self.notify.error('No such door type as ' + str(self.doorType))
return otherNP
def avatarExitTrack(self, avatar, duration):
if hasattr(avatar, 'stopSmooth'):
avatar.stopSmooth()
otherNP = self.getDoorNodePath()
trackName = 'avatarExitDoor-%d-%d' % (self.doId, avatar.doId)
track = Sequence(name = trackName)
track.append(self.getAnimStateInterval(avatar, 'walk'))
track.append(PosHprInterval(avatar, Point3(-(self.doorX), 0, ToontownGlobals.FloorOffset), VBase3(179, 0, 0), other = otherNP))
track.append(Func(avatar.setParent, ToontownGlobals.SPRender))
if avatar.doId == base.localAvatar.doId:
track.append(PosHprInterval(camera, VBase3(-(self.doorX), 5, avatar.getHeight()), VBase3(180, 0, 0), other = otherNP))
if avatar.doId == base.localAvatar.doId:
finalPos = render.getRelativePoint(otherNP, Point3(-(self.doorX), -6, ToontownGlobals.FloorOffset))
else:
finalPos = render.getRelativePoint(otherNP, Point3(-(self.doorX), -3, ToontownGlobals.FloorOffset))
track.append(LerpPosInterval(nodePath = avatar, duration = duration, pos = finalPos, blendType = 'easeInOut'))
if avatar.doId == base.localAvatar.doId:
track.append(Func(self.exitCompleted))
track.append(Func(base.transitions.irisIn))
if hasattr(avatar, 'startSmooth'):
track.append(Func(avatar.startSmooth))
track.delayDelete = DelayDelete.DelayDelete(avatar, 'DistributedDoor.avatarExitTrack')
return track
def exitCompleted(self):
base.localAvatar.setAnimState('neutral')
place = self.cr.playGame.getPlace()
if place:
place.setState('walk')
base.localAvatar.d_setParent(ToontownGlobals.SPRender)
def avatarExit(self, avatarID):
if avatarID in self.avatarIDList:
self.avatarIDList.remove(avatarID)
if avatarID == base.localAvatar.doId:
self.exitCompleted()
else:
self.avatarExitIDList.append(avatarID)
def finishDoorTrack(self):
if self.doorTrack:
self.doorTrack.finish()
self.doorTrack = None
def finishDoorExitTrack(self):
if self.doorExitTrack:
self.doorExitTrack.finish()
self.doorExitTrack = None
def finishAllTracks(self):
self.finishDoorTrack()
self.finishDoorExitTrack()
for t in self.avatarTracks:
t.finish()
DelayDelete.cleanupDelayDeletes(t)
self.avatarTracks = []
for t in self.avatarExitTracks:
t.finish()
DelayDelete.cleanupDelayDeletes(t)
self.avatarExitTracks = []
def enterOff(self):
pass
def exitOff(self):
pass
def getRequestStatus(self):
zoneId = self.otherZoneId
request = {
'loader': ZoneUtil.getBranchLoaderName(zoneId),
'where': ZoneUtil.getToonWhereName(zoneId),
'how': 'doorIn',
'hoodId': ZoneUtil.getHoodId(zoneId),
'zoneId': zoneId,
'shardId': None,
'avId': -1,
'allowRedirect': 0,
'doorDoId': self.otherDoId }
return request
def enterClosing(self, ts):
doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight')
if doorFrameHoleRight.isEmpty():
self.notify.warning('enterClosing(): did not find doorFrameHoleRight')
return None
rightDoor = self.findDoorNode('rightDoor')
if rightDoor.isEmpty():
self.notify.warning('enterClosing(): did not find rightDoor')
return None
otherNP = self.getDoorNodePath()
trackName = 'doorClose-%d' % self.doId
if self.rightSwing:
h = 100
else:
h = -100
self.finishDoorTrack()
self.doorTrack = Sequence(LerpHprInterval(nodePath = rightDoor, duration = 1.0, hpr = VBase3(0, 0, 0), startHpr = VBase3(h, 0, 0), other = otherNP, blendType = 'easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node = rightDoor), name = trackName)
self.doorTrack.start(ts)
if hasattr(self, 'done'):
request = self.getRequestStatus()
messenger.send('doorDoneEvent', [
request])
def exitClosing(self):
pass
def enterClosed(self, ts):
pass
def exitClosed(self):
pass
def enterOpening(self, ts):
doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight')
if doorFrameHoleRight.isEmpty():
self.notify.warning('enterOpening(): did not find doorFrameHoleRight')
return None
rightDoor = self.findDoorNode('rightDoor')
if rightDoor.isEmpty():
self.notify.warning('enterOpening(): did not find rightDoor')
return None
otherNP = self.getDoorNodePath()
trackName = 'doorOpen-%d' % self.doId
if self.rightSwing:
h = 100
else:
h = -100
self.finishDoorTrack()
self.doorTrack = Parallel(SoundInterval(self.openSfx, node = rightDoor), Sequence(HprInterval(rightDoor, VBase3(0, 0, 0), other = otherNP), Wait(0.40000000000000002), Func(rightDoor.show), Func(doorFrameHoleRight.show), LerpHprInterval(nodePath = rightDoor, duration = 0.59999999999999998, hpr = VBase3(h, 0, 0), startHpr = VBase3(0, 0, 0), other = otherNP, blendType = 'easeInOut')), name = trackName)
self.doorTrack.start(ts)
def exitOpening(self):
pass
def enterOpen(self, ts):
for avatarID in self.avatarIDList:
avatar = self.cr.doId2do.get(avatarID)
if avatar:
track = self.avatarEnterDoorTrack(avatar, 1.0)
track.start(ts)
self.avatarTracks.append(track)
if avatarID == base.localAvatar.doId:
self.done = 1
continue
self.avatarIDList = []
def exitOpen(self):
for track in self.avatarTracks:
track.finish()
DelayDelete.cleanupDelayDeletes(track)
self.avatarTracks = []
def exitDoorEnterOff(self):
pass
def exitDoorExitOff(self):
pass
def exitDoorEnterClosing(self, ts):
doorFrameHoleLeft = self.findDoorNode('doorFrameHoleLeft')
if doorFrameHoleLeft.isEmpty():
            self.notify.warning('exitDoorEnterClosing(): did not find doorFrameHoleLeft')
return None
if self.leftSwing:
h = -100
else:
h = 100
leftDoor = self.findDoorNode('leftDoor')
if not leftDoor.isEmpty():
otherNP = self.getDoorNodePath()
trackName = 'doorExitTrack-%d' % self.doId
self.finishDoorExitTrack()
self.doorExitTrack = Sequence(LerpHprInterval(nodePath = leftDoor, duration = 1.0, hpr = VBase3(0, 0, 0), startHpr = VBase3(h, 0, 0), other = otherNP, blendType = 'easeInOut'), Func(doorFrameHoleLeft.hide), Func(self.hideIfHasFlat, leftDoor), SoundInterval(self.closeSfx, node = leftDoor), name = trackName)
self.doorExitTrack.start(ts)
def exitDoorExitClosing(self):
pass
def exitDoorEnterClosed(self, ts):
pass
def exitDoorExitClosed(self):
pass
def exitDoorEnterOpening(self, ts):
doorFrameHoleLeft = self.findDoorNode('doorFrameHoleLeft')
if doorFrameHoleLeft.isEmpty():
            self.notify.warning('exitDoorEnterOpening(): did not find doorFrameHoleLeft')
return None
leftDoor = self.findDoorNode('leftDoor')
if self.leftSwing:
h = -100
else:
h = 100
if not leftDoor.isEmpty():
otherNP = self.getDoorNodePath()
trackName = 'doorDoorExitTrack-%d' % self.doId
self.finishDoorExitTrack()
self.doorExitTrack = Parallel(SoundInterval(self.openSfx, node = leftDoor), Sequence(Func(leftDoor.show), Func(doorFrameHoleLeft.show), LerpHprInterval(nodePath = leftDoor, duration = 0.59999999999999998, hpr = VBase3(h, 0, 0), startHpr = VBase3(0, 0, 0), other = otherNP, blendType = 'easeInOut')), name = trackName)
self.doorExitTrack.start(ts)
else:
self.notify.warning('exitDoorEnterOpening(): did not find leftDoor')
def exitDoorExitOpening(self):
pass
def exitDoorEnterOpen(self, ts):
for avatarID in self.avatarExitIDList:
avatar = self.cr.doId2do.get(avatarID)
if avatar:
track = self.avatarExitTrack(avatar, 0.20000000000000001)
track.start()
self.avatarExitTracks.append(track)
continue
self.avatarExitIDList = []
def exitDoorExitOpen(self):
for track in self.avatarExitTracks:
track.finish()
DelayDelete.cleanupDelayDeletes(track)
self.avatarExitTracks = []
def findDoorNode(self, string, allowEmpty = False):
building = self.getBuilding()
if not building:
self.notify.warning('getBuilding() returned None, avoiding crash, remark 896029')
foundNode = None
else:
foundNode = building.find('**/door_' + str(self.doorIndex) + '/**/' + string + '*;+s+i')
if foundNode.isEmpty():
foundNode = building.find('**/' + string + '*;+s+i')
if allowEmpty:
return foundNode
return foundNode
def hideIfHasFlat(self, node):
if self.bHasFlat:
node.hide()
|
[
"fr1tzanatore@aol.com"
] |
fr1tzanatore@aol.com
|
590d2207a922188f883dab5476511635e22f0ab1
|
408f8c561a695ac20b792ba0c4a230c154dad347
|
/scripts/slurm.py
|
1526201ab8cdf66bbed803e8fe3ad1e4f9c182d0
|
[] |
no_license
|
andnp/acceleration-v2
|
a407888c74a247e6d441259d50d77cf6194f728b
|
52b8a42c3e315ddbb4549a3a941afda81e92be9b
|
refs/heads/master
| 2022-11-26T05:42:17.680125
| 2020-08-02T23:25:01
| 2020-08-02T23:25:01
| 204,991,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,639
|
py
|
import time
import sys
import os
sys.path.append(os.getcwd())
from src.utils.model import loadExperiment
from PyExpUtils.runner import SlurmArgs
from PyExpUtils.results.paths import listResultsPaths
from PyExpUtils.utils.generator import group
from PyExpUtils.runner.Slurm import schedule, slurmOptionsFromFile
if len(sys.argv) < 4:
print('Please run again using')
print('python -m scripts.scriptName [src/entry.py] [path/to/slurm-def] [base_path] [runs] [paths/to/descriptions]...')
exit(0)
args = SlurmArgs.SlurmArgsModel({
'experiment_paths': sys.argv[5:],
'base_path': sys.argv[3],
'runs': 1,
'slurm_path': sys.argv[2],
'executable': "python " + sys.argv[1] + " " + sys.argv[4],
})
def generateMissing(paths):
for i, p in enumerate(paths):
summary_path = p + '/errors_summary.npy'
if not os.path.exists(summary_path):
yield i
def printProgress(size, it):
for i, _ in enumerate(it):
print(f'{i + 1}/{size}', end='\r')
        if i + 1 == size:  # finish the carriage-return progress line with a newline
print()
yield _
for path in args.experiment_paths:
print(path)
exp = loadExperiment(path)
slurm = slurmOptionsFromFile(args.slurm_path)
size = exp.permutations() * args.runs
paths = listResultsPaths(exp, args.runs)
paths = printProgress(size, paths)
indices = generateMissing(paths)
groupSize = slurm.tasks * slurm.tasksPerNode
for g in group(indices, groupSize):
l = list(g)
print("scheduling:", path, l)
slurm.tasks = min([slurm.tasks, len(l)])
schedule(slurm, args.executable + ' ' + path, l)
time.sleep(2)
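# (Editor's note) Example invocation mirroring the usage string printed above;
# every path here is a placeholder, not a file from this repository:
#   python -m scripts.slurm src/entry.py path/to/slurm-def.json results/ 1 experiments/example.json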
|
[
"andnpatterson@gmail.com"
] |
andnpatterson@gmail.com
|
051f9297342646fcb998791b65313336c4cda8bd
|
dfa4163e86fc07beb35f63721dcaa317ec510ac4
|
/app.py
|
a43ccf13bc93d6fc6e4fe4d02bd8b2adc87f500d
|
[] |
no_license
|
shaw-s-yu/TBExtractor
|
d15c704b97dfbe67d62608f61b01dfe72f4a3d2c
|
409003ca480820b5d2e1c808eef626bdf28e5c8c
|
refs/heads/main
| 2023-03-15T02:36:00.752477
| 2021-03-27T16:50:03
| 2021-03-27T16:50:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,528
|
py
|
import sys
from utils import Utils
from model import Model
inputs = [1, 2, 3, 4, 5]  # valid menu selections; one entry per mode listed in App.modes
class App:
def __init__(self, mode):
self.mode = mode
self.utils = Utils()
self.model = Model()
self.modes = [
('1. generate article 6 from MOU01-22.txt', 'generate_article_6'),
('2. predict article 6', 'predict_article_6'),
('3. predict all of MOU01-22.txt', 'predict_mou'),
('4. predict another file', 'predict_other'),
('5. set output file path/name', 'set_out_f')
]
def generate_article_6(self):
self.utils.generate_article_6()
def predict_article_6(self):
self.utils.load_article_6()
res = self.model.predict(self.utils.data)
self.utils.make_output(res)
def predict_mou(self):
self.utils.load_input(self.utils.mou_f)
res = self.model.predict(self.utils.data)
self.utils.make_output(res)
def predict_other(self):
path = input('Enter file path to predict:')
self.utils.load_input(path)
res = self.model.predict(self.utils.data)
self.utils.make_output(res)
def set_out_f(self):
path = input('Enter file path of output:')
self.utils.out_f = path
def print_menu(self):
print('Enter number')
print('\n'.join([item[0] for item in self.modes]))
def run(self):
while not self.mode:
self.print_menu()
m = eval(input())
self.mode = None if m not in inputs else m
getattr(self, self.modes[self.mode-1][1])()
if __name__ == '__main__':
mode = None if len(sys.argv)==1 or eval(sys.argv[1]) not in inputs else eval(sys.argv[1])
app = App(mode)
app.run()
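# (Editor's note) The menu number can also be passed on the command line to run
# a mode non-interactively, e.g. `python app.py 3` calls predict_mou directly.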
|
[
"46137960+AhoyKakkoii@users.noreply.github.com"
] |
46137960+AhoyKakkoii@users.noreply.github.com
|
71596ad05cca367d0a20f3d356341bed2b8ff66f
|
f99f080e4c9507b007d2c16ca5faae44f2694f8c
|
/lab2/lab_python_oop/Square.py
|
bcd5462ee3e2e9ce1773b839834b3f3e684dfaf4
|
[] |
no_license
|
amartery/RIP_5sem
|
0424448fc3fb428f86abff64b28097c3dda25290
|
d67ac8699c81d810a443623cc061ffcfa94ca1fb
|
refs/heads/main
| 2023-02-11T02:49:22.895024
| 2021-01-05T15:13:37
| 2021-01-05T15:13:37
| 306,724,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
from lab_python_oop import Rectangle
class Square(Rectangle.Rectangle):
"""Square"""
def __init__(self, a, color):
super().__init__(a, a, color)
def area(self):
return self.width * self.height
def __repr__(self):
        return "Class name: {}\n" \
               "side length: {}\n" \
               "color: {}\n" \
               "area: {}\n".format(self.__doc__, self.width,
self.color.color, self.area())
|
[
"amartery@gmail.com"
] |
amartery@gmail.com
|
c9b9126eb7cfe8ea67cc1dd7bf1da71936a45f80
|
5f4adc8c51f9b7dd67a47f37eaf31e8ddb066f71
|
/core/cp_plugins/systray.py
|
9fc45cfcbb597ecc1d8dd71abfc8c312a66c380d
|
[
"Apache-2.0"
] |
permissive
|
cryorooster/watcher
|
1a4f186cb9d0a0c84f80e30073b313a0bd049995
|
0dd25241a01d7dcb9ffcd312cc2472b2c9cb2983
|
refs/heads/master
| 2021-01-23T04:45:09.272825
| 2017-02-05T23:36:39
| 2017-02-05T23:36:49
| 80,380,818
| 0
| 0
| null | 2017-01-29T23:39:16
| 2017-01-29T23:39:15
| null |
UTF-8
|
Python
| false
| false
| 2,563
|
py
|
import logging
import sys
import webbrowser
import cherrypy
import core
from cherrypy.process import plugins
from infi.systray import SysTrayIcon
logging = logging.getLogger(__name__)
class SysTrayPlugin(plugins.SimplePlugin):
'''
CherryPy plugin that creates a system tray icon for Windows.
Because SysTrayIcon always fires off on_quit, we can't have on_quit
execute cherrypy.engine.exit() if the exit command is what triggered
SysTrayIcon to close. So conditions are set to only fire on_quit when
the quit_method == 'menu'.
This way, when the menu option is called, it destroys SysTrayIcon then
closes cherrypy. Cherrypy will try to close SysTrayIcon by calling
stop(), so stop() gets reassigned to None.
If the app is closed by cherrypy (whether catching a kb interrupt or the GUI
shutdown button), cherrypy stops the plugin by calling stop(). Stop()
reassigns SysTrayIcon._on_quit to None and calls SysTrayIcon.shutdown().
SysTrayIcon is then destroyed (twice for reasons I can't figure out),
then cherrypy finishes up the engine.stop() and engine.exit().
The chain is as such:
Trigger == systray menu 'Quit':
SysTrayIcon._destroy() >
SysTrayIcon._on_quit() > set SysTrayPlugin.quit_method = 'menu'
cherrypy.engine.exit() >
SysTrayPlugin.stop() > does nothing
sys.exit()
Trigger == KBInterrupt or GUI Shutdown:
cherrypy.engine.stop() >
SysTrayPlugin.stop() > disable SysTrayIcon._on_quit()
SysTrayIcon.shutdown() >
SysTrayIcon._destroy() >
SysTrayIcon._destroy() >
cherrypy.engine.exit() >
sys.exit()
'''
def __init__(self, bus):
plugins.SimplePlugin.__init__(self, bus)
menu_options = (('Open Browser', None, self.open),)
self.systray = SysTrayIcon('core/favicon.ico', 'Watcher',
menu_options, on_quit=self.on_quit)
self.quit_method = None
return
def start(self):
self.systray.start()
return
def stop(self):
if self.quit_method == 'menu':
return
else:
self.systray._on_quit = None
self.systray.shutdown()
return
def on_quit(self, systray):
self.quit_method = 'menu'
cherrypy.engine.exit()
sys.exit(0)
# sys tray functions:
def open(self, systray):
webbrowser.open('http://{}:{}{}'.format(
core.SERVER_ADDRESS, core.SERVER_PORT, core.URL_BASE))
return
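# (Editor's sketch, assuming the usual CherryPy plugin wiring; not part of this
# module) The plugin is attached to the engine bus before start-up:
#   SysTrayPlugin(cherrypy.engine).subscribe()
#   cherrypy.engine.start()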
|
[
"nosmokingbandit@gmail.com"
] |
nosmokingbandit@gmail.com
|
a81da2eb335b9334d5ffe13dc3ee8929dd6a7c6e
|
31b83dbd1098fbba49a1d559f9ecac4d3b118fc8
|
/pyEX/premium/wallstreethorizon/__init__.py
|
5cfb183f73a4383b500379e1896460420d585f9a
|
[
"Apache-2.0"
] |
permissive
|
briangu/pyEX
|
438f777bdf7661f47fe7b63a0a848d6e90f8e9df
|
2eacc322932f4b686817b3d162cb1e4f399fd696
|
refs/heads/main
| 2023-07-15T16:15:17.277704
| 2021-08-17T16:36:36
| 2021-08-17T16:36:36
| 331,754,038
| 0
| 0
|
Apache-2.0
| 2021-01-21T21:01:48
| 2021-01-21T21:01:48
| null |
UTF-8
|
Python
| false
| false
| 23,317
|
py
|
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the jupyterlab_templates library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
from ...common import _interval
from ...stocks import timeSeries, timeSeriesDF
@_interval(hours=4)
def _base(id, symbol="", **kwargs):
"""internal"""
kwargs["id"] = id
kwargs["key"] = symbol or kwargs.pop("key", "")
return timeSeries(**kwargs)
@_interval(hours=4)
def _baseDF(id, symbol="", **kwargs):
"""internal"""
kwargs["id"] = id
kwargs["key"] = symbol or kwargs.pop("key", "")
return timeSeriesDF(**kwargs)
@wraps(timeSeries)
def analystDays(symbol="", **kwargs):
"""This is a meeting where company executives provide information about the company’s performance and its future prospects.
https://iexcloud.io/docs/api/#analyst-days
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_ANALYST_DAY", symbol=symbol, **kwargs)
@wraps(timeSeries)
def analystDaysDF(symbol="", **kwargs):
"""This is a meeting where company executives provide information about the company’s performance and its future prospects.
https://iexcloud.io/docs/api/#analyst-days
Args:
symbol (str): symbol to use
"""
return _baseDF(id="PREMIUM_WALLSTREETHORIZON_ANALYST_DAY", symbol=symbol, **kwargs)
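# (Editor's usage sketch, assuming an IEX Cloud token with the premium Wall
# Street Horizon subscription; the token value is a placeholder)
#   from pyEX.premium.wallstreethorizon import analystDaysDF
#   df = analystDaysDF("AAPL", token="sk_...", version="stable")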
@wraps(timeSeries)
def boardOfDirectorsMeeting(symbol="", **kwargs):
"""This is an end-point for getting information about a formal meeting of a company’s board of directors to establish corporate management related policies and to make decisions on major company issues.
https://iexcloud.io/docs/api/#analyst-days
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_BOARD_OF_DIRECTORS_MEETING",
symbol=symbol,
**kwargs
)
@wraps(timeSeries)
def boardOfDirectorsMeetingDF(symbol="", **kwargs):
"""This is a meeting where company executives provide information about the company’s performance and its future prospects.
https://iexcloud.io/docs/api/#board-of-directors-meeting
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_BOARD_OF_DIRECTORS_MEETING",
symbol=symbol,
**kwargs
)
@wraps(timeSeries)
def businessUpdates(symbol="", **kwargs):
    """This is a meeting or conference call in which company information is reviewed by one or more company executives.
https://iexcloud.io/docs/api/#business-updates
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_BUSINESS_UPDATE", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def businessUpdatesDF(symbol="", **kwargs):
    """This is a meeting or conference call in which company information is reviewed by one or more company executives.
https://iexcloud.io/docs/api/#business-updates
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_BUSINESS_UPDATE", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def buybacks(symbol="", **kwargs):
"""The repurchase of outstanding shares by a company to reduce the number of shares on the market.
https://iexcloud.io/docs/api/#buybacks
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_BUYBACK", symbol=symbol, **kwargs)
@wraps(timeSeries)
def buybacksDF(symbol="", **kwargs):
"""The repurchase of outstanding shares by a company to reduce the number of shares on the market.
https://iexcloud.io/docs/api/#buybacks
Args:
symbol (str): symbol to use
"""
return _baseDF(id="PREMIUM_WALLSTREETHORIZON_BUYBACK", symbol=symbol, **kwargs)
@wraps(timeSeries)
def capitalMarketsDay(symbol="", **kwargs):
"""This is a meeting where company executives provide information about the company’s performance and its future prospects.
https://iexcloud.io/docs/api/#capital-markets-day
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_CAPITAL_MARKETS_DAY", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def capitalMarketsDayDF(symbol="", **kwargs):
"""This is a meeting where company executives provide information about the company’s performance and its future prospects.
https://iexcloud.io/docs/api/#capital-markets-day
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_CAPITAL_MARKETS_DAY", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def companyTravel(symbol="", **kwargs):
"""This is a roadshow or bus tour event in which one or more company executives speaks to interested investors and analysts.
https://iexcloud.io/docs/api/#company-travel
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_COMPANY_TRAVEL", symbol=symbol, **kwargs)
@wraps(timeSeries)
def companyTravelDF(symbol="", **kwargs):
"""This is a roadshow or bus tour event in which one or more company executives speaks to interested investors and analysts.
https://iexcloud.io/docs/api/#company-travel
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_COMPANY_TRAVEL", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def filingDueDates(symbol="", **kwargs):
"""This is an estimated date, based on historical trends for this company in which a company must file the appropriate Form for the quarter/year or file for an extension.
https://iexcloud.io/docs/api/#filing-due-dates
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_FILING_DUE_DATE", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def filingDueDatesDF(symbol="", **kwargs):
"""This is an estimated date, based on historical trends for this company in which a company must file the appropriate Form for the quarter/year or file for an extension.
https://iexcloud.io/docs/api/#filing-due-dates
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_FILING_DUE_DATE", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def fiscalQuarterEnd(symbol="", **kwargs):
"""This is a forecasted quarterly ending announcement date for a company. This may or may not correspond to a calendar quarter.
https://iexcloud.io/docs/api/#fiscal-quarter-end
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_FISCAL_QUARTER_END_DATE", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def fiscalQuarterEndDF(symbol="", **kwargs):
"""This is a forecasted quarterly ending announcement date for a company. This may or may not correspond to a calendar quarter.
https://iexcloud.io/docs/api/#fiscal-quarter-end
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_FISCAL_QUARTER_END_DATE", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def forum(symbol="", **kwargs):
"""This is a meeting where ideas and views of a business nature can be exchanged.
https://iexcloud.io/docs/api/#forum
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_FORUM", symbol=symbol, **kwargs)
@wraps(timeSeries)
def forumDF(symbol="", **kwargs):
"""This is a meeting where ideas and views of a business nature can be exchanged.
https://iexcloud.io/docs/api/#forum
Args:
symbol (str): symbol to use
"""
return _baseDF(id="PREMIUM_WALLSTREETHORIZON_FORUM", symbol=symbol, **kwargs)
@wraps(timeSeries)
def generalConference(symbol="", **kwargs):
"""This is a formal meeting in which representatives of many companies gather to discuss ideas or issues related to a particular topic or business, usually held for several days. This item indicates at least one representative from the company will be presenting at the conference on the specified date and time. Note: Conference details include full Conference dates.
https://iexcloud.io/docs/api/#general-conference
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_GENERAL_CONFERENCE", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def generalConferenceDF(symbol="", **kwargs):
"""This is a formal meeting in which representatives of many companies gather to discuss ideas or issues related to a particular topic or business, usually held for several days. This item indicates at least one representative from the company will be presenting at the conference on the specified date and time. Note: Conference details include full Conference dates.
https://iexcloud.io/docs/api/#general-conference
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_GENERAL_CONFERENCE", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def fdaAdvisoryCommitteeMeetings(symbol="", **kwargs):
"""The FDA uses 50 committees and panels to obtain independent expert advice on scientific, technical, and policy matters
https://iexcloud.io/docs/api/#fda-advisory-committee-meetings
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_STOCK_SPECIFIC_FDA_ADVISORY_COMMITTEE_MEETING",
symbol=symbol,
**kwargs
)
@wraps(timeSeries)
def fdaAdvisoryCommitteeMeetingsDF(symbol="", **kwargs):
"""The FDA uses 50 committees and panels to obtain independent expert advice on scientific, technical, and policy matters
https://iexcloud.io/docs/api/#fda-advisory-committee-meetings
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_STOCK_SPECIFIC_FDA_ADVISORY_COMMITTEE_MEETING",
symbol=symbol,
**kwargs
)
@wraps(timeSeries)
def holidaysWSH(symbol="", **kwargs):
"""This returns a list of market holidays.
https://iexcloud.io/docs/api/#holidays
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_HOLIDAYS", symbol=symbol, **kwargs)
@wraps(timeSeries)
def holidaysWSHDF(symbol="", **kwargs):
"""This returns a list of market holidays.
https://iexcloud.io/docs/api/#holidays
Args:
symbol (str): symbol to use
"""
return _baseDF(id="PREMIUM_WALLSTREETHORIZON_HOLIDAYS", symbol=symbol, **kwargs)
@wraps(timeSeries)
def indexChanges(symbol="", **kwargs):
"""This shows additions and removals from various indexes for particular stocks.
https://iexcloud.io/docs/api/#index-changes
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_INDEX_CHANGE", symbol=symbol, **kwargs)
@wraps(timeSeries)
def indexChangesDF(symbol="", **kwargs):
"""This shows additions and removals from various indexes for particular stocks.
https://iexcloud.io/docs/api/#index-changes
Args:
symbol (str): symbol to use
"""
return _baseDF(id="PREMIUM_WALLSTREETHORIZON_INDEX_CHANGE", symbol=symbol, **kwargs)
@wraps(timeSeries)
def iposWSH(symbol="", **kwargs):
"""Get a list of upcoming IPOs.
https://iexcloud.io/docs/api/#ipos
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_INITIAL_PUBLIC_OFFERING", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def iposWSHDF(symbol="", **kwargs):
"""Get a list of upcoming IPOs.
https://iexcloud.io/docs/api/#ipos
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_INITIAL_PUBLIC_OFFERING", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def legalActions(symbol="", **kwargs):
"""These are legal actions where an individual represents a group in a court claim. The judgment from the suit is for all the members of the group or class.
https://iexcloud.io/docs/api/#legal-actions
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_LEGAL_ACTIONS", symbol=symbol, **kwargs)
@wraps(timeSeries)
def legalActionsDF(symbol="", **kwargs):
"""These are legal actions where an individual represents a group in a court claim. The judgment from the suit is for all the members of the group or class.
https://iexcloud.io/docs/api/#legal-actions
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_LEGAL_ACTIONS", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def mergersAndAcquisitions(symbol="", **kwargs):
"""These are a type of corporate action in which two companies combine to form a single company, or one company is taken over by another.
https://iexcloud.io/docs/api/#mergers-acquisitions
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_MERGER_ACQUISITIONS", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def mergersAndAcquisitionsDF(symbol="", **kwargs):
"""These are a type of corporate action in which two companies combine to form a single company, or one company is taken over by another.
https://iexcloud.io/docs/api/#mergers-acquisitions
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_MERGER_ACQUISITIONS", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def productEvents(symbol="", **kwargs):
"""Represents movie and video releases. This is the date on which a movie distributor plans to release a movie to theaters
https://iexcloud.io/docs/api/#product-events
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_PRODUCT_EVENTS", symbol=symbol, **kwargs)
@wraps(timeSeries)
def productEventsDF(symbol="", **kwargs):
"""Represents movie and video releases. This is the date on which a movie distributor plans to release a movie to theaters
https://iexcloud.io/docs/api/#product-events
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_PRODUCT_EVENTS", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def researchAndDevelopmentDays(symbol="", **kwargs):
"""This is a day in which investors and analysts can meet with a company’s R&D representatives to learn more about new or improved products and services.
https://iexcloud.io/docs/api/#research-and-development-days
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_RD_DAY", symbol=symbol, **kwargs)
@wraps(timeSeries)
def researchAndDevelopmentDaysDF(symbol="", **kwargs):
"""This is a day in which investors and analysts can meet with a company’s R&D representatives to learn more about new or improved products and services.
https://iexcloud.io/docs/api/#research-and-development-days
Args:
symbol (str): symbol to use
"""
return _baseDF(id="PREMIUM_WALLSTREETHORIZON_RD_DAY", symbol=symbol, **kwargs)
@wraps(timeSeries)
def sameStoreSales(symbol="", **kwargs):
"""Same-store sales, also referred to as comparable-store sales, SSS or identical-store sales, is a financial metric that companies in the retail industry use to evaluate the total dollar amount of sales in the company’s stores that have been operating for a year or more.
https://iexcloud.io/docs/api/#same-store-sales
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_SAME_STORE_SALES", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def sameStoreSalesDF(symbol="", **kwargs):
"""Same-store sales, also referred to as comparable-store sales, SSS or identical-store sales, is a financial metric that companies in the retail industry use to evaluate the total dollar amount of sales in the company’s stores that have been operating for a year or more.
https://iexcloud.io/docs/api/#same-store-sales
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_SAME_STORE_SALES", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def secondaryOfferings(symbol="", **kwargs):
"""Secondary Offerings are the issuance of new stock for public sale from a company that has already made its initial public offering (IPO).
Usually, these kinds of public offerings are made by companies wishing to refinance, or raise capital for growth.
Money raised from these kinds of secondary offerings goes to the company, through the investment bank that underwrites the offering.
Investment banks are issued an allotment, and possibly an overallotment which they may choose to exercise if there is a strong possibility of making money on the spread between the allotment price and the selling price of the securities. Short Selling is prohibited during the period of the secondary offering.
https://iexcloud.io/docs/api/#secondary-offerings
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_SECONDARY_OFFERING", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def secondaryOfferingsDF(symbol="", **kwargs):
"""Secondary Offerings are the issuance of new stock for public sale from a company that has already made its initial public offering (IPO).
Usually, these kinds of public offerings are made by companies wishing to refinance, or raise capital for growth.
Money raised from these kinds of secondary offerings goes to the company, through the investment bank that underwrites the offering.
Investment banks are issued an allotment, and possibly an overallotment which they may choose to exercise if there is a strong possibility of making money on the spread between the allotment price and the selling price of the securities. Short Selling is prohibited during the period of the secondary offering.
https://iexcloud.io/docs/api/#secondary-offerings
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_SECONDARY_OFFERING", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def seminars(symbol="", **kwargs):
"""This is an educational event that features one or more subject matter experts delivering information via lecture and discussion.
https://iexcloud.io/docs/api/#seminars
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_SEMINAR", symbol=symbol, **kwargs)
@wraps(timeSeries)
def seminarsDF(symbol="", **kwargs):
"""This is an educational event that features one or more subject matter experts delivering information via lecture and discussion.
https://iexcloud.io/docs/api/#seminars
Args:
symbol (str): symbol to use
"""
return _baseDF(id="PREMIUM_WALLSTREETHORIZON_SEMINAR", symbol=symbol, **kwargs)
@wraps(timeSeries)
def shareholderMeetings(symbol="", **kwargs):
"""This is a meeting, held at least annually, to elect members to the board of directors and hear reports on the business’ financial situation as well as new policy initiatives from the corporation’s management.
https://iexcloud.io/docs/api/#shareholder-meetings
Args:
symbol (str): symbol to use
"""
return _base(
id="PREMIUM_WALLSTREETHORIZON_SHAREHOLDER_MEETING", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def shareholderMeetingsDF(symbol="", **kwargs):
"""This is a meeting, held at least annually, to elect members to the board of directors and hear reports on the business’ financial situation as well as new policy initiatives from the corporation’s management.
https://iexcloud.io/docs/api/#shareholder-meetings
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_SHAREHOLDER_MEETING", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def summitMeetings(symbol="", **kwargs):
"""This is a gathering of people who are interested in the same business subject or topic.
https://iexcloud.io/docs/api/#summit-meetings
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_SUMMIT_MEETING", symbol=symbol, **kwargs)
@wraps(timeSeries)
def summitMeetingsDF(symbol="", **kwargs):
"""This is a gathering of people who are interested in the same business subject or topic.
https://iexcloud.io/docs/api/#summit-meetings
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_SUMMIT_MEETING", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def tradeShows(symbol="", **kwargs):
"""This is a large gathering in which different companies in a particular field or industry show their products to possible customers.
https://iexcloud.io/docs/api/#trade-shows
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_TRADE_SHOW", symbol=symbol, **kwargs)
@wraps(timeSeries)
def tradeShowsDF(symbol="", **kwargs):
"""This is a large gathering in which different companies in a particular field or industry show their products to possible customers.
https://iexcloud.io/docs/api/#trade-shows
Args:
symbol (str): symbol to use
"""
return _baseDF(id="PREMIUM_WALLSTREETHORIZON_TRADE_SHOW", symbol=symbol, **kwargs)
@wraps(timeSeries)
def witchingHours(symbol="", **kwargs):
"""This is when option contracts and futures contracts expire on the exact same day.
https://iexcloud.io/docs/api/#witching-hours
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_WITCHING_HOURS", symbol=symbol, **kwargs)
@wraps(timeSeries)
def witchingHoursDF(symbol="", **kwargs):
"""This is when option contracts and futures contracts expire on the exact same day.
https://iexcloud.io/docs/api/#witching-hours
Args:
symbol (str): symbol to use
"""
return _baseDF(
id="PREMIUM_WALLSTREETHORIZON_WITCHING_HOURS", symbol=symbol, **kwargs
)
@wraps(timeSeries)
def workshops(symbol="", **kwargs):
"""This is a meeting or series of meetings at which a group of people engage in discussion and activity on a particular subject, product or service to gain hands-on experience.
https://iexcloud.io/docs/api/#workshops
Args:
symbol (str): symbol to use
"""
return _base(id="PREMIUM_WALLSTREETHORIZON_WORKSHOP", symbol=symbol, **kwargs)
@wraps(timeSeries)
def workshopsDF(symbol="", **kwargs):
"""This is a meeting or series of meetings at which a group of people engage in discussion and activity on a particular subject, product or service to gain hands-on experience.
https://iexcloud.io/docs/api/#workshops
Args:
symbol (str): symbol to use
"""
return _baseDF(id="PREMIUM_WALLSTREETHORIZON_WORKSHOP", symbol=symbol, **kwargs)
|
[
"t.paine154@gmail.com"
] |
t.paine154@gmail.com
|
1b2353d1c72900274f10c25e52be0fc3a3553207
|
fa9b5bbe55d261627b89a91d73a38ec4d132a76d
|
/plugins/ipythonnb/ipythonnb.py
|
012d239d07599726919fd25906356bcaf544e113
|
[] |
no_license
|
jcbozonier/DataBozo
|
cfda67a6e32a3761c4f647d19aad62feaca75198
|
2f3e899a563919760a072da33e5547758a086e18
|
refs/heads/master
| 2021-01-23T13:49:36.068876
| 2013-12-30T02:41:10
| 2013-12-30T02:41:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,284
|
py
|
import os
from pelican import signals
try:
from pelican.readers import BaseReader # new Pelican API
except ImportError:
from pelican.readers import Reader as BaseReader
try:
from pelican.readers import EXTENSIONS # old Pelican API
except ImportError:
EXTENSIONS = None
try:
import json
import markdown
from IPython.config import Config
from IPython.nbconvert.exporters import HTMLExporter
from IPython.nbconvert.filters.highlight import _pygment_highlight
from pygments.formatters import HtmlFormatter
except Exception as e:
IPython = False
raise e
CUSTOM_CSS = '''
<style type="text/css">
div.input_area {
border: none;
background: none;
}
pre.ipynb {
padding: 3px 9.5px;
}
@media print{*{text-shadow:none !important;color:#000 !important;background:transparent !important;box-shadow:none !important;} a,a:visited{text-decoration:underline;} a[href]:after{content:" (" attr(href) ")";} abbr[title]:after{content:" (" attr(title) ")";} .ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:"";} pre,blockquote{border:1px solid #999;page-break-inside:avoid;} thead{display:table-header-group;} tr,img{page-break-inside:avoid;} img{max-width:100% !important;} @page {margin:0.5cm;}p,h2,h3{orphans:3;widows:3;} h2,h3{page-break-after:avoid;}}
.cell.border-box-sizing.code_cell.vbox {
max-width: 750px;
margin: 0 auto;
}
pre {
font-size: 1em;
}
/* Forcing DataFrame table styles */
table.dataframe {
font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif;
font-size: 14px;
line-height: 20px;
}
table.dataframe th, td {
padding: 4px;
text-align: left;
}
.anchor-link {
display: none;
}
.anchor-link:hover {
display: blockquote;
}
</style>
'''
def custom_highlighter(source, language='ipython'):
formatter = HtmlFormatter(cssclass='highlight-ipynb')
output = _pygment_highlight(source, formatter, language)
output = output.replace('<pre>', '<pre class="ipynb">')
return output
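# Illustrative sketch, not part of the plugin: custom_highlighter returns
# ordinary pygments HTML but re-tags the <pre> element so that the pre.ipynb
# rule in CUSTOM_CSS applies.  The snippet and language below are arbitrary.
def _example_highlight():
    html = custom_highlighter("print('hi')", language='python')
    assert '<pre class="ipynb">' in html
    return html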
class iPythonNB(BaseReader):
enabled = True
file_extensions = ['ipynb']
def read(self, filepath):
filedir = os.path.dirname(filepath)
filename = os.path.basename(filepath)
_metadata = {}
# See if metadata file exists metadata
metadata_filename = filename.split('.')[0] + '.ipynb-meta'
metadata_filepath = os.path.join(filedir, metadata_filename)
if os.path.exists(metadata_filepath):
with open(metadata_filepath, 'r') as metadata_file:
    content = metadata_file.read()
md = markdown.Markdown(extensions=['meta'])
md.convert(content)
_metadata = md.Meta
for key, value in _metadata.items():
_metadata[key] = value[0]
else:
# Try to load metadata from inside ipython nb
with open(filepath) as ipynb_file:
    _metadata = json.load(ipynb_file)['metadata']
metadata = {}
for key, value in _metadata.items():
key = key.lower()
metadata[key] = self.process_metadata(key, value)
metadata['ipython'] = True
# Converting ipythonnb to html
config = Config({'CSSHTMLHeaderTransformer': {'enabled': True, 'highlight_class': '.highlight-ipynb'}})
exporter = HTMLExporter(config=config, template_file='basic', filters={'highlight2html': custom_highlighter})
body, info = exporter.from_filename(filepath)
def filter_tags(s):
l = s.split('\n')
exclude = ['a', '.rendered_html', '@media']
l = [i for i in l if len(list(filter(i.startswith, exclude))) == 0]
ans = '\n'.join(l)
return STYLE_TAG.format(ans)
STYLE_TAG = '<style type=\"text/css\">{0}</style>'
css = '\n'.join(filter_tags(css) for css in info['inlining']['css'])
css = css + CUSTOM_CSS
body = css + body
return body, metadata
def add_reader(arg):
if EXTENSIONS is None: # new pelican API:
arg.settings['READERS']['ipynb'] = iPythonNB
else:
EXTENSIONS['ipynb'] = iPythonNB
def register():
signals.initialized.connect(add_reader)
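# Illustrative configuration sketch, not part of the plugin: to activate this
# reader, pelicanconf.py points Pelican at the plugin (directory names below
# are placeholders) so that *.ipynb files are picked up alongside Markdown.
#
#     PLUGIN_PATHS = ['plugins']   # directory containing ipythonnb/
#     PLUGINS = ['ipythonnb']
#     MARKUP = ('md', 'ipynb')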
|
[
"darkxanthos@gmail.com"
] |
darkxanthos@gmail.com
|
5faff836f01be1ca229e5d45ff91386da1400121
|
bc7cd6689a8052d442ded8e876de1e5f22bfad6c
|
/lsml/feature/provided/shape.py
|
c4f910d38d3f18cbd76060977e01cb6f94890147
|
[
"BSD-3-Clause"
] |
permissive
|
tor4z/level-set-machine-learning
|
3a359e0d55137f3c0a9cbcaf25048c61573abd25
|
38460e514d48f3424bb8d3bd58cb3eb330153e64
|
refs/heads/master
| 2022-04-08T08:04:27.200188
| 2020-01-26T03:09:56
| 2020-01-26T03:09:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,052
|
py
|
import numpy
from skimage.measure import marching_cubes_lewiner as marching_cubes
from skimage.measure import find_contours, mesh_surface_area
from lsml.feature.base_feature import (
BaseShapeFeature, GLOBAL_FEATURE_TYPE, LOCAL_FEATURE_TYPE)
class Size(BaseShapeFeature):
""" Computes the size of the region enclosed by the zero level set of u.
In 1D, this is length. In 2D, it is area, and in 3D, it is volume.
"""
locality = GLOBAL_FEATURE_TYPE
@property
def name(self):
if self.ndim == 1:
return 'Length'
elif self.ndim == 2:
return 'Area'
elif self.ndim == 3:
return 'Volume'
else:
return 'Hyper-volume'
def compute_feature(self, u, dist, mask, dx):
size = (u > 0).sum() * numpy.prod(dx)
feature = numpy.empty_like(u)
feature[mask] = size
return feature
class BoundarySize(BaseShapeFeature):
""" Computes the size of the zero-level set of u. In 2D, this is
the length of the implicit curve. In 3D, it is surface area.
"""
locality = GLOBAL_FEATURE_TYPE
def __init__(self, ndim=2):
if ndim < 2 or ndim > 3:
msg = ("Boundary size is only defined for dimensions 2 and 3; "
"ndim provided = {}")
raise ValueError(msg.format(ndim))
super(BoundarySize, self).__init__(ndim)
@property
def name(self):
if self.ndim == 2:
return 'Curve length'
elif self.ndim == 3:
return 'Surface area'
def compute_feature(self, u, dist, mask, dx):
feature = numpy.empty_like(u)
if self.ndim == 2:
boundary_size = self._compute_arc_length(u, dx)
elif self.ndim == 3:
boundary_size = self._compute_surface_area(u, dx)
else:
msg = "Cannot compute boundary size for ndim = {}"
raise RuntimeError(msg.format(self.ndim))
feature[mask] = boundary_size
return feature
def _compute_arc_length(self, u, dx):
contours = find_contours(u, 0)
total_arc_length = 0.
for contour in contours:
closed_contour = numpy.vstack((contour, contour[0]))
closed_contour *= dx[::-1] # find_contours points in index space
arc_length = numpy.linalg.norm(numpy.diff(closed_contour, axis=0),
axis=1).sum()
total_arc_length += arc_length
return total_arc_length
def _compute_surface_area(self, u, dx):
verts, faces, _, _ = marching_cubes(u, 0., spacing=dx)
return mesh_surface_area(verts, faces)
class IsoperimetricRatio(BaseShapeFeature):
""" Computes the isoperimetric ratio, which is a measure of
circularity in two dimensions and a measure of sphericity in three.
In both cases, the maximum ratio value of 1 is achieved only for
a perfect circle or sphere.
"""
locality = GLOBAL_FEATURE_TYPE
@property
def name(self):
if self.ndim == 2:
return 'Circularity'
else:
return 'Sphericity'
def __init__(self, ndim=2):
if ndim < 2 or ndim > 3:
msg = ("Isoperimetric ratio defined for dimensions 2 and 3; "
"ndim provided = {}")
raise ValueError(msg.format(ndim))
super(IsoperimetricRatio, self).__init__(ndim)
def compute_feature(self, u, dist, mask, dx):
if self.ndim == 2:
return self.compute_feature2d(
u=u, dist=dist, mask=mask, dx=dx)
else:
return self.compute_feature3d(
u=u, dist=dist, mask=mask, dx=dx)
def compute_feature2d(self, u, dist, mask, dx):
# Compute the area
size = Size(ndim=2)
area = size.compute_feature(u=u, dist=dist, mask=mask, dx=dx)
        # Compute the curve length
boundary_size = BoundarySize(ndim=2)
curve_length = boundary_size.compute_feature(
u=u, dist=dist, mask=mask, dx=dx)
feature = numpy.empty_like(u)
feature[mask] = 4*numpy.pi*area[mask] / curve_length[mask]**2
return feature
def compute_feature3d(self, u, dist, mask, dx):
        # Compute the volume
size = Size(ndim=3)
volume = size(u=u, dist=dist, mask=mask, dx=dx)
        # Compute the surface area
boundary_size = BoundarySize(ndim=3)
surface_area = boundary_size(u=u, dist=dist, mask=mask, dx=dx)
feature = numpy.empty_like(u)
feature[mask] = 36*numpy.pi*volume[mask]**2 / surface_area[mask]**3
return feature
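# Sanity check of the two ratios above (editorial, not from the original
# source): for a circle of radius r, A = pi*r**2 and L = 2*pi*r, so
# 4*pi*A / L**2 = 1; for a sphere, V = (4/3)*pi*r**3 and S = 4*pi*r**2, so
# 36*pi*V**2 / S**3 = 1.  By the isoperimetric inequality, every other shape
# scores strictly below 1.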
class Moments(BaseShapeFeature):
""" Computes the normalized statistical moments of a given order along
a given axis
"""
locality = GLOBAL_FEATURE_TYPE
@property
def name(self):
return "Moments (axes={}; orders={})".format(self.axes, self.orders)
@property
def size(self):
return len(self.axes) * len(self.orders)
def __init__(self, ndim=2, axes=(0, 1), orders=(1, 2)):
""" Initialize a normalized statistical moment feature
ndim: int
Number of dimensions
axes: list[int], default=[0, 1]
The axes along which the moment should be computed
order: list[int], default=[1, 2]
The orders of the moments, e.g., order=1 yields the 'center of
mass' coordinate along the given axis and order=2 yields a measure
of spread along the given axis
"""
super(Moments, self).__init__(ndim)
for axis in axes:
if axis < 0 or axis > ndim-1:
msg = "axis provided ({}) must be one of 0 ... {}"
raise ValueError(msg.format(axis, ndim-1))
for order in orders:
if order < 1:
msg = "Moments order should be greater than or equal to 1"
raise ValueError(msg)
self.axes = axes
self.orders = orders
def _compute_center_of_mass(self, u, dx):
# Initialize center of mass container and mask with singular entry
center_of_mass = numpy.zeros(self.ndim)
        mask = numpy.empty(u.shape, dtype=bool)  # plain bool: the numpy.bool alias is removed in newer numpy
mask.ravel()[0] = True
for i in range(self.ndim):
center_of_mass[i] = self._compute_moment(
u=u, dist=u, mask=mask, dx=dx, axis=i, order=1)
return center_of_mass
def _compute_moment(self, u, dist, mask, dx, axis, order):
""" Computes the feature for just a single axis and order """
        indices = numpy.indices(u.shape, dtype=float)
mesh = indices[axis] * dx[axis]
size = Size(ndim=self.ndim)
# Normalize by centering if order is greater than 1
if order > 1:
center_of_mass = self._compute_center_of_mass(u=u, dx=dx)
mesh -= center_of_mass[axis]
measure = size(u=u, dist=dist, mask=mask, dx=dx)[mask].ravel()[0]
moment = (mesh**order)[u > 0].sum() * numpy.prod(dx) / measure
return moment
def compute_feature(self, u, dist, mask, dx):
from itertools import product
features = numpy.empty(u.shape + (self.size,))
for i, (axis, order) in enumerate(product(self.axes, self.orders)):
features[mask, i] = self._compute_moment(
u, dist, mask, dx, axis, order)
return features
class DistanceToCenterOfMass(BaseShapeFeature):
""" Computes the distance to the computed center of mass
"""
locality = LOCAL_FEATURE_TYPE
@property
def name(self):
return "Distance to center of mass"
def compute_feature(self, u, dist, mask, dx):
# Sneakily use the center of mass utility buried in the
# moment feature class
moment_feature = Moments(ndim=self.ndim)
center_of_mass = moment_feature._compute_center_of_mass(u, dx)
# Add extra axes for some broadcasting below
slicer = tuple([slice(None), ] + [None for _ in range(self.ndim)])
        indices = numpy.indices(u.shape, dtype=float)
mesh = indices * dx[slicer]
feature = numpy.empty_like(u)
feature[mask] = numpy.linalg.norm(
mesh - center_of_mass[slicer], axis=0)[mask]
return feature
def get_basic_shape_features(ndim=2, moment_orders=[1, 2]):
""" Generate a list of basic shape features at multiple sigma values
Parameters
----------
ndim : int, default=2
The number of dimension of the image to which these features
will be applied
moment_orders : list[float], default=[1, 2]
Orders for which we compute moments
Returns
-------
    features : list[BaseShapeFeature]
        A list of shape feature instances
"""
    # Moments is the only feature that accepts the requested orders
    return [
        BoundarySize(ndim=ndim),
        DistanceToCenterOfMass(ndim=ndim),
        IsoperimetricRatio(ndim=ndim),
        Moments(ndim=ndim, orders=tuple(moment_orders)),
        Size(ndim=ndim),
    ]
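# Illustrative usage sketch, not part of lsml: evaluating a single feature on a
# synthetic level set.  Argument names follow compute_feature(u, dist, mask, dx)
# above; Size ignores `dist`, so the level-set function is passed for both, and
# the grid size is arbitrary.  numpy is already imported at the top of this module.
def _example_area_of_circle():
    x, y = numpy.meshgrid(numpy.linspace(-1, 1, 201), numpy.linspace(-1, 1, 201))
    u = 0.5 - numpy.sqrt(x**2 + y**2)       # positive inside a radius-0.5 circle
    mask = numpy.ones(u.shape, dtype=bool)
    dx = numpy.array([2 / 200, 2 / 200])
    feature = Size(ndim=2).compute_feature(u=u, dist=u, mask=mask, dx=dx)
    return feature[mask][0]                 # approximately pi * 0.5**2 ~= 0.785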
|
[
"mhancock743@gmail.com"
] |
mhancock743@gmail.com
|
152865b4e9ca49df00660bb1023111f00e83ff72
|
096711aabd6f09aaf501be8919344a2da63e2c91
|
/parse_input.py
|
9ee4f9c9019e487ea5e6af6d7ce46e641a836ba3
|
[] |
no_license
|
KseniiaPrytkova/Computor_v1
|
c85f6c0d1f5c49d3582014d96dde330d234666fe
|
8cdec686eee3d6f178cf8a1c5612ebae4dcc79e8
|
refs/heads/master
| 2022-07-06T01:23:22.879725
| 2020-05-13T20:25:56
| 2020-05-13T20:25:56
| 257,624,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,986
|
py
|
#!/usr/bin/env python
import re
import sys
def print_reduced_form(m):
signs = []
s = ""
for i, e in enumerate(m[0]):
if (e > 0):
signs += [" + "]
else:
signs += [" - "]
s = "Reduced form: " + str(m[0][0]) + " * X^" + str(0)
if (len(m[0]) >= 2):
s += signs[1] + str(m[0][1]) + " * X^" + str(1)
if (len(m[0]) >= 3):
s += signs[2] + str(m[0][2]) + " * X^" + str(2)
s += " = 0"
s = s.replace("+ -", "-")
# print('\x1b[6;33;45m' + s + '\x1b[0m')
print(s)
def check_parsed_input(s, r_1, r_2):
s_2 = "".join([e[0] for e in r_1]) + "=" + "".join([e[0] for e in r_2])
if s_2 != s:
sys.stderr.write("usage: all terms must be of the form a * x^p\n" +
"example: 5 * X^0 + 4 * X^1 - 9.3 * X^2 = 1 * X^0\n")
sys.exit(1)
def parse_input(s):
# delete all spaces in a string:
s = s.replace(" ", "")
# ['5*X^0+4*X^1-9.3*X^2', '1*X^0']
s_l = s.split('=')
if len(s_l) != 2:
sys.stderr.write("ERROR: exactly one equal sign expected\n")
sys.exit(1)
regexp = r"(([+-]?\d+(\.\d+)?)[*][Xx]\^([+-]?\d+(\.\d+)?))"
r_1, r_2 = re.findall(regexp, s_l[0]), re.findall(regexp, s_l[1])
check_parsed_input(s, r_1, r_2)
m = [{0: [], 1: [], 2: []}, {0: [], 1: [], 2: []}]
for res in r_1:
m[0][int(res[3])] += [float(res[1])]
for res in r_2:
m[1][int(res[3])] += [float(res[1])]
n = [{}, {}]
# Simplify: left.
for key in m[0]:
n[0][key] = sum(m[0][key])
# Simplify: right.
for key in m[1]:
n[1][key] = sum(m[1][key])
# Simplify: move all elements in the right to the left.
for key in n[1]:
if key in n[0]:
n[0][key] = n[0][key] - n[1][key]
else:
n[0][key] = -n[1][key]
print_reduced_form(n)
# Get all x^y values from y=0 to y=2.
res = [ n[0][i] if i in n[0] else "-" for i in range(len(n[0])) ]
return(res)
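# Illustrative trace, not part of the project: for the example from the usage
# message,
#
#     parse_input("5 * X^0 + 4 * X^1 - 9.3 * X^2 = 1 * X^0")
#
# the right-hand side is moved to the left, a reduced form equivalent to
# "4.0 * X^0 + 4.0 * X^1 - 9.3 * X^2 = 0" is printed, and the returned
# coefficient list is [4.0, 4.0, -9.3] (constant term, X^1, X^2).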
|
[
"prytkovakseniia@gmail.com"
] |
prytkovakseniia@gmail.com
|
5adcc2b75ec8e92bbb4324fb2738b75d080041f2
|
25424a6ce39394c729088ac0dc0c1a8120eae3e5
|
/interactiveeuler/__main__.py
|
d4261c497a066c5e3a163c9d8f8d4a1d1b7a940a
|
[] |
no_license
|
mtutko/InteractiveEuler
|
44a64b220b4c1241353ccf5b38dbc010d1c769dc
|
1c7b9c36f00452b2aa532e5ba9f7845969167ece
|
refs/heads/master
| 2023-03-14T18:09:04.114268
| 2021-03-16T20:07:13
| 2021-03-16T20:07:13
| 323,968,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,448
|
py
|
"""
This is the module InteractiveEuler.
It is used to create a fun, interactive fluids solver.
"""
import sys
import numpy as np
from matplotlib import cm
import pyqtgraph as pg
from PyQt5 import QtGui, QtCore, QtWidgets
import fluid as fl
N = 100
MATSIZE = (N, N)
# Enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
def get_matplotlib_lut(name):
# Get the colormap
colormap = cm.get_cmap(name) # cm.get_cmap("CMRmap")
colormap._init()
lut = (colormap._lut * 255).view(np.ndarray) # Convert matplotlib colormap from 0-1 to 0 -255 for Qt
return lut
def getDomain():
dx = 1 / N
steparr = np.arange(dx/2, 1 + dx/2, step=dx)
X, Y = np.meshgrid(steparr, steparr)
return X, Y
def getXDomain():
X, _ = getDomain()
return np.transpose(X)
def getYDomain():
_, Y = getDomain()
return np.transpose(Y)
def getZeroMatrix():
return np.zeros(MATSIZE)
def initialMatrix():
return getYDomain()
def quiver(X, Y, U, V):
"""Generates vector field visualization on grid (X,Y) -- each of
which are numpy matrices of the X and Y coordinates that one wants
to plot at. (U,V) are numpy matrices that describe the vector field
at the supplied points (X,Y).
Note
----
A normalization to the vector field is not supplied in the current version
of the code.
Return values are expected to be used by
setData(self.datax, self.datay, connect='pairs')
"""
# step 1. position vectors on grid
x0 = np.ndarray.flatten(X)
y0 = np.ndarray.flatten(Y)
#print(x0)
#print(len(x0))
#print(x0.shape)
#print(np.ndarray.flatten(U).shape)
# step 2. compute end points
x1 = x0 + np.ndarray.flatten(U)
#print(x1)
y1 = y0 + np.ndarray.flatten(V)
    # step 3. compute scaling
mult_scale = 1.0
# step 4. interspace two arrays
xdata = np.ravel([x0, x1], order='F')
ydata = np.ravel([y0, y1], order='F')
#for count in range(len(xdata)):
# print(count, xdata[count], ydata[count])
# step 5. apply scaling
xdata *= mult_scale
ydata *= mult_scale
# step 6. return data
return xdata, ydata
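# Illustrative sketch, not part of the app: the interleaved arrays returned by
# quiver() are consumed pairwise, one line segment per arrow.  The grid and
# vector field below are arbitrary.
def _example_quiver():
    X, Y = np.meshgrid(np.arange(3), np.arange(3))
    U = np.ones_like(X, dtype=float)   # unit arrows pointing in +x
    V = np.zeros_like(Y, dtype=float)
    xdata, ydata = quiver(X, Y, U, V)
    # Intended consumer is a pyqtgraph plot item, e.g.
    # plot_item.setData(xdata, ydata, connect='pairs')
    return xdata, ydata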
class ResetSolutionButton(QtWidgets.QPushButton):
""" button for resetting solution """
def __init__(self, parent=None):
super(ResetSolutionButton, self).__init__(parent)
self.setText("Reset Solution")
class solutionView(pg.PlotWidget):
""" main class for viewing 2D solution """
def __init__(self, parent=None):
super(solutionView, self).__init__(parent)
self.ti = 0
self.nVelocitySkip = np.maximum(10, round(N / 15))
print(self.nVelocitySkip)
self.viewMat = initialMatrix()
self.img = pg.ImageItem(self.viewMat)
self.kern = np.array([
[0.0, 0.0, 0.25, 0.0, 0.0],
[0.0, 0.25, 0.5, 0.25, 0.0],
[0.25, 0.5, 1.0, 0.5, 0.25],
[0.0, 0.25, 0.5, 0.25, 0.0],
[0.0, 0.0, 0.25, 0.0, 0.0]])
self.img.setDrawKernel(self.kern, mask=self.kern,
center=(2, 2), mode='add')
self.levels = [0, 1]
self.img.setLevels(self.levels)
self.pressure_lut = get_matplotlib_lut("CMRmap")
self.scalar_lut = get_matplotlib_lut("nipy_spectral")
self.img.setLookupTable(self.pressure_lut) # initial colormap
self.setTitle("Solution")
x_axis = pg.AxisItem('bottom')
y_axis = pg.AxisItem('left')
axis_items = {'left': y_axis, 'bottom': x_axis}
self.setAxisItems(axis_items)
self.setLabel(axis='left', text='Y')
self.setLabel(axis='bottom', text='X')
self.showGrid(x=True, y=True, alpha=1)
# will eventually remove these
# self.hideAxis('bottom')
# self.hideAxis('left')
self.vb = self.getViewBox()
self.vb.setBackgroundColor((100, 10, 34))
self.vb.setMouseEnabled(x=False, y=False)
self.vb.addItem(self.img)
# quiver field for velocity
self.grid = fl.Grid(N)
self.p1 = self.plot()
self.setLimits(xMin=0, xMax=N, yMin=0, yMax=N)
self.plot_flowfield()
pen = pg.mkPen('y', width=3, style=QtCore.Qt.DashLine)
self.vb.setBorder(pen)
self.timer = QtCore.QTimer()
self.timer.setInterval(10)
self.timer.timeout.connect(self.update_plot)
self.timer.start()
def resetSolution(self):
self.viewMat = initialMatrix()
self.img.setImage(self.viewMat)
self.img.setLevels(self.levels)
print("solution was reset!")
def setPressureCmap(self):
self.img.setLookupTable(self.pressure_lut)
print("Pressure cmap appears!")
def setScalarCmap(self):
self.img.setLookupTable(self.scalar_lut)
print("Scalar cmap appears!")
def save_figure(self):
exporter = pg.exporters.ImageExporter(self)
exporter.export('testing!!!.png')
def plot_flowfield(self):
# possibly could use pg.arrayToQPath(x, y, connect='pairs')
self.U = 10*0.5*self.grid.X
self.V = 10*0.5*self.grid.Y
tempDataX, tempDataY = quiver((N-1)*self.grid.X[0::self.nVelocitySkip, 0::self.nVelocitySkip],
(N-1)*self.grid.Y[0::self.nVelocitySkip, 0::self.nVelocitySkip],
self.U[0::self.nVelocitySkip, 0::self.nVelocitySkip],
self.V[0::self.nVelocitySkip, 0::self.nVelocitySkip])
self.p1.setData(tempDataX, tempDataY, connect='pairs')
def update_plot(self):
# velocity field
self.U += np.random.normal(size=(N, N))
self.V += np.random.normal(size=(N, N))
tempDataX, tempDataY = quiver((N-1)*self.grid.X[0::self.nVelocitySkip, 0::self.nVelocitySkip],
(N-1)*self.grid.Y[0::self.nVelocitySkip, 0::self.nVelocitySkip],
self.U[0::self.nVelocitySkip, 0::self.nVelocitySkip],
self.V[0::self.nVelocitySkip, 0::self.nVelocitySkip])
self.p1.setData(tempDataX, tempDataY, connect='pairs')
self.ti += 1
def toggle_quiver(self, command):
if command == 'remove':
self.removeItem(self.p1)
print("Removing Flow Field")
elif command == 'add':
self.addItem(self.p1)
print("Adding Flow Field")
class solutionChooser(QtWidgets.QWidget):
""" main settings class for which solution to view """
def __init__(self, parent=None):
super(solutionChooser, self).__init__(parent)
layout = QtWidgets.QVBoxLayout()
        solution_label = QtWidgets.QLabel("Choose which solution to view")  # widget classes live in QtWidgets under PyQt5
        self.viewPressure = QtWidgets.QRadioButton("Pressure")
        self.viewScalar = QtWidgets.QRadioButton("Scalar Field")
        self.viewVelocity = QtWidgets.QCheckBox("Velocity")
self.viewPressure.setChecked(True)
self.viewVelocity.setChecked(True)
layout.addWidget(solution_label)
layout.addWidget(self.viewPressure)
layout.addWidget(self.viewScalar)
layout.addWidget(self.viewVelocity)
self.setLayout(layout)
class interactivityChooser(QtWidgets.QWidget):
""" main settings class for which interactivity to have """
def __init__(self, parent=None):
super(interactivityChooser, self).__init__(parent)
layout = QtWidgets.QVBoxLayout()
        interactivity_label = QtWidgets.QLabel("Choose type of interactivity")
        self.wallSource = QtWidgets.QRadioButton("Wall +")
        self.wallSink = QtWidgets.QRadioButton("Wall -")
        self.pressureSource = QtWidgets.QRadioButton("Pressure +")
        self.pressureSink = QtWidgets.QRadioButton("Pressure -")
        self.scalarSource = QtWidgets.QRadioButton("Scalar +")
        self.scalarSink = QtWidgets.QRadioButton("Scalar -")
        self.VelocitySource = QtWidgets.QRadioButton("Velocity +")
self.wallSource.setChecked(True)
layout.addWidget(interactivity_label)
layout.addWidget(self.wallSource)
layout.addWidget(self.wallSink)
layout.addWidget(self.pressureSource)
layout.addWidget(self.pressureSink)
layout.addWidget(self.scalarSource)
layout.addWidget(self.scalarSink)
layout.addWidget(self.VelocitySource)
self.setLayout(layout)
class Settings(QtWidgets.QWidget):
""" main settings class """
def __init__(self, parent=None):
super(Settings, self).__init__(parent)
layout = QtWidgets.QVBoxLayout()
self.reset_btn = ResetSolutionButton()
self.sc = solutionChooser()
self.ic = interactivityChooser()
layout.addWidget(self.reset_btn)
layout.addWidget(self.sc)
layout.addWidget(self.ic)
layout.addStretch(1)
self.setLayout(layout)
class MainWindow(QtWidgets.QMainWindow):
""" main class for InteractiveEuler """
def __init__(self):
super(MainWindow, self).__init__()
# initialize UI
self.init_ui()
# setup euler solver
def init_ui(self):
#uiPath = os.path.join("interactiveeuler","ui","interactiveeuler.ui")
#self.ui = uic.loadUi(uiPath)
self.setWindowTitle('InteractiveEuler')
self.resize(800, 600)
bar = self.menuBar()
# Creating menus using a QMenu object
fileMenu = QtWidgets.QMenu("&File", self)
bar.addMenu(fileMenu)
# Creating menus using a title
saveMenu = bar.addMenu("&Save")
self.saveAction = QtWidgets.QAction("&Save", self)
self.saveAction.triggered.connect(self.save_figure)
helpMenu = bar.addMenu("&Help")
main_layout = QtWidgets.QHBoxLayout()
self.sv = solutionView()
sl = Settings()
sl.reset_btn.clicked.connect(self.resetSignal)
# colormaps (and probably other things eventually)
sl.sc.viewScalar.toggled.connect(self.scalar_toggled)
sl.sc.viewPressure.toggled.connect(self.pressure_toggled)
sl.sc.viewVelocity.toggled.connect(lambda: self.velocity_toggled(sl.sc.viewVelocity))
main_layout.addWidget(self.sv)
main_layout.addWidget(sl)
# status bar
self.statusBar = QtWidgets.QStatusBar()
self.statusBar.showMessage('Message in statusbar.')
self.setStatusBar(self.statusBar)
width = self.frameGeometry().width()
height = self.frameGeometry().height()
s = f"(width, height) = ({width}, {height})"
self.statusBar.showMessage(s)
main_widget = QtWidgets.QWidget()
main_widget.setLayout(main_layout)
self.setCentralWidget(main_widget)
def resetSignal(self):
self.sv.resetSolution()
def pressure_toggled(self):
self.sv.setPressureCmap()
def scalar_toggled(self):
self.sv.setScalarCmap()
def velocity_toggled(self, btn):
if btn.isChecked():
self.sv.toggle_quiver("add")
else:
self.sv.toggle_quiver("remove")
def save_figure(self):
self.sv.save_figure()
print("called save figure!")
def main():
    app = QtWidgets.QApplication(sys.argv)  # QApplication comes from QtWidgets in PyQt5
app.setApplicationName('InteractiveEuler')
window = MainWindow()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
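# Run note (assumption, not stated in the original source): since this file is
# interactiveeuler/__main__.py, it is presumably launched as
# "python -m interactiveeuler" from the repository root or run directly as a
# script; either way the "fluid" module imported above must be on the import path.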
|
[
"peastham@math.fsu.edu"
] |
peastham@math.fsu.edu
|
a6fd729b46af741538efa1be1f8af4c5325dd1ad
|
ffc076dc622cf3b745c6da899aff193c340cdbd1
|
/graphs_weak.py
|
4777964d866c02524f5af83a4dbe9303430657a6
|
[] |
no_license
|
casperhansen/RBSH
|
e10fb69af9e3ea67c67be1cc4ca0a77468bc81e5
|
f464790c6d05f909a86f7b76defd4fec993ed5e7
|
refs/heads/master
| 2020-09-26T05:30:31.378722
| 2019-12-16T13:22:45
| 2019-12-16T13:22:45
| 226,176,650
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,570
|
py
|
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
import numpy as np
from tensorflow.losses import compute_weighted_loss, Reduction
def hinge_loss_eps(labels, logits, epsval, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
if labels is None:
raise ValueError("labels must not be None.")
if logits is None:
raise ValueError("logits must not be None.")
with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
logits = math_ops.to_float(logits)
labels = math_ops.to_float(labels)
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
all_eps = array_ops.ones_like(labels)*epsval
all_ones = array_ops.ones_like(labels)
labels = math_ops.subtract(2 * labels, all_ones)
losses = nn_ops.relu(
math_ops.subtract(all_eps, math_ops.multiply(labels, logits)))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
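# Worked example for the epsilon-hinge above (editorial, not from the original
# source): binary labels are mapped to {-1, +1} and the per-example loss is
# relu(epsval - y * logit).  With epsval = 1.0:
#   label = 1 (y = +1), logit =  0.3  ->  loss = max(0, 1.0 - 0.3) = 0.7
#   label = 0 (y = -1), logit = -2.0  ->  loss = max(0, 1.0 - 2.0) = 0.0
# i.e. a correctly-signed logit stops being penalised only once its margin
# reaches epsval.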
def make_graph_queue_SingleWeak(sigma_annealing, rank_weight, kl_weight, bits, dropout_keep, vocab_size, emb_size,
embedding, importance_embedding, optimizer, batch_size, inputs, test_placeholder_set,
is_eval, maskvalue, output_activation_function=tf.nn.tanh, deterministic_eval=True,
noise_type=1, pretrained_emb=False, emb_input_embedding=None, use_sigma_directly=False,
use_ranking=True, hinge_val=1.0):
print("network, use ranking", use_ranking)
hidden_neurons_encode = 1000
encoder_layers = 2
used_input = tf.cond(is_eval, lambda: test_placeholder_set, lambda: inputs, name="train_or_test_cond")
doc, doc1, doc2, doc1weak, doc2weak, masking = used_input
if emb_input_embedding is not None:
print("apply Importance embedding on docs")
doc_enc = doc * importance_embedding #tf.matmul(doc, tf.expand_dims(importance_embedding, -1) * emb_input_embedding)
doc1_enc = doc1 * importance_embedding #tf.matmul(doc1, tf.expand_dims(importance_embedding, -1) * emb_input_embedding)
doc2_enc = doc2 * importance_embedding #tf.matmul(doc2, tf.expand_dims(importance_embedding, -1) * emb_input_embedding)
else:
doc_enc = doc
doc1_enc = doc1
doc2_enc = doc2
#################### Bernoulli Sample #####################
## ref code: https://r2rt.com/binary-stochastic-neurons-in-tensorflow.html
def bernoulliSample(x):
"""
Uses a tensor whose values are in [0,1] to sample a tensor with values in {0, 1},
using the straight through estimator for the gradient.
E.g.,:
if x is 0.6, bernoulliSample(x) will be 1 with probability 0.6, and 0 otherwise,
and the gradient will be pass-through (identity).
"""
g = tf.get_default_graph()
with ops.name_scope("BernoulliSample") as name:
with g.gradient_override_map({"Ceil": "Identity", "Sub": "BernoulliSample_ST"}):
if deterministic_eval:
mus = tf.cond(is_eval, lambda: tf.ones(tf.shape(x))*0.5, lambda: tf.random_uniform(tf.shape(x)))
else:
mus = tf.random_uniform(tf.shape(x))
return tf.ceil(x - mus, name=name)
@ops.RegisterGradient("BernoulliSample_ST")
def bernoulliSample_ST(op, grad):
return [grad, tf.zeros(tf.shape(op.inputs[1]))]
###########################################################
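    # Editorial explanation (not from the original source): the pair of
    # functions above forms a straight-through estimator.  Forward pass:
    # ceil(x - u) with u ~ Uniform(0, 1) yields a hard {0, 1} sample that is 1
    # with probability x.  Backward pass: Ceil's gradient is remapped to
    # Identity and Sub's to BernoulliSample_ST, so the incoming gradient flows
    # unchanged back to the sigmoid probabilities x while the noise u receives
    # a zero gradient.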
# encode
def encoder(doc, hidden_neurons_encode, encoder_layers):
doc_layer = tf.layers.dense(doc, hidden_neurons_encode, name="encode_layer0",
reuse=tf.AUTO_REUSE, activation=tf.nn.relu)
#doc_layer = tf.nn.dropout(doc_layer, dropout_keep)
for i in range(1,encoder_layers):
doc_layer = tf.layers.dense(doc_layer, hidden_neurons_encode, name="encode_layer"+str(i),
reuse=tf.AUTO_REUSE, activation=tf.nn.relu)
doc_layer = tf.nn.dropout(doc_layer, tf.cond(is_eval, lambda: 1.0, lambda: dropout_keep))
doc_layer = tf.layers.dense(doc_layer, bits, name="last_encode", reuse=tf.AUTO_REUSE, activation=tf.nn.sigmoid)
bit_vector = bernoulliSample(doc_layer)
return bit_vector, doc_layer
bit_vector, cont_vector = encoder(doc_enc, hidden_neurons_encode, encoder_layers)
if use_ranking:
bit_vector_doc1, cont1 = encoder(doc1_enc, hidden_neurons_encode, encoder_layers)
bit_vector_doc2, cont2 = encoder(doc2_enc, hidden_neurons_encode, encoder_layers)
# decode
# transform s from [None, bits] into [None, emb_size]
log_sigma2 = tf.layers.dense(cont_vector, bits, name="decode_logsigma2", activation=tf.nn.sigmoid)
e = tf.random.normal([batch_size, bits])
if noise_type == 2: #annealing
print("use annealing")
noisy_bit_vector = tf.math.multiply(e, sigma_annealing) + bit_vector
#noisy_bit_vector = tf.maximum(noisy_bit_vector, 0)
elif noise_type == 1: #learned
if use_sigma_directly:
print("use sigma directly")
noisy_bit_vector = tf.math.multiply(e, log_sigma2) + bit_vector
else:
noisy_bit_vector = tf.math.multiply(e, tf.sqrt(tf.exp(log_sigma2))) + bit_vector
elif noise_type == 0: #none
noisy_bit_vector = bit_vector
else:
print("unknown noise_type", noise_type)
exit()
# s * Emb
softmax_bias = tf.Variable(tf.zeros(vocab_size), name="softmax_bias")
#print(importance_embedding, tf.transpose(embedding))
#print( tf.multiply(tf.transpose(embedding), importance_embedding) )
#exit()
#if pretrained_emb:
print("pretrained embedding downscaling layer")
embedding = tf.layers.dense(embedding, bits, name="lower_dim_embedding_layer")
dot_emb_vector = tf.linalg.matmul(noisy_bit_vector, tf.multiply(tf.transpose(embedding), importance_embedding) ) + softmax_bias
softmaxed = tf.nn.softmax(dot_emb_vector)
logaritmed = tf.math.log(tf.maximum(softmaxed, 1e-10))
logaritmed = tf.multiply(logaritmed, tf.cast(doc > 0, tf.float32)) #tf.cast(doc>0, tf.float32)) # set words not occuring to 0
# loss
num_samples = tf.reduce_sum(masking)
def my_dot_prod(a,b):
return tf.reduce_sum(tf.multiply(a, b), 1)
if use_ranking:
use_dot = False
if use_dot:
bit_vector_sub = 2*bit_vector - 1
bit_vector_doc1_sub = 2*bit_vector_doc1 - 1
bit_vector_doc2_sub = 2*bit_vector_doc2 - 1
dist1 = my_dot_prod(bit_vector_sub, bit_vector_doc1_sub) #tf.reduce_sum(tf.math.pow(bit_vector - bit_vector_doc1, 2), axis=1) #tf.norm(bit_vector - bit_vector_doc1, axis=1)
dist2 = my_dot_prod(bit_vector_sub, bit_vector_doc2_sub) #tf.reduce_sum(tf.math.pow(bit_vector - bit_vector_doc2, 2), axis=1) #tf.norm(bit_vector - bit_vector_doc2, axis=1)
signpart = tf.cast(doc1weak > doc2weak, tf.float32)
else:
dist1 = tf.reduce_sum(tf.math.pow(bit_vector - bit_vector_doc1, 2), axis=1) #tf.norm(bit_vector - bit_vector_doc1, axis=1)
dist2 = tf.reduce_sum(tf.math.pow(bit_vector - bit_vector_doc2, 2), axis=1) #tf.norm(bit_vector - bit_vector_doc2, axis=1)
signpart = tf.cast(doc1weak > doc2weak, tf.float32)
if use_dot:
rank_loss = hinge_loss_eps(labels=(signpart), logits=(dist1-dist2), epsval= hinge_val)#bits/4.0)
else:
equal_score = tf.cast( tf.abs(doc1weak - doc2weak) < 1e-10, tf.float32)
unequal_score = tf.cast( tf.abs(doc1weak - doc2weak) >= 1e-10, tf.float32)
rank_loss_uneq = hinge_loss_eps(labels=(signpart), logits=(dist2 - dist1), epsval=hinge_val, weights=unequal_score)#bits / 8.0)
eq_dist = tf.abs(dist2 - dist1)
rank_loss_eq = compute_weighted_loss( eq_dist, weights=equal_score, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS)
rank_loss = rank_loss_uneq + rank_loss_eq
if use_ranking:
e1 = tf.random.normal([batch_size, bits])
e2 = tf.random.normal([batch_size, bits])
noisy_bit_vector1 = tf.math.multiply(e1, sigma_annealing) + bit_vector_doc1
noisy_bit_vector2 = tf.math.multiply(e2, sigma_annealing) + bit_vector_doc2
dot_emb_vector1 = tf.linalg.matmul(noisy_bit_vector1,
tf.multiply(tf.transpose(embedding), importance_embedding)) + softmax_bias
dot_emb_vector2 = tf.linalg.matmul(noisy_bit_vector2,
tf.multiply(tf.transpose(embedding), importance_embedding)) + softmax_bias
softmaxed1 = tf.nn.softmax(dot_emb_vector1)
logaritmed1 = tf.math.log(tf.maximum(softmaxed1, 1e-10))
logaritmed1 = tf.multiply(logaritmed1, tf.cast(doc1 > 0, tf.float32))
softmaxed2 = tf.nn.softmax(dot_emb_vector2)
logaritmed2 = tf.math.log(tf.maximum(softmaxed2, 1e-10))
logaritmed2 = tf.multiply(logaritmed2, tf.cast(doc2 > 0, tf.float32))
loss_recon1 = tf.reduce_sum(tf.multiply(tf.reduce_sum(logaritmed1, 1), masking) / num_samples, axis=0)
loss_recon2 = tf.reduce_sum(tf.multiply(tf.reduce_sum(logaritmed2, 1), masking) / num_samples, axis=0)
doc_1_2_recon_loss = -(loss_recon1 + loss_recon2)
# VAE loss part
loss_recon = tf.reduce_sum( tf.multiply(tf.reduce_sum(logaritmed, 1), masking)/num_samples, axis=0)
recon_per_word = logaritmed #print("--------", logaritmed)
print("#################", importance_embedding)
loss_kl = tf.multiply(cont_vector, tf.math.log( tf.maximum(cont_vector/0.5, 1e-10) )) + \
tf.multiply(1 - cont_vector, tf.math.log( tf.maximum((1 - cont_vector)/0.5, 1e-10) ))
loss_kl = tf.reduce_sum( tf.multiply(tf.reduce_sum(loss_kl, 1), masking)/num_samples, axis=0)
loss_vae = -(loss_recon - kl_weight*loss_kl)
if use_ranking:
loss_rank_weighted = rank_weight * rank_loss
loss = loss_rank_weighted + loss_vae + doc_1_2_recon_loss # we want to maximize, but Adam only support minimize
loss_rank_unweighted = rank_loss - (loss_recon - kl_weight*loss_kl) + doc_1_2_recon_loss
else:
loss = loss_vae
rank_loss = loss*0
loss_rank_weighted = -1
loss_rank_unweighted = -1
dist1 = -1
dist2 = -1
signpart = -1
rank_loss_eq = rank_loss
rank_loss_uneq = rank_loss
tf.summary.scalar('loss_vae', loss_vae)
tf.summary.scalar('loss_kl', loss_kl)
tf.summary.scalar('loss_recon', loss_recon)
tf.summary.scalar('loss_rank_raw', rank_loss)
tf.summary.scalar('loss_rank_weighted', loss_rank_weighted)
tf.summary.scalar('loss_total', loss)
tf.summary.scalar("kl_weight", kl_weight)
tf.summary.scalar("rank_weight", rank_weight)
tf.summary.scalar("unweighted_loss", loss_rank_unweighted)
tf.summary.scalar('loss_rank_raw_eq', rank_loss_eq)
tf.summary.scalar('loss_rank_raw_uneq', rank_loss_uneq)
#tf.summary.scalar("learned sigma value", tf.reduce_sum(log_sigma2)/(num_samples*bits))
#tf.summary.scalar("learned sigma value (as used)", tf.reduce_sum(tf.sqrt(tf.exp(log_sigma2)))/(num_samples*bits))
tf.summary.scalar("sigma annealing value", sigma_annealing)
print("vae",loss_vae)
print("recon",loss_recon)
print("kl",loss_kl)
print("rank weighted",loss_rank_weighted)
print("rank loss", rank_loss)
print("total loss", loss)
print("kl weight", kl_weight)
# optimize
grad = optimizer.minimize(loss)
init = tf.global_variables_initializer()
# make input dictionary
def input_dict(docval, maska):
return {doc: docval, masking: maska}
merged = tf.summary.merge_all()
return init, grad, loss, input_dict, bit_vector, cont_vector, is_eval, dist1, dist2, signpart, \
rank_loss, merged, loss_rank_unweighted, importance_embedding, recon_per_word
|
[
"noreply@github.com"
] |
casperhansen.noreply@github.com
|