blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8968c8502e1f77e7db79c40fab30e6f2050ba0e4
|
e39382423b52caa7f973496aaf04da0f517223e4
|
/RtMonSys/models/models_common.py
|
bfecb8c13303865388551a9ced03a90a3d9953b8
|
[] |
no_license
|
haloooo/2S
|
8af7b2d1c2e66415d00f6fa090a8b761eadf831d
|
2767cd447a53e91910deb752b70d5d03682d6266
|
refs/heads/master
| 2020-04-05T06:43:41.143662
| 2018-11-08T04:06:02
| 2018-11-08T04:06:02
| 156,648,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,544
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.db import connections
import os
from RtMonSys.models import model_setting
def get_config(key):
    # Load the configuration file
    file_path = os.getcwd() + '/config/config.json'
    with open(file_path) as fp:
        json_data = json.load(fp)
    return json_data[key]
def databaseException(exp):
if 'could not connect to server' in str(exp):
return 101
else:
return 102
# Fetch NG data
def getDetailList(model_name, process_cd, datatype_id, start_time, end_time, firstFlg):
result = []
sql_orderby = "ORDER BY line_cd, station_slot"
# When fetching data for the production-line buttons
if firstFlg:
sql_orderby = " ORDER BY line_cd ASC, count_serial_cd DESC "
try:
# Read the line array from the config file
database_list = get_config("database")
lineArr = []
# Look up the limit for this model_name and process_cd
for item in database_list:
if item["MODEL"] == model_name:
for dataItem in item["DATA"]:
if dataItem["PROCESS"] == process_cd:
limit = dataItem["LIMIT"]
break
for row in database_list:
if row['MODEL'] == model_name:
# Read LINE from the config file
lineArr = row['LINE']
break
cur = connections[model_name].cursor()
sql = "SELECT \
line_cd,\
station_slot,\
COUNT( serial_cd ) AS count_serial_cd \
FROM\
(\
SELECT DISTINCT \
line_cd,\
T2.station_slot,\
T1.serial_cd,\
judge_text \
FROM\
(\
SELECT \
serial_cd,\
line_cd,\
judge_text \
FROM\
(\
SELECT \
serial_cd,\
process_at,\
line_cd,\
judge_text, \
ROW_NUMBER ( ) OVER ( PARTITION BY line_cd, serial_cd ORDER BY process_at DESC ) RANK1 \
FROM\
(\
SELECT DISTINCT \
i.serial_cd,\
i.process_at,\
p.line_cd,\
i.judge_text,\
ROW_NUMBER ( ) OVER ( PARTITION BY line_cd ORDER BY i.process_at DESC ) RANK \
FROM \
t_insp_" + model_name + " i,\
m_process p \
WHERE \
i.proc_uuid = p.proc_uuid \
AND p.line_cd IN ( %s ) \
AND p.process_cd = '(process_cd)' \
AND i.process_at >= '(start_time)' \
AND i.process_at <= '(end_time)' \
ORDER BY i.process_at DESC \
) BASE1 \
WHERE RANK <= (LIMIT) \
) BASE2 \
WHERE \
judge_text = '1' \
AND RANK1 = 1 \
) T1 \
INNER JOIN (\
SELECT DISTINCT \
f.partsserial_cd AS station_slot,\
f.serial_cd,\
f.process_at \
FROM \
m_process p,\
t_faci_" + model_name + " f \
WHERE \
f.proc_uuid = p.proc_uuid \
AND p.process_cd = '(process_cd)' \
AND f.datatype_id = '(datatype_id)' \
) T2 ON T1.serial_cd = T2.serial_cd AND T1.judge_text = '1' \
) T3 \
GROUP BY \
line_cd,station_slot " + sql_orderby
sql = sql % ','.join(['%s'] * len(lineArr))
sql = sql.replace("(process_cd)", process_cd).replace("(start_time)", start_time).replace("(end_time)", end_time) \
.replace("(datatype_id)", datatype_id).replace("(LIMIT)", str(limit))
cur.execute(sql, lineArr)
rows = cur.fetchall()
# Compute the corresponding IN/Yield
sql_1 = "SELECT \
line_cd,\
station_slot,\
COUNT( serial_cd ) AS count_serial_cd \
FROM\
(\
SELECT DISTINCT \
line_cd,\
T2.station_slot,\
T1.serial_cd,\
judge_text \
FROM\
(\
SELECT \
serial_cd,\
line_cd,\
judge_text \
FROM\
(\
SELECT \
serial_cd,\
process_at,\
line_cd,\
judge_text, \
ROW_NUMBER ( ) OVER ( PARTITION BY line_cd, serial_cd ORDER BY process_at DESC ) RANK1 \
FROM\
(\
SELECT DISTINCT \
i.serial_cd,\
i.process_at,\
p.line_cd,\
i.judge_text,\
ROW_NUMBER ( ) OVER ( PARTITION BY line_cd ORDER BY i.process_at DESC ) RANK \
FROM \
t_insp_" + model_name + " i,\
m_process p \
WHERE \
i.proc_uuid = p.proc_uuid \
AND p.line_cd IN ( %s ) \
AND p.process_cd = '(process_cd)' \
AND i.process_at >= '(start_time)' \
AND i.process_at <= '(end_time)' \
ORDER BY i.process_at DESC \
) BASE1 \
WHERE RANK <= (LIMIT) \
) BASE2 \
WHERE \
RANK1 = 1 \
) T1 \
INNER JOIN (\
SELECT DISTINCT \
f.partsserial_cd AS station_slot,\
f.serial_cd,\
f.process_at \
FROM \
m_process p,\
t_faci_" + model_name + " f \
WHERE \
f.proc_uuid = p.proc_uuid \
AND p.process_cd = '(process_cd)' \
AND f.datatype_id = '(datatype_id)' \
) T2 ON T1.serial_cd = T2.serial_cd \
) T3 \
GROUP BY \
line_cd,station_slot " + sql_orderby
sql_1 = sql_1 % ','.join(['%s'] * len(lineArr))
sql_1 = sql_1.replace("(process_cd)", process_cd).replace("(start_time)", start_time).replace("(end_time)",
end_time) \
.replace("(datatype_id)", datatype_id).replace("(LIMIT)", str(limit))
cur.execute(sql_1, lineArr)
rows_1 = cur.fetchall()
# Case: the production-line buttons
if firstFlg:
last_line = ""
for row in rows:
# Keep only the first row per line (the station with the highest NG count)
if row[0] != last_line:
result.append({"line_cd": row[0], "ng_count": int(row[2]), })
last_line = row[0]
else:  # Case: the NG list below
    # Get NG_RGB
for item in database_list:
if item["MODEL"] == model_name:
for dataItem in item["DATA"]:
if dataItem["PROCESS"] == process_cd:
ngRgb = dataItem["JIG"]
break
count = 0
for row in rows:
in_ = int(rows_1[count][2])
yield_ = 100 * ((in_ - int(row[2])) / in_)
result.append({"line_cd":row[0], "station_slot":row[1], "ng_count":int(row[2]),"in":int(rows_1[count][2]),"yield":yield_ })
count = count + 1
except BaseException as exp:
print(exp)
result = databaseException(exp)
connections[model_name].close()
return result
def getDetailList_update2(model_name,name, process_name, datatype_id, limit, start_time, end_time,JIG_type):
result = []
try:
row = model_setting.getProcess(model_name, name)
INSPECT = (row[0]['INSPECT']).split(',')
str_inspect = ""
for item in INSPECT:
str_inspect += "'%s'," % (item)
str_inspect = str_inspect[:-1]
database_list = get_config("database")
lineArr = []
for row in database_list:
if row['MODEL'] == model_name:
# Read LINE from the config file
lineArr = row['LINE']
break
cur = connections[model_name].cursor()
# 1. before
# sql = "SELECT DISTINCT line_cd,station_slot,serial_cd,judge_text,process_at,inspect_cd,inspect_text FROM\
# (SELECT T2.line_cd,station_slot,T2.serial_cd,T2.judge_text,T2.process_at,d.inspect_cd,d.inspect_text FROM( \
# SELECT line_cd,serial_cd,judge_text,insp_seq,process_at FROM ( SELECT DISTINCT P .line_cd,i.serial_cd, \
# i.process_at,i.judge_text,i.insp_seq,ROW_NUMBER () OVER ( PARTITION BY serial_cd ORDER BY process_at DESC \
# ) RANK FROM t_insp_" + model_name + " i,m_process P WHERE i.proc_uuid = P .proc_uuid AND P .line_cd IN (%s) \
# AND P .process_cd = '(process_cd)' AND i.process_at >= '(start_time)' AND i.process_at <= '(end_time)' \
# ORDER BY i.process_at DESC LIMIT '(limit)') BASE WHERE RANK = 1 ) T2 LEFT JOIN t_data_" + model_name + " d ON T2.insp_seq = d.insp_seq \
# INNER JOIN ( SELECT DISTINCT f.partsserial_cd AS station_slot,f.serial_cd,f.process_at FROM m_process P,t_faci_" + model_name + " f \
# WHERE f.proc_uuid = P .proc_uuid AND P .Process_cd = '(process_cd)' AND f.datatype_id = '(datatype_id)' ) T3 ON T2.serial_cd = T3.serial_cd) T4 \
# WHERE inspect_cd in ("+ str_inspect +") "
# sql = sql % ','.join(['%s'] * len(lineArr))
# sql = sql.replace("(process_cd)", process_name).replace("(start_time)", start_time).replace("(end_time)",end_time).replace("(datatype_id)", datatype_id).replace("(limit)", str(limit))
# 2. after
sql = '''select m.line_cd,m.partsserial_cd,m.serial_cd,d.judge_text,m.process_at,d.inspect_cd,d.inspect_text from t_data_(model_name) as d,
(select i.insp_seq,f.serial_cd,i.line_cd,i.process_at,f.partsserial_cd,i.judge_text,row_number() over (partition by f.serial_cd,f.datatype_id order by f.process_at) from t_faci_(model_name) f ,
(select insp_seq,serial_cd,process_at,judge_text,line_cd,process_cd from
(select a.insp_seq,a.serial_cd,a.process_at,a.judge_text,b.line_cd,b.process_cd,row_number() over (partition by a.serial_cd order by a.process_at desc) from t_insp_(model_name) a,m_process b
where a.proc_uuid = b.proc_uuid
and b.line_cd in (%s)
and b.process_cd = '(process_cd)'
and a.process_at >= '(start_time)'
and a.process_at <= '(end_time)'
) as n
where n.row_number = '1'
order by n.process_at desc
limit '(limit)'
) as I
where f.serial_cd = i.serial_cd
and f.datatype_id = '(datatype_id)'
) as m
where d.insp_seq = m.insp_seq
and m.row_number = '1'
and d.inspect_cd in ('''+ str_inspect +''')'''
sql = sql % ','.join(['%s'] * len(lineArr))
sql = sql.replace('(model_name)', model_name)\
.replace("(process_cd)", process_name)\
.replace("(start_time)", start_time)\
.replace("(end_time)",end_time)\
.replace("(datatype_id)", datatype_id)\
.replace("(limit)", str(limit))
cur.execute(sql, lineArr)
rows = cur.fetchall()
data_list = []
for row in rows:
re = {}
re['line_cd'] = row[0]
re['station_slot'] = row[1]
re['serial_cd'] = row[2]
re['judge_text'] = row[3]
re['process_at'] = row[4].strftime("%Y-%m-%d %H:%M:%S")
re['inspect_cd'] = row[5]
re['inspect_text'] = row[6]
data_list.append(re)
# if not row[0] in line_cd_list:
# line_cd_list.append(row[0])
# data_list = [
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'serial_cd': 'GH9830188QVJL4K7A', 'judge_text': '0',
# 'process_at': '2018-08-05 12:30:24', 'inspect_cd': 'OVEN_ST_TIME', 'inspect_text': '16:35:56'},
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'serial_cd': 'GH9830188QVJL4K7B', 'judge_text': '0',
# 'process_at': '2018-08-05 12:30:24', 'inspect_cd': 'OVEN_ED_TIME', 'inspect_text': '17:34:56'},
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'serial_cd': 'GH9830188W3JL4K7C', 'judge_text': '1',
# 'process_at': '2018-08-05 12:34:03', 'inspect_cd': 'OVEN_ST_TIME', 'inspect_text': '0'},
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'serial_cd': 'GH9830188W3JL4K7D', 'judge_text': '1',
# 'process_at': '2018-08-05 12:34:03', 'inspect_cd': 'OVEN_ED_TIME', 'inspect_text': '0'},
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'serial_cd': 'GH9830188W3JL4K7D', 'judge_text': '1',
# 'process_at': '2018-08-05 12:34:03', 'inspect_cd': 'OVEN_ST_TIME', 'inspect_text': '0'}]
JIG_list = []
line_cd_list = []
serial_cd_list_1 = []
serial_cd_list_2 = []
for item in data_list:
if not item['line_cd'] in line_cd_list:
line_cd_list.append(item['line_cd'])
for item in line_cd_list:
for row_1 in data_list:
if item == row_1['line_cd']:
if not row_1['station_slot'] in JIG_list:
JIG_list.append(row_1['station_slot'])
for JIG_item in JIG_list:
IN_ = 0
NG_ = 0
for row_ in data_list:
if row_['line_cd'] == item and row_['station_slot'] == JIG_item and int(row_['judge_text']) == 1:
if not row_['serial_cd'] in serial_cd_list_1:
serial_cd_list_1.append(row_['serial_cd'])
NG_ = NG_ + 1
if row_['line_cd'] == item and row_['station_slot'] == JIG_item:
if not row_['serial_cd'] in serial_cd_list_2:
serial_cd_list_2.append(row_['serial_cd'])
IN_ = IN_ + 1
Yield = float('%.2f' % (100 * ((IN_- NG_)/IN_)))
result.append({"line_cd":item,"station_slot":JIG_item,"ng_count":NG_,"in":IN_,"yield":Yield})
JIG_list = []
if JIG_type == 'NG COUNT':
result.sort(key=lambda x:x['ng_count'], reverse=True)
else:
result.sort(key=lambda x:x['yield'])
except BaseException as exp:
print(exp)
result = databaseException(exp)
connections[model_name].close()
return result, data_list
def getDetailList_update3(model_name,name,line_cd, process_name, datatype_id, limit, station_slot, start_time, end_time):
try:
row = model_setting.getProcess(model_name, name)
INSPECT = (row[0]['INSPECT']).split(',')
str_inspect = ""
for item in INSPECT:
str_inspect += "'%s'," % (item)
str_inspect = str_inspect[:-1]
cur = connections[model_name].cursor()
sql = '''select m.line_cd,m.partsserial_cd,m.serial_cd,d.judge_text,m.process_at,d.inspect_cd,d.inspect_text from t_data_(model_name) as d,
(select i.insp_seq,f.serial_cd,i.line_cd,i.process_at,f.partsserial_cd,i.judge_text,row_number() over (partition by f.serial_cd,f.datatype_id order by f.process_at) from t_faci_(model_name) f ,
(select insp_seq,serial_cd,process_at,judge_text,line_cd,process_cd from
(select a.insp_seq,a.serial_cd,a.process_at,a.judge_text,b.line_cd,b.process_cd,row_number() over (partition by a.serial_cd order by a.process_at desc) from t_insp_(model_name) a,m_process b
where a.proc_uuid = b.proc_uuid
and b.line_cd = '(line_cd)'
and b.process_cd = '(process_cd)'
and a.process_at >= '(start_time)'
and a.process_at <= '(end_time)'
) as n
where n.row_number = '1'
order by n.process_at desc
limit '(limit)'
) as I
where f.serial_cd = i.serial_cd
and f.datatype_id = '(datatype_id)'
) as m
where d.insp_seq = m.insp_seq
and m.row_number = '1'
and m.partsserial_cd = '(station_slot)'
and d.inspect_cd in ('''+ str_inspect +''')'''
sql = sql.replace('(model_name)', model_name) \
.replace("(line_cd)", line_cd) \
.replace("(process_cd)", process_name)\
.replace("(start_time)", start_time)\
.replace("(end_time)",end_time)\
.replace("(datatype_id)", datatype_id)\
.replace("(limit)", str(limit)) \
.replace("(station_slot)", station_slot)
cur.execute(sql)
rows = cur.fetchall()
data_list = []
for row in rows:
re = {}
re['line_cd'] = row[0]
re['station_slot'] = row[1]
re['serial_cd'] = row[2]
re['judge_text'] = row[3]
re['process_at'] = row[4].strftime("%Y-%m-%d %H:%M:%S")
re['inspect_cd'] = row[5]
re['inspect_text'] = row[6]
data_list.append(re)
# data_list = [
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'serial_cd': 'GH9830188QVJL4K7A', 'judge_text': '0',
# 'process_at': '2018-08-05 12:30:24', 'inspect_cd': 'OVEN_ST_TIME', 'inspect_text': '16:35:56'},
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'serial_cd': 'GH9830188QVJL4K7B', 'judge_text': '0',
# 'process_at': '2018-08-05 12:30:24', 'inspect_cd': 'OVEN_ED_TIME', 'inspect_text': '17:34:56'},
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'serial_cd': 'GH9830188W3JL4K7D', 'judge_text': '0',
# 'process_at': '2018-08-05 12:34:03', 'inspect_cd': 'OVEN_ST_TIME', 'inspect_text': '0'},
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'serial_cd': 'GH9830188W3JL4K7D', 'judge_text': '1',
# 'process_at': '2018-08-05 12:34:03', 'inspect_cd': 'OVEN_ED_TIME', 'inspect_text': '0'},
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'serial_cd': 'GH9830188W3JL4K7D', 'judge_text': '1',
# 'process_at': '2018-08-05 12:34:03', 'inspect_cd': 'OVEN_ST_TIME', 'inspect_text': '0'}]
except BaseException as exp:
data_list = databaseException(exp)
connections[model_name].close()
return data_list
def getDetailList_update4(model_name,name,line_cd, process_name, datatype_id, limit, start_time, end_time):
try:
row = model_setting.getProcess(model_name, name)
INSPECT = (row[0]['INSPECT']).split(',')
str_inspect = ""
for item in INSPECT:
str_inspect += "'%s'," % (item)
str_inspect = str_inspect[:-1]
cur = connections[model_name].cursor()
sql = '''select m.line_cd,m.partsserial_cd,m.serial_cd,d.judge_text,m.process_at,d.inspect_cd,d.inspect_text from t_data_(model_name) as d,
(select i.insp_seq,f.serial_cd,i.line_cd,i.process_at,f.partsserial_cd,i.judge_text,row_number() over (partition by f.serial_cd,f.datatype_id order by f.process_at) from t_faci_(model_name) f ,
(select insp_seq,serial_cd,process_at,judge_text,line_cd,process_cd from
(select a.insp_seq,a.serial_cd,a.process_at,a.judge_text,b.line_cd,b.process_cd,row_number() over (partition by a.serial_cd order by a.process_at desc) from t_insp_(model_name) a,m_process b
where a.proc_uuid = b.proc_uuid
and b.line_cd = '(line_cd)'
and b.process_cd = '(process_cd)'
and a.process_at >= '(start_time)'
and a.process_at <= '(end_time)'
) as n
where n.row_number = '1'
order by n.process_at desc
limit '(limit)'
) as I
where f.serial_cd = i.serial_cd
and f.datatype_id = '(datatype_id)'
) as m
where d.insp_seq = m.insp_seq
and m.row_number = '1'
and d.inspect_cd in ('''+ str_inspect +''')'''
sql = sql.replace('(model_name)', model_name) \
.replace("(line_cd)", line_cd) \
.replace("(process_cd)", process_name)\
.replace("(start_time)", start_time)\
.replace("(end_time)",end_time)\
.replace("(datatype_id)", datatype_id)\
.replace("(limit)", str(limit))
cur.execute(sql)
rows = cur.fetchall()
data_list = []
for row in rows:
re = {}
re['line_cd'] = row[0]
re['station_slot'] = row[1]
re['serial_cd'] = row[2]
re['judge_text'] = row[3]
re['process_at'] = row[4].strftime("%Y-%m-%d %H:%M:%S")
re['inspect_cd'] = row[5]
re['inspect_text'] = row[6]
data_list.append(re)
except BaseException as exp:
data_list = databaseException(exp)
connections[model_name].close()
return data_list
def getDetailList_update(model_name, process_name, datatype_id, limit, start_time, end_time,JIG_type):
result = []
try:
database_list = get_config("database")
lineArr = []
for row in database_list:
if row['MODEL'] == model_name:
# Read LINE from the config file
lineArr = row['LINE']
break
cur = connections[model_name].cursor()
# After
sql = "SELECT line_cd,partsserial_cd AS station_slot,serial_cd,judge_text FROM ( \
SELECT f.serial_cd, i.line_cd,f.process_at,f.partsserial_cd,i.judge_text,ROW_NUMBER () OVER ( \
PARTITION BY f.serial_cd ORDER BY f.process_at) FROM t_faci_"+ model_name +" f,(SELECT serial_cd,process_at, \
judge_text,line_cd,process_cd FROM(SELECT A .serial_cd,A .process_at,A .judge_text,b.line_cd,b.process_cd,\
ROW_NUMBER () OVER (PARTITION BY A .serial_cd ORDER BY A .process_at DESC) FROM t_insp_"+ model_name +" A,m_process b \
WHERE A .proc_uuid = b.proc_uuid AND b.line_cd IN (%s) AND b.process_cd = '(process_name)' AND A.process_at >= '(start_time)' \
AND A.process_at <= '(end_time)') AS n WHERE ROW_NUMBER = '1') AS I WHERE f.serial_cd = i.serial_cd \
AND f.datatype_id = '(datatype_id)') AS M WHERE M . ROW_NUMBER = '1' LIMIT '(limit)';"
sql = sql % ','.join(['%s'] * len(lineArr))
sql = sql.replace("(process_name)", process_name).replace("(start_time)", start_time).replace("(end_time)",end_time) \
.replace("(datatype_id)", datatype_id).replace("(limit)", str(limit))
cur.execute(sql,lineArr)
rows = cur.fetchall()
line_cd_list = []
JIG_list = []
for row in rows:
if not row[0] in line_cd_list:
line_cd_list.append(row[0])
for item in line_cd_list:
for row_1 in rows:
if item == row_1[0]:
if not row_1[1] in JIG_list:
JIG_list.append(row_1[1])
for JIG_item in JIG_list:
IN_ = 0
NG_ = 0
for row_ in rows:
if row_[0] == item and row_[1] == JIG_item and int(row_[3]) == 1:
NG_ = NG_ + 1
if row_[0] == item and row_[1] == JIG_item:
IN_ = IN_ + 1
Yield = float('%.2f' % (100 * ((IN_- NG_)/IN_)))
result.append({"line_cd":item,"station_slot":JIG_item,"ng_count":NG_,"in":IN_,"yield":Yield})
JIG_list = []
# Sort
# result = [{'line_cd': 'L0sa', 'station_slot': 'sas', 'ng_count': 3, 'in': 2, 'yield': 50.00},
# {'line_cd': 'L08', 'station_slot': 'L08MS05-03', 'ng_count': 1, 'in': 1, 'yield': 0.00},
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'ng_count': 6, 'in': 2, 'yield': 49.00},
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'ng_count': 35, 'in': 2, 'yield': 47.00},
# {'line_cd': 'L04', 'station_slot': 'L08MS05-03', 'ng_count': 33, 'in': 2, 'yield': 51.00}]
if JIG_type == 'NG COUNT':
result.sort(key=lambda x:x['ng_count'], reverse=True)
else:
result.sort(key=lambda x:x['yield'])
except BaseException as exp:
print(exp)
result = databaseException(exp)
connections[model_name].close()
return result
# Get the station and slot for the given production line and datatype_id
def getStationSlot(model_name, datatype_id, line_cd):
result = {}
partsserial_cd = line_cd + "MS%"
try:
cur = connections[model_name].cursor()
sql = "SELECT\
max(\
substr(\
partsserial_cd,\
position( 'MS' IN partsserial_cd ) + 2,\
( position( '-' IN partsserial_cd ) - ( position( 'MS' IN partsserial_cd ) + 2 ) ) \
) \
) AS station,\
max( substr( partsserial_cd, position( '-' IN partsserial_cd ) + 1 ) ) AS slot \
FROM\
t_faci_" + model_name + " faci \
WHERE\
datatype_id = %s \
AND partsserial_cd LIKE %s"
cur.execute(sql, (datatype_id, partsserial_cd))
rows = cur.fetchall()
for row in rows:
result = {"station": row[0], "slot": row[1]}
except BaseException as exp:
print(exp)
result = databaseException(exp)
connections[model_name].close()
return result
def getJIGByLine_(model_name, process_cd, start_time, end_time):
result = []
try:
# Read the line array from the config file
database_list = get_config("database")
lineArr = []
# Look up the limit for this model_name and process_cd
for item in database_list:
if item["MODEL"] == model_name:
for dataItem in item["DATA"]:
if dataItem["PROCESS"] == process_cd:
limit = dataItem["LIMIT"]
break
for row in database_list:
if row['MODEL'] == model_name:
# Read LINE from the config file
lineArr = row['LINE']
break
cur = connections[model_name].cursor()
sql = "SELECT line_cd, COUNT (B.line_cd) FROM ( \
SELECT DISTINCT A .serial_cd,A .judge_text,A .line_cd \
FROM ( SELECT DISTINCT (i.serial_cd),i.process_at,i.judge_text,P .line_cd \
FROM t_insp_"+model_name+" i,m_process P WHERE i.proc_uuid = P .proc_uuid \
AND P .line_cd IN ( %s ) AND P .Process_cd = '(process_cd)' \
AND i.process_at >= '(start_time)' \
AND i.process_at <= '(end_time)' \
ORDER BY i.process_at DESC LIMIT '(LIMIT)') AS A WHERE a.judge_text = '1' \
ORDER BY A .judge_text) AS B GROUP BY B.line_cd; "
sql = sql % ','.join(['%s'] * len(lineArr))
sql = sql.replace("(process_cd)", process_cd).replace("(start_time)", start_time).replace("(end_time)", end_time) \
.replace("(LIMIT)", str(limit))
cur.execute(sql, lineArr)
rows = cur.fetchall()
count = 0
for line in lineArr:
if checkExist(line, rows):
for row in rows:
if line == row[0]:
count = int(row[1])
break
else:
count = 0
result.append(count)
except BaseException as exp:
print(exp)
# result = databaseException(exp)
connections[model_name].close()
return result
def getNGByLine_(model_name, process_cd, start_time, end_time):
result = []
try:
# Read the line array from the config file
database_list = get_config("database")
lineArr = []
# Look up the limit for this model_name and process_cd
for item in database_list:
if item["MODEL"] == model_name:
for dataItem in item["DATA"]:
if dataItem["PROCESS"] == process_cd:
limit = dataItem["LIMIT"]
break
for row in database_list:
if row['MODEL'] == model_name:
# Read LINE from the config file
lineArr = row['LINE']
break
cur = connections[model_name].cursor()
sql = "SELECT line_cd, COUNT (B.line_cd) FROM ( \
SELECT DISTINCT A .serial_cd,A .judge_text,A .line_cd \
FROM ( SELECT DISTINCT (i.serial_cd),i.process_at,i.judge_text,P .line_cd \
FROM t_insp_" + model_name + " i,m_process P WHERE i.proc_uuid = P .proc_uuid \
AND P .line_cd IN ( %s ) AND P .Process_cd = '(process_cd)' \
AND i.process_at >= '(start_time)' \
AND i.process_at <= '(end_time)' \
ORDER BY i.process_at DESC LIMIT '(LIMIT)') AS A \
ORDER BY A .judge_text) AS B GROUP BY B.line_cd; "
sql = sql % ','.join(['%s'] * len(lineArr))
sql = sql.replace("(process_cd)", process_cd).replace("(start_time)", start_time).replace("(end_time)", end_time) \
.replace("(LIMIT)", str(limit))
cur.execute(sql, lineArr)
rows = cur.fetchall()
count = 0
for line in lineArr:
if checkExist(line, rows):
for row in rows:
if line == row[0]:
count = int(row[1])
break
else:
count = 0
result.append(count)
except BaseException as exp:
print(exp)
# result = databaseException(exp)
connections[model_name].close()
return result
def getJIG_NGByLine(model_name, process_cd, limit, start_time, end_time):
result = []
try:
# Read the line array from the config file
database_list = get_config("database")
lineArr = []
for row in database_list:
if row['MODEL'] == model_name:
# Read LINE from the config file
lineArr = row['LINE']
break
cur = connections[model_name].cursor()
# sql = "SELECT DISTINCT (i.serial_cd),i.judge_text,P .line_cd \
# FROM t_insp_"+ model_name +" i,m_process P WHERE i.proc_uuid = P .proc_uuid AND P .line_cd IN ( %s ) \
# AND P .Process_cd = '(process_cd)' AND i.process_at >= '(start_time)' \
# AND i.process_at <= '(end_time)'ORDER BY i.judge_text DESC LIMIT '(LIMIT)';"
sql = "SELECT DISTINCT (i.serial_cd),i.process_at,i.judge_text,P .line_cd \
FROM t_insp_" + model_name + " i,m_process P WHERE i.proc_uuid = P .proc_uuid AND P .line_cd IN ( %s ) \
AND P .Process_cd = '(process_cd)' AND i.process_at >= '(start_time)' \
AND i.process_at <= '(end_time)'ORDER BY i.process_at DESC LIMIT '(LIMIT)';"
sql = sql % ','.join(['%s'] * len(lineArr))
sql = sql.replace("(process_cd)", process_cd).replace("(start_time)", start_time).replace("(end_time)",end_time) \
.replace("(LIMIT)", str(limit))
cur.execute(sql, lineArr)
rows = cur.fetchall()
rows_list = []
serial_cd_list = []
for row in rows:
if not row[0] in serial_cd_list:
serial_cd_list.append(row[0])
rows_list.append(row)
ng_list = []
in_list = []
ng_count = 0
in_count = 0
for item in lineArr:
for row in rows_list:
if row[3] == item and int(row[2]) == 1:
ng_count = ng_count + 1
if row[3] == item:
in_count = in_count + 1
ng_list.append(ng_count)
in_list.append(in_count)
ng_count = 0
in_count = 0
except BaseException as exp:
print(exp)
# result = databaseException(exp)
connections[model_name].close()
return ng_list, in_list
def checkExist(line, lines):
flag = False
for item in lines:
if item[0] == line:
flag = True
return flag
if __name__ == '__main__':
a = ['a','b']
b = ""
for item in a:
b += "'%s',"%(item)
print(b[:-1])
|
[
"Administrator@TPEC.TPC.LOCAL"
] |
Administrator@TPEC.TPC.LOCAL
|
d2e32dfe2d6b6b970e7ce6e63bcaaf759826b66f
|
d5273caa648c5041e70ae33925c8787fe323fa9a
|
/gui_test.py
|
b71593d57008e7367c5d7d0b0345bd2e40d41e14
|
[] |
no_license
|
piyu23srma/Rpi_GUI_tutorials
|
5cc4d9758944c90cfe7ccab6f525360421d7106b
|
8dd12530f0d6461db6bbe379f82b9ddb6599d1d8
|
refs/heads/master
| 2020-03-23T03:03:57.867037
| 2018-07-15T09:17:06
| 2018-07-15T09:17:06
| 141,007,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
from guizero import App, Text, TextBox, PushButton, Slider, Picture
def say_my_name():
welcome_message.value = my_name.value
def change_text_size(slider_value):
welcome_message.size = slider_value
app = App(title="Hello world")
welcome_message = Text(app, text="Welcome to my app", size=40, font="Times New Roman", color="lightblue")
my_name = TextBox(app, width=30)
update_text = PushButton(app, command=say_my_name, text="Display my name")
text_size = Slider(app, command=change_text_size, start=10, end=80)
my_dp = Picture(app, image="dp.gif")
app.display()
|
[
"piyu23srma@gmail.com"
] |
piyu23srma@gmail.com
|
cdc0900d3b2677c0be666cbdd443353d5385757e
|
64ffb2e803a19e5dc75ec8fa0f277609d34e0cc7
|
/dynamodb/update_counter_atomically.py
|
ddfbf9dfad90311a0332a3cbd1a990c0830b6c51
|
[] |
no_license
|
arunpa0206/awstraining
|
687bc4206dfd65693039c525e8a4ff39d14e89d5
|
2eae2353b75a2774f9f47b40d76d63c7f9e08bb4
|
refs/heads/master
| 2021-05-10T15:06:48.652021
| 2019-08-20T10:36:29
| 2019-08-20T10:36:29
| 118,538,574
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
from __future__ import print_function # Python 2/3 compatibility
import boto3
import json
import decimal
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
dynamodb = boto3.resource('dynamodb', region_name='us-west-2', endpoint_url="http://localhost:8000")
table = dynamodb.Table('Movies')
title = "The Big New Movie"
year = 2015
response = table.update_item(
Key={
'year': year,
'title': title
},
UpdateExpression="set info.rating = info.rating + :val",
ExpressionAttributeValues={
':val': decimal.Decimal(1)
},
ReturnValues="UPDATED_NEW"
)
print("UpdateItem succeeded:")
print(json.dumps(response, indent=4, cls=DecimalEncoder))
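# Hedged variant (an assumption, not part of the original script): if the
# rating attribute might not exist on the item yet, DynamoDB's
# if_not_exists() update-expression function can seed it atomically within
# the same request. The helper name below is hypothetical.
def increment_rating_safely():
    return table.update_item(
        Key={'year': year, 'title': title},
        UpdateExpression="set info.rating = if_not_exists(info.rating, :zero) + :val",
        ExpressionAttributeValues={':val': decimal.Decimal(1), ':zero': decimal.Decimal(0)},
        ReturnValues="UPDATED_NEW"
    )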
|
[
"mail_arunn@yahoo.com"
] |
mail_arunn@yahoo.com
|
aaf6c34add63fd4cb4417be36efa84c09735f089
|
4d160b0e2479a6e1e64a0afda88c02c9d5ab0b32
|
/catkin/src/volti/scripts/c_esc_vols.py
|
fcc53e1aa21fa8de9af10ec3d52c9c4042ecbdf0
|
[] |
no_license
|
eigendreams/esferico
|
5d7a9e5292a490fc15a259bf0f1bfd22b66920e5
|
75c2cbed1bbdd05a33602a9ba5d2ff46dfffbc74
|
refs/heads/master
| 2020-11-26T19:40:31.661159
| 2015-04-29T17:56:58
| 2015-04-29T17:56:58
| 34,538,674
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,762
|
py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import rospy
from std_msgs.msg import Int16
from ino_mod import *
class Control_ESC_Volantes:
#
def __init__(self, node_name_override = 'c_esc_vols'):
#
rospy.init_node(node_name_override)
self.nodename = rospy.get_name()
rospy.loginfo("Node starting with name %s", self.nodename)
#
self.rate = rospy.get_param("rate", 10)
#
# state = -1 -> flywheels shutting down
# state = 0  -> flywheels off
# state = 1  -> flywheels spinning up
# state = 2  -> flywheels at cruise speed
#
# vol1 and vol2 range from 0 to 100.00
#
self.state = 0
#
self.init_time = rospy.get_time()
self.tick_time = 1. / self.rate
self.task_done = False
#
self.signal = 0
#
self.vol1 = 0
self.vol2 = 0
#
self.rise_time = int(rospy.get_param("rise_vol_segs", 30))
#
# The output data to the ESCs should follow the same pattern as the rest of the
# code, with one difference: in general we range from -100.00 to 100.00, but in
# this case only 0 to 100.00 makes sense. Two publishers are kept in case some
# functionality is ever implemented in another node that needs to drive the
# flywheels independently.
#
self.vol1Pub = rospy.Publisher('vol1', Int16)
self.vol2Pub = rospy.Publisher('vol2', Int16)
#
# The control signals are simply:
#
# 0     turn the flywheels off; if a sequence is already active, shutdown must be gradual
# 1-100 spin the flywheels up to that percentage; if no sequence is active, startup must be gradual
#
self.sgnSub = rospy.Subscriber('vol_sgn', Int16, self.sgncb)
#
def sgncb(self, data):
#
self.signal = data.data
#
def update(self):
#
# This node provides a soft start-up sequence for the flywheels, because of the
# potential jolt the robot could suffer if they started up violently. The
# flywheels are meant to reach maximum speed in 30 seconds at most, through
# ROS parameters, with the defaults specified in the code above.
#
# A further possibility would be for the flywheel control signal, since both
# must spin at the same speed, to be repeated in the micro for both, avoiding
# communication-delay problems that would show up as jerk.
#
# That is an acceptable proposal, but some explicit way of communicating it
# outside the code should be provided. I propose an additional message, for
# example using alive as a series of control codes that specify the mode for
# several systems as a bit mask, with a maximum of 14 options: the first is
# forbidden because of the sign bit, and the last because it specifies the
# alive state itself.
#
# ok, but it will not be implemented yet
#
# This class needs several parameters:
#
# RISE_TIME, the time needed to reach cruise speed
# CON_SIGN, a signal telling it when to stop or start the flywheels; every
# start-up should take e.g. 30 seconds, while stops could be done simply by
# cutting the ESC output. I recommend that stops be done in a controlled way
# over a total of e.g. two minutes, given the tremendous energy that has to
# be dissipated.
#
# We use a state machine decoupled from the rest of the program flow, because
# the states must always be honored precisely; failing to do so could be
# dangerous given the flywheels' high speed. As a consequence, each state must
# complete before moving on to the next.
#
# rules about the state
# operations within the state
# outputs in the states
#
# state transitions
#
if (self.state > 0 and self.signal == 0):
self.state = -1
self.task_done = False
self.task_time = 0
if (self.state == -1 and self.task_done):
self.state = 0
if (self.state == 0 and self.signal == 1):
self.state = 1
self.task_done = False
self.task_time = 0
if (self.state == 1 and self.task_done):
self.state = 2
#
# actions within the states
#
if (self.state == -1):
# shutdown sequence; it should take at least four times the spin-up time
if (self.vol1 == 0 and self.vol2 == 0):
self.task_done = True
#
self.vol1 = constrain(self.vol1 - 100 * self.tick_time / ( 4. * self.rise_time), 0, 100)
self.vol2 = constrain(self.vol2 - 100 * self.tick_time / ( 4. * self.rise_time), 0, 100)
#
if (self.state == 0):
# idle state: do nothing, perhaps force the flywheels to zero
self.vol1 = 0
self.vol2 = 0
#
if (self.state == 1):
# spinning up
if (self.vol1 == self.signal and self.vol2 == self.signal):
self.task_done = True
#
self.vol1 = constrain(self.vol1 + self.signal * self.tick_time / (self.rise_time), 0, self.signal)
self.vol2 = constrain(self.vol2 + self.signal * self.tick_time / (self.rise_time), 0, self.signal)
if (self.state == 2):
# running; perhaps keep publishing the signal value constantly
self.vol1 = self.signal
self.vol2 = self.signal
#
# outputs: independent of the states, though perhaps dependent on operating modes
#
# publishing is done continuously; it does not have to be, but if some different
# operating mode were added, for example for manual tests, the split between
# output modes would happen here, while the state machine and the rest would
# keep running even if their output were suppressed
#
self.vol1Pub.publish(self.vol1 * 100)
self.vol2Pub.publish(self.vol2 * 100)
#
def spin(self):
#
r = rospy.Rate(self.rate)
while not rospy.is_shutdown():
self.update()
r.sleep()
#
if __name__ == '__main__':
""" main """
c_esc_vols = Control_ESC_Volantes()
c_esc_vols.spin()
|
[
"jakob.culebro@gmail.com"
] |
jakob.culebro@gmail.com
|
271216a877846152d2881e55d030fd1daa1812ed
|
c579690f06b4dd558f6e5bfa52d4819f4be163fc
|
/training.py
|
0ce714070ccb5a3e7f09ed393bbdc44bb31951b4
|
[] |
no_license
|
soul0101/Face-Recognition
|
f779902221ea017f6edbefdd08c3f74f7372d973
|
644af7a2cdab2a5d4172771242cbbaabd8fe3221
|
refs/heads/main
| 2023-02-06T07:43:27.939620
| 2020-12-29T11:17:55
| 2020-12-29T11:17:55
| 325,259,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
from imutils import paths
import face_recognition
import pickle
import cv2
import os
imagePaths = list(paths.list_images('known'))
knownEncodings = []
knownNames = []
for (i,imagePath) in enumerate(imagePaths):
name = imagePath.split(os.path.sep)[-2]
image = cv2.imread(imagePath)
rgb = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
box = face_recognition.face_locations(rgb,model="hog")
encodings = face_recognition.face_encodings(rgb,box)
for encoding in encodings:
knownEncodings.append(encoding)
knownNames.append(name)
data={"encodings":knownEncodings, "name":knownNames}
f=open("face_enc","wb")
f.write(pickle.dumps(data))
f.close()
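# Usage sketch (illustrative, not part of the original): the pickled encodings
# written above can be loaded back for the recognition step. The helper name
# is hypothetical.
def load_encodings(path="face_enc"):
    with open(path, "rb") as enc_file:
        return pickle.loads(enc_file.read())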
|
[
"53980340+soul0101@users.noreply.github.com"
] |
53980340+soul0101@users.noreply.github.com
|
9a70945fcd36587b08508b68dc3d8af4d9b59fff
|
342a9f6fe8012e21321344e3c0c61d46fdb3866a
|
/app/admin.py
|
413b49bd61cd23954b090e50d59c5e0c901bc100
|
[
"MIT"
] |
permissive
|
GDGVIT/hestia-requests
|
1175010e33ada572a10fe9766715149d534ed46b
|
666a8b818cccbf968ac8d43d9ced6dba8fd458d5
|
refs/heads/master
| 2021-04-24T02:41:51.012555
| 2020-07-18T14:54:57
| 2020-07-18T14:54:57
| 250,062,825
| 0
| 0
|
MIT
| 2021-02-20T07:48:32
| 2020-03-25T18:46:57
|
CSS
|
UTF-8
|
Python
| false
| false
| 264
|
py
|
from django.contrib import admin
from .models import ItemRequest, Accepts, Organizations, AreasCatered
# Register your models here.
admin.site.register(ItemRequest)
admin.site.register(Accepts)
admin.site.register(Organizations)
admin.site.register(AreasCatered)
|
[
"mail2riddhigupta@gmail.com"
] |
mail2riddhigupta@gmail.com
|
303d2e67444557cb4fd051f1250a360cb9ef821c
|
892c7bd301eeadf57b546f039faf499448112ddc
|
/organizacion/migrations/0004_escuelacampo.py
|
0f8af8ca46dffde4815519fa6295128bd78c2024
|
[
"MIT"
] |
permissive
|
ErickMurillo/aprocacaho
|
beed9c4b031cf26a362e44fc6a042b38ab246c27
|
eecd216103e6b06e3ece174c89d911f27b50585a
|
refs/heads/master
| 2022-11-23T15:03:32.687847
| 2019-07-01T19:16:37
| 2019-07-01T19:16:37
| 53,867,804
| 0
| 1
|
MIT
| 2022-11-22T01:02:51
| 2016-03-14T15:23:39
|
HTML
|
UTF-8
|
Python
| false
| false
| 903
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-04 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organizacion', '0003_auto_20160803_2128'),
]
operations = [
migrations.CreateModel(
name='EscuelaCampo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=200)),
('organizacion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organizacion.Organizacion')),
],
options={
'verbose_name': 'Escuela de campo',
'verbose_name_plural': 'Escuelas de campo',
},
),
]
|
[
"erickmurillo22@gmail.com"
] |
erickmurillo22@gmail.com
|
a62227d3bca73730eaa42b775ca255d0a71fa6ed
|
b558b1479cf5a8a496a5b580b9107632d7ad47f7
|
/producers/connector.py
|
78ccce61f1b97194d26254b4998fd6003136905a
|
[
"ISC",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"Zlib",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"BSD-3-Clause",
"OpenSSL",
"MIT"
] |
permissive
|
purbashacg9/opt-public-transportation
|
7a850b554a0fa444406077dd022de9bc14f3bedb
|
2264a32b6f2845993b3b5a74d2d0166e286820ff
|
refs/heads/master
| 2023-08-23T20:42:49.376506
| 2020-04-12T08:00:03
| 2020-04-12T08:00:03
| 248,123,580
| 0
| 0
|
NOASSERTION
| 2023-08-14T22:09:30
| 2020-03-18T02:50:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,869
|
py
|
"""Configures a Kafka Connector for Postgres Station data"""
import json
import logging
import requests
logger = logging.getLogger(__name__)
KAFKA_CONNECT_URL = "http://localhost:8083/connectors"
CONNECTOR_NAME = "postgres-source-stations"
def configure_connector():
"""Starts and configures the Kafka Connect connector"""
logging.info("creating or updating kafka connect connector...")
resp = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}")
if resp.status_code == 200:
logging.info("connector already created skipping recreation")
return
logger.info("Setting up JDBC source connector")
resp = requests.post(
KAFKA_CONNECT_URL,
headers={"Content-Type": "application/json"},
data=json.dumps({
"name": CONNECTOR_NAME,
"config": {
"connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
"key.converter": "org.apache.kafka.connect.json.JsonConverter",
"key.converter.schemas.enable": "false",
"value.converter": "org.apache.kafka.connect.json.JsonConverter",
"value.converter.schemas.enable": "false",
"batch.max.rows": "500",
"connection.url": "jdbc:postgresql://localhost:5432/cta",
"connection.user": "cta_admin",
"connection.password": "chicago",
"table.whitelist": "stations",
"mode": "incrementing",
"incrementing.column.name": "stop_id",
"topic.prefix": "org.chicago.stations.table.connect-",
"poll.interval.ms": 60000,
}
}),
)
## Ensure a healthy response was given
resp.raise_for_status()
logging.info("connector created successfully")
if __name__ == "__main__":
configure_connector()
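# Hedged follow-up (an assumption, not in the original): Kafka Connect also
# exposes GET /connectors/<name>/status, which can confirm the connector
# actually reached the RUNNING state after creation, e.g.:
#
#   status = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}/status").json()
#   logging.info("connector state: %s", status["connector"]["state"])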
|
[
"purbasha@paradata.io"
] |
purbasha@paradata.io
|
9100181f81588e9336f50a9dee86c249eb248cff
|
74a135972a09870e51337c3be4afb3abfd9c43c7
|
/TableMultiplication2.py
|
814492e12d9cfacf27c6bb8f0e285e7e3e117a80
|
[] |
no_license
|
Sonnelon888/pythonStudy
|
1b94307238c27256ad889998e9b355660692f9ec
|
5f5ce95838c30b1a23272a49784c66517b1c0a9c
|
refs/heads/master
| 2020-06-07T08:50:02.507380
| 2019-06-20T20:43:23
| 2019-06-20T20:43:23
| 192,979,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
a, b, c, d = (int(input()) for x in range(4))
print('', *range(c,d+1), sep='\t')
for x in range(a, b+1):
print(x, *[y*x for y in range(c, d+1)], sep='\t')
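# Worked example (illustrative, not in the original): with inputs a=2, b=3,
# c=4, d=5 the script prints a tab-separated table whose header row holds the
# column values 4 and 5, followed by row 2 -> 8 10 and row 3 -> 12 15.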
|
[
"S.Mokrov@gmail.com"
] |
S.Mokrov@gmail.com
|
1b595d943f2f3026a02236c4b6a6caade13ea718
|
ea6b3b74c8f1ff9333c5d4b06a0e4dd9bbdb3bba
|
/tests/protocol/test_json_protocol.py
|
5a79da739005fd27e01a88e27d335942a77b03c2
|
[
"MIT"
] |
permissive
|
sgalkina/venom
|
d495d296a388afcb25525491bbbe590bfd258a05
|
e372ab9002e71ba4e2422aabd02143e4f1247dba
|
refs/heads/master
| 2021-01-23T03:27:17.239289
| 2017-03-24T15:05:56
| 2017-03-24T15:05:56
| 86,077,951
| 0
| 0
| null | 2017-03-24T14:40:46
| 2017-03-24T14:40:46
| null |
UTF-8
|
Python
| false
| false
| 5,448
|
py
|
from unittest import SkipTest
from unittest import TestCase
from venom import Message
from venom.common import StringValue, IntegerValue, BoolValue, NumberValue
from venom.exceptions import ValidationError
from venom.fields import String, Number, Field, Repeat
from venom.protocol import JSON
class Foo(Message):
string = String()
parent = Field('tests.protocol.test_json_protocol.Foo')
string_value = Field(StringValue)
class JSONProtocolTestCase(TestCase):
def test_encode_message(self):
class Pet(Message):
sound = String()
protocol = JSON(Pet)
self.assertEqual(protocol.encode(Pet('hiss!')), {'sound': 'hiss!'})
self.assertEqual(protocol.decode({'sound': 'meow'}), Pet('meow'))
self.assertEqual(protocol.decode({}), Pet())
with self.assertRaises(ValidationError) as e:
protocol.decode('bad')
self.assertEqual(e.exception.description, "'bad' is not of type 'object'")
self.assertEqual(e.exception.path, [])
@SkipTest
def test_encode_message_field_attribute(self):
# NOTE: removed support for field attributes.
class Pet(Message):
size = Number(attribute='weight')
protocol = JSON(Pet)
pet = Pet()
pet.size = 2.5
self.assertEqual(protocol.encode(pet), {'weight': 2.5})
self.assertEqual(protocol.decode({'weight': 2.5}), Pet(2.5))
def test_encode_repeat_field(self):
class Pet(Message):
sounds = Repeat(String())
protocol = JSON(Pet)
self.assertEqual(protocol.encode(Pet(['hiss!', 'slither'])), {'sounds': ['hiss!', 'slither']})
self.assertEqual(protocol.decode({'sounds': ['meow', 'purr']}), Pet(['meow', 'purr']))
self.assertEqual(protocol.decode({}), Pet())
self.assertEqual(protocol.encode(Pet()), {})
with self.assertRaises(ValidationError) as e:
protocol.decode({'sounds': 'meow, purr'})
self.assertEqual(e.exception.description, "'meow, purr' is not of type 'list'")
self.assertEqual(e.exception.path, ['sounds'])
def test_validation_field_string(self):
class Foo(Message):
string = String()
protocol = JSON(Foo)
with self.assertRaises(ValidationError) as e:
protocol.decode({'string': None})
self.assertEqual(e.exception.description, "None is not of type 'str'")
self.assertEqual(e.exception.path, ['string'])
def test_validation_path(self):
protocol = JSON(Foo)
with self.assertRaises(ValidationError) as e:
protocol.decode({'string': 42})
self.assertEqual(e.exception.description, "42 is not of type 'str'")
self.assertEqual(e.exception.path, ['string'])
# FIXME With custom encoding/decoding for values this won't happen.
with self.assertRaises(ValidationError) as e:
protocol.decode({'string_value': {'value': None}})
self.assertEqual(e.exception.description, "{'value': None} is not of type 'str'")
self.assertEqual(e.exception.path, ['string_value'])
with self.assertRaises(ValidationError) as e:
protocol.decode({'parent': {'string_value': 42}})
self.assertEqual(e.exception.description, "42 is not of type 'str'")
self.assertEqual(e.exception.path, ['parent', 'string_value'])
def test_unpack_invalid_json(self):
class Pet(Message):
sound = String()
protocol = JSON(Pet)
with self.assertRaises(ValidationError) as e:
protocol.unpack(b'')
self.assertEqual(e.exception.description, "Invalid JSON: Expected object or value")
self.assertEqual(e.exception.path, [])
with self.assertRaises(ValidationError) as e:
protocol.unpack(b'fs"ad')
def test_pack(self):
class Pet(Message):
sound = String()
protocol = JSON(Pet)
self.assertEqual(protocol.pack(Pet()), b'{}')
self.assertEqual(protocol.pack(Pet('hiss!')), b'{"sound":"hiss!"}')
def test_string_value(self):
protocol = JSON(StringValue)
self.assertEqual(protocol.encode(StringValue('hiss!')), 'hiss!')
self.assertEqual(protocol.decode('hiss!'), StringValue('hiss!'))
self.assertEqual(protocol.pack(StringValue()), b'""')
self.assertEqual(protocol.pack(StringValue('hiss!')), b'"hiss!"')
with self.assertRaises(ValidationError):
protocol.decode(42)
def test_integer_value(self):
protocol = JSON(IntegerValue)
self.assertEqual(protocol.encode(IntegerValue(2)), 2)
self.assertEqual(protocol.decode(2), IntegerValue(2))
with self.assertRaises(ValidationError):
protocol.decode('hiss!')
def test_number_value(self):
protocol = JSON(NumberValue)
self.assertEqual(protocol.encode(NumberValue(2.5)), 2.5)
self.assertEqual(protocol.decode(2.5), NumberValue(2.5))
with self.assertRaises(ValidationError):
protocol.decode('hiss!')
def test_bool_value(self):
protocol = JSON(BoolValue)
self.assertEqual(protocol.encode(BoolValue()), False)
self.assertEqual(protocol.encode(BoolValue(True)), True)
self.assertEqual(protocol.decode(False), BoolValue(False))
with self.assertRaises(ValidationError):
protocol.decode('hiss!')
|
[
"lars@lyschoening.de"
] |
lars@lyschoening.de
|
5dc983fbe66e93faf6d6af58d14ac9e0e48ffc33
|
5cf182d4d0e4e83874f04402beea6280361f9155
|
/Array_1.py
|
9af72d107eb9b89343ca7f55a95c13253164738e
|
[] |
no_license
|
nehaa8t/PythinBasic
|
c042fdd70157911cb0c74a2b0fe15c22e793cc92
|
eda857376191e1314f5b31a904ca23be81ef31cc
|
refs/heads/main
| 2023-06-19T03:40:17.757892
| 2021-07-13T09:44:09
| 2021-07-13T09:44:09
| 376,550,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,589
|
py
|
'''
#WAP : create an array of 5 integers and display the array items.
Access individual element through indexes
'''
import array as arr
a=arr.array('i',[1,2,3,4,5])
for i in range(0,3):
print(a[i])
print("===========")
for i in a:
print(i)
# Append a new item to the end of the array.
a=arr.array("i",[1,3,9,2])
a.append(11)
print(a)
#Reverse the order of the items in the array.
a1=arr.array("i",[1,3,9,2])
a1.reverse()
print(str(a1))
#==============================
#get the number of occurrences of a specified element in an array
Ori_arr = arr.array('i',[1, 3, 5, 3, 7, 9, 3,3])
z = Ori_arr.count(3)
print(z)
# 9 : append items from a specified list.
num_list = [1, 2, 6, -8]
array_num = arr.array('i', [])
print("Items in the list: " + str(num_list))
print("Append items from the list: " ,array_num)
array_num.fromlist(num_list)
print("=======insert a new item before the second element in an existing array.=====")
# 10 . insert a new item before the second element in an existing array.
num_list = [1, 2, 6, -8]
'============array int,pass the list =================='
covt_to_array = arr.array('i',num_list)
print(str(covt_to_array))
covt_to_array.insert(1,4)
print(str(covt_to_array))
print("==============remove a specified item using the index from an array======")
covt_to_array = arr.array('i',num_list)
covt_to_array.pop(1)
print(str(covt_to_array))
num_list=covt_to_array.tolist()
print(num_list)
print("13 .==============remove the first occurrence of a specified element from an array======")
|
[
"noreply@github.com"
] |
noreply@github.com
|
2ebfb27d864daa8609758160bd3ee3c6122b704a
|
e147827b4f6fbc4dd862f817e9d1a8621c4fcedc
|
/apps/doc/views.py
|
ab450d855ef75b700dd41676295f6518652efa34
|
[] |
no_license
|
Jsummer121/DjangoBolgProject
|
ba3ebe27a1ac67439de67b9f10c17d1c16e43f84
|
d64f9579d29ac5e3979d40303e84f4be6852fa96
|
refs/heads/master
| 2023-01-30T16:26:33.566665
| 2020-12-15T12:00:16
| 2020-12-15T12:00:16
| 321,654,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
from django.shortcuts import render
from django.views import View
from django.http import HttpResponse, FileResponse, StreamingHttpResponse, Http404
from doc import models
import requests
from django.utils.encoding import escape_uri_path
from Django_pro.settings import DOC_FILE_URL
def doc(request):
docs = models.Doc.objects.only('image_url', 'desc', 'title').filter(is_delete=False)
return render(request, 'doc/docDownload.html', context={'docs': docs})
class DocDownload(View):
def get(self, request, doc_id):
doc_file = models.Doc.objects.only('file_url').filter(is_delete=False, id=doc_id).first()
if doc_file:
# /media/流畅的Python.pdf
doc_url = doc_file.file_url
# http://192.168.216.137:8000/media/流畅的Python.pdf
doc_url = DOC_FILE_URL + doc_url
# a = requests.get(doc_url)
# res = HttpResponse(a)  # the line below is the shorthand form
res = FileResponse(requests.get(doc_url))
ex_name = doc_url.split('.')[-1] # pdf
if not ex_name:
    raise Http404('Invalid file name')
else:
ex_name = ex_name.lower()
if ex_name == 'pdf':
res['Content-type'] = 'application/pdf'
elif ex_name == 'doc':
res['Content-type'] = 'application/msword'
elif ex_name == 'ppt':
res['Content-type'] = 'application/powerpoint'
else:
    raise Http404('Unsupported file format')
doc_filename = escape_uri_path(doc_url.split('/')[-1])
# attachment = download as a file; inline = display in the browser
res["Content-Disposition"] = "attachment; filename*=UTF-8''{}".format(doc_filename)
return res
else:
    raise Http404('Document does not exist')
|
[
"jxxaizwt@icloud.com"
] |
jxxaizwt@icloud.com
|
3784053c40cb29135e59b0712ee61058d50b15d1
|
9ba90102dd6cd71c895950f501912a998374266e
|
/ServiciosTecnicos/GestorEntradasSalidas.py
|
24115767e4442532b52e3ca0a439c0d3681efe87
|
[] |
no_license
|
ajason08/SoftNLP
|
88925811101d0ca4e1ba056f3b87ea251475d3fd
|
c19a99ccb7234dd08a2485390dfc94f0336faa7f
|
refs/heads/master
| 2021-01-10T14:58:08.085104
| 2016-05-05T18:44:15
| 2016-05-05T18:44:15
| 55,006,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,566
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import codecs
import re
import xlrd
import xlwt
from pandas import *
import prettytable
from operator import itemgetter
def cargarArchivoTaggerTNT(archivo):
palabras = []
categorias = []
lemas = []
with open(archivo, 'r') as f:
myString = f.read()  # with decode('latin-1') the accented 'í' gets replaced by 'Ã'
b= myString.split('\n')
i=0
if b[-1].split('\t') == ['']:  # drop the trailing entry if the last line was not read completely
b=b[0:-1]
for x in b:
t=1
bb= x.split('\t')
if len(bb) != 3:
    print "here it is", bb
for xx in bb:
# print "celda", xx, i
if i%3==0:
palabras.append(xx)
elif i%3==1:
categorias.append(xx)
elif i%3==2:
lemas.append(xx)  # printed one by one, each comes out fine, e.g. centímetros
i=i+1
return palabras, categorias, lemas
def cargarcsv(archivo, signoSeparacion=","):
with codecs.open(archivo,'r',encoding='iso-8859-1') as f:
myString = f.read()  # with decode('latin-1') the accented 'í' gets replaced by 'Ã'
b= myString.split(signoSeparacion)
return b
def exportarExcelClasificacion(urls, clasificacion, nombre="outputClasificacion.xls"):
style0 = xlwt.easyxf('font: name Times New Roman, colour red, bold on')
wb = xlwt.Workbook()
ws = wb.add_sheet('A Test Sheet',cell_overwrite_ok=True)
ws.write(0, 0, 'LINK', style0)
ws.write(0, 1, 'CLASIFICACION', style0)
for i in range (len(urls)):
#ws.write(i+1, 0, articulos[i])
ws.write(i+1, 0, urls[i])
ws.write(i+1, 1, clasificacion[i])
wb.save(nombre)
print "Archivo exportado como", nombre
def exportarExcel(dataframe, nombreArchivoExcel="outputDataframe.xls"):
style0 = xlwt.easyxf('font: name Times New Roman, colour red, bold on')
nombres = list(dataframe.columns.values)
wb = xlwt.Workbook(encoding='latin-1')
ws = wb.add_sheet('my Sheet',cell_overwrite_ok=True)
for i in range (len(nombres)):
nom = nombres[i]
ws.write(0, i, nom, style0)
# not sure whether it skips the last one
for filai in range (1,len(dataframe.axes[0])):
for columnaj in range (len(nombres)):
col = nombres[columnaj]
a = (dataframe.get_value(dataframe.index[filai],col))
if len(a)>30000:
a = "muy largo"
ws.write(filai, columnaj,a)
wb.save(nombreArchivoExcel)
print "Archivo exportado como", nombreArchivoExcel
def exportarMatrizCSV(dataframe, nombreArchivo="outputDataframe.csv", separadorCol = "$$", separadorFilas = "\n"):
nombres = list(dataframe.columns.values)
quo = "<quote>"
outputArt=open(nombreArchivo,"w")
outputArt.close()  # reset the txt file
outputArt = open(nombreArchivo,"a")
# write the table header
for nomColi in range (len(nombres)):
nom = quo+nombres[nomColi]+quo+separadorCol
outputArt.write(nom)
outputArt.write(separadorFilas)
for filai in range (0,len(dataframe.axes[0])):
for nomColi in range (len(nombres)):
nom = nombres[nomColi]
valorCelda = quo+dataframe.get_value(dataframe.index[filai],nom)+quo+separadorCol
outputArt.write(valorCelda)
outputArt.write(separadorFilas)
outputArt.close()
def cargarLematizacion2(archivo):
tagger = []
with open(archivo, 'r') as f:
myString = f.read()  # with decode('latin-1') the accented 'í' gets replaced by 'Ã'
b= myString.split('\n')
i=0
for x in b:
bb= x.split('\t')
tagger.append(bb)
return tagger
def ordenarTabla(columnas,c):
# sorts a matrix built from vectors of the form [vector0, vector1, vectorn];
# c is the index of the vector by which the matrix will be sorted
if not c in range(len(columnas)):
print "Excepcion c is not in range of matrix"
return None
nroFilas=len(columnas[0])
matriz = []
for i in range(nroFilas):
fila= []
for columna in columnas:
fila.append(columna[i])
matriz.append(fila)
x = sorted(matriz, key=itemgetter(c))
return x
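# Illustrative example (not in the original): ordenarTabla([[3, 1], ['a', 'b']], 0)
# builds the rows [3, 'a'] and [1, 'b'] and returns them sorted by column 0,
# i.e. [[1, 'b'], [3, 'a']].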
def cargarColumnaEnLista(excel, hoja, columna, filainicial=0, filaLimite=0):
# given an Excel file, a sheet, and a column (0-based), loads the data into a list
# until an empty cell or the given row limit is reached
doc = xlrd.open_workbook(excel)
sheet = doc.sheet_by_index(hoja)
if filaLimite == 0:
nrows = sheet.nrows
filaLimite = nrows
lista = []
for i in range(filainicial, filaLimite):
#print filaLimite
try:
celda = sheet.cell_value(i,columna).__str__()
if celda == '':  # an empty cell means the column has no more values
break
lista.append(celda.encode())
except:
celda= sheet.cell_value(i, columna)
#print celda, "execepcion!!", sys.exc_info()[0]
#continue
if celda == '':  # an empty cell means the column has no more values
break
lista.append(celda.encode('utf-8'))  # encode added so accented words do not come out as unicode
return lista
def is_ascii(s):
try:
return all(ord(c) < 128 for c in s)
except TypeError:
return False
|
[
"ajason08@gmail.com"
] |
ajason08@gmail.com
|
a46a60c0e2600f4a1681c7b0350edead08cf76fd
|
0ff26d3911d405e730910682b4ad62211d9f14a1
|
/server/lib/cache.py
|
d479fc52572f71e56bdb665049fd6e4d8a3da25a
|
[] |
no_license
|
jzitnik/z-pim
|
975ef7af3cd1fb3afcea252cb19695603c9908fd
|
6b57d27a04eda3892639935a59970cc7e17fe756
|
refs/heads/master
| 2016-09-05T19:13:14.377173
| 2012-12-04T19:11:11
| 2012-12-04T19:11:11
| 7,005,253
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
class Cache(object):
def __call__(self):
print "CACHE INIT"
class SessionCached(Cache):
def __call__(self):
print "sess CACHE INIT"
|
[
"jan@zitnik.org"
] |
jan@zitnik.org
|
a1b2d1e62a5a9c0b2e499246d138951a2a9f20f9
|
64a80df5e23b195eaba7b15ce207743e2018b16c
|
/Downloads/adafruit-circuitpython-bundle-py-20201107/lib/adafruit_wsgi/wsgi_app.py
|
44171f51cfa9e62e9b7fdc09a66fc806d95d7b4a
|
[] |
no_license
|
aferlazzo/messageBoard
|
8fb69aad3cd7816d4ed80da92eac8aa2e25572f5
|
f9dd4dcc8663c9c658ec76b2060780e0da87533d
|
refs/heads/main
| 2023-01-27T20:02:52.628508
| 2020-12-07T00:37:17
| 2020-12-07T00:37:17
| 318,548,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,807
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2019 Matthew Costi for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`wsgi_app`
================================================================================
CircuitPython framework for creating WSGI server compatible web applications.
This does *not* include server implementation, which is necessary in order
to create a web application with this library.
* Circuit Python implementation of an WSGI Server for ESP32 devices:
https://github.com/adafruit/Adafruit_CircuitPython_ESP32SPI.git
* Author(s): Matthew Costi
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import re
from adafruit_wsgi.request import Request
__version__ = "1.1.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_WSGI.git"
class WSGIApp:
"""
The base WSGI Application class.
"""
def __init__(self):
self._routes = []
self._variable_re = re.compile("^<([a-zA-Z]+)>$")
def __call__(self, environ, start_response):
"""
Called whenever the server gets a request.
The environ dict has details about the request per wsgi specification.
Call start_response with the response status string and headers as a list of tuples.
Return a single item list with the item being your response data string.
"""
status = ""
headers = []
resp_data = []
request = Request(environ)
match = self._match_route(request.path, request.method.upper())
if match:
args, route = match
status, headers, resp_data = route["func"](request, *args)
start_response(status, headers)
return resp_data
def on_request(self, methods, rule, request_handler):
"""
Register a Request Handler for a particular HTTP method and path.
request_handler will be called whenever a matching HTTP request is received.
request_handler should accept the following args:
(Dict environ)
request_handler should return a tuple in the shape of:
(status, header_list, data_iterable)
:param list methods: the methods of the HTTP request to handle
:param str rule: the path rule of the HTTP request
:param func request_handler: the function to call
"""
regex = "^"
rule_parts = rule.split("/")
for part in rule_parts:
var = self._variable_re.match(part)
if var:
# If named capture groups ever become a thing, use this regex instead
# regex += "(?P<" + var.group("var") + r">[a-zA-Z0-9_-]*)\/"
regex += r"([a-zA-Z0-9_-]+)\/"
else:
regex += part + r"\/"
regex += "?$" # make last slash optional and that we only allow full matches
self._routes.append(
(re.compile(regex), {"methods": methods, "func": request_handler})
)
def route(self, rule, methods=None):
"""
A decorator to register a route rule with an endpoint function.
if no methods are provided, default to GET
"""
if not methods:
methods = ["GET"]
return lambda func: self.on_request(methods, rule, func)
def _match_route(self, path, method):
for matcher, route in self._routes:
match = matcher.match(path)
if match and method in route["methods"]:
return (match.groups(), route)
return None
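# Hedged usage sketch (not part of the original file): registering a handler
# on a WSGIApp instance. '<state>' in the rule is captured and passed as a
# positional argument, and the handler returns (status, headers, body) per
# the contract documented above. 'web_app' and 'led_handler' are hypothetical names.
#
#   web_app = WSGIApp()
#
#   @web_app.route("/led/<state>")
#   def led_handler(request, state):
#       return ("200 OK", [("Content-Type", "text/plain")], ["LED " + state])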
|
[
"aferlazzo@gmail.com"
] |
aferlazzo@gmail.com
|
058e0ab5091f1f7f83fac5da38065560b7b2b934
|
d40994586c94638aa04c1917c70614b7a8e27de1
|
/prize_server/model/base.py
|
11bd2e5c802425e31ca087ea2fae5f146f9878b3
|
[] |
no_license
|
XTAYJGDUFVF/prize_server
|
3b506d39ae1b260933160dbf61726731788585b0
|
b4b60a33028ae1c638b8051304576f2b9e4630aa
|
refs/heads/master
| 2021-08-07T12:47:12.064557
| 2017-11-06T10:39:02
| 2017-11-06T10:39:02
| 109,638,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,366
|
py
|
# coding=utf-8
import functools
import aiomysql
import pytds
from tornado.gen import coroutine, sleep
from tornado.log import app_log
from config import Config
from util.struct import Ignore
from util.util import Singleton, Utils
from util.cache import MCachePool as MCachePoolABC, config_redis_default
from util.mem_cache import func_cache as original_func_cache
from util.database import MySQLPoolABC
class MCachePool(Singleton, MCachePoolABC):
def __init__(self):
addr = Config.RedisHost
settings = {
r'db': Config.RedisBase,
r'minsize': Config.RedisMinConn,
r'maxsize': Config.RedisMaxConn,
r'password': Config.RedisPasswd
}
MCachePoolABC.__init__(self, addr, settings)
class MySQLPool(Singleton, MySQLPoolABC):
def __init__(self):
addr = Config.MySqlMaster
settings = {
r'user': Config.MySqlUser,
r'password': Config.MySqlPasswd,
r'minsize': Config.MySqlMinConn,
r'maxsize': Config.MySqlMaxConn,
r'db': Config.MySqlDbName,
r'charset': r'utf8',
r'cursorclass': aiomysql.DictCursor,
}
MySQLPoolABC.__init__(self, addr, **settings)
@coroutine
def initialize():
Utils._HTTP_ERROR_AUTO_RETRY = Config.HttpErrorAutoRetry
Utils._HTTP_CONNECT_TIMEOUT = Config.HttpConnectTimeout
Utils._HTTP_REQUEST_TIMEOUT = Config.HttpRequestTimeout
yield MySQLPool().initialize()
yield MCachePool().initialize()
config_redis_default(expire=Config.RedisExpires, key_prefix=r'account_service__')
class BaseModel(Singleton, Utils):
def __init__(self):
self._db_pool = MySQLPool()
self._cache_pool = MCachePool()
def get_db_client(self, readonly=False):
return self._db_pool.get_client(readonly)
def get_db_transaction(self):
return self._db_pool.get_transaction()
def get_mssql_conn(self, database_name):
host, port = Config.MssqlHost
conn = pytds.connect(host, database_name, Config.MssqlUser, Config.MssqlPasswd, port=port, as_dict=True)
# mssql log
app_log.info(r'mssql connect to {}'.format(database_name))
def conn_close(conn):
type(conn).close(conn)
app_log.info(r'mssql conn closed')
import functools
conn.close = functools.partial(conn_close, conn)
return conn
def get_cache_client(self):
return self._cache_pool.get_client()
def Break(self, msg=None):
raise Ignore(msg)
@coroutine
def acquire_lock(self, key, retry_interval=0.1, max_hold_time=Config.DistributedLockMaxHoldTime, timeout=Config.AcquireDistributedLockTimeout):
"""
key: 分布式锁唯一标示
retry_interval: 尝试获取锁的时间间隔
max_hold_time: 最长持有时间,超时自动释放,为防止极端情况下锁被永久持有的问题
timeout: 获取锁超时时间
使用样例:
# self 是BaseModel实例
lname = r'some_lock_name'
lock = yield self.acquire_lock(lname)
if lock:
try:
yield sleep(5)
1 / 0
yield sleep(5)
finally:
yield self.release_lock(lname)
else:
pass
"""
app_log.debug(r'try acquire lock {}'.format(key))
cache = self.get_cache_client()
acquire_time_total = 0
timeout_time = 0
begin_time = self.timestamp()
while not (yield cache.setnx(key, self.timestamp())):
acquire_time_total += retry_interval
if acquire_time_total > timeout:
timeout_time = self.timestamp()
break
yield sleep(retry_interval)
if timeout_time:
app_log.warning(r'acquire lock {} timeout, cost {} s'.format(key, timeout_time - begin_time))
return False
app_log.debug(r'acquire lock success {}'.format(key))
yield cache.expire(key, max_hold_time)
return True
@coroutine
def release_lock(self, key):
cache = self.get_cache_client()
yield cache.delete(key)
app_log.debug(r'release lock {}'.format(key))
|
[
"963480530@qq.com"
] |
963480530@qq.com
|
3ce3fa355f4726bdb74170b7e80c7359ff504db8
|
915aa339b11a68dcf67eabb22def07710c0527c1
|
/src/result/Result.py
|
1d94ff6b9e785cb5da52fcb9731ec570bafb7d96
|
[
"MIT"
] |
permissive
|
charleshamel73/diceroller
|
ead233aead53a42bafae2226db32613cbb3c1830
|
83cf43922a59e07203b50372804ba79bdfde9940
|
refs/heads/master
| 2021-01-17T17:46:55.385189
| 2016-06-24T15:51:33
| 2016-06-24T15:51:33
| 60,239,762
| 0
| 0
| null | 2016-06-16T03:49:15
| 2016-06-02T06:53:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
class Result(object):
sum = 0
rolls = []
message = ""
def __init__(self, sum, roll=None,message=None):
self.sum = sum
if roll is not None:
self.rolls = roll
if message is not None:
self.message = message
def merge_results(self, result, type,operator_message):
from src.operator.Multiply import Multiply
from src.operator.Addition import Addition
from src.operator.Divide import Divide
from src.operator.Subtraction import Subtraction
if isinstance(type, Addition):
self.sum += result.sum
elif isinstance(type, Subtraction):
self.sum -= result.sum
elif isinstance(type, Multiply):
self.sum *= result.sum
elif isinstance(type, Divide):
self.sum /= result.sum
else:
raise TypeError("TYPE ERROR: Results can only be of type Operator. Found Type %s" % type.__class__)
self.message += result.message
self.message += operator_message
return self
#TODO: Create merge roll
#TODO: Create set message??
#TODO: merge message?
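# Hedged usage sketch (not part of the original file), assuming the
# src.operator classes imported above exist as shown:
#
#   r = Result(3, message="3").merge_results(Result(4, message="4"), Addition(), " + ")
#   r.sum  # -> 7
#
# Note the class-level 'rolls = []' default is shared by instances that never
# pass 'roll'; a per-instance list created in __init__ would avoid surprises.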
|
[
"charleshamel73@yahoo.com"
] |
charleshamel73@yahoo.com
|
3129d119bb1773e4909ac9e1ecf759cef0cad06e
|
539789516d0d946e8086444bf4dc6f44d62758c7
|
/inference/python/inference.py
|
7fc210e7978a31556f20ba12d8a1baa22d2ff6c4
|
[] |
no_license
|
hoangcuong2011/etagger
|
ad05ca0c54f007f54f73d39dc539c3737d5acacf
|
611da685d72da207870ddb3dc403b530c859d603
|
refs/heads/master
| 2020-05-03T15:15:33.395186
| 2019-03-28T01:40:21
| 2019-03-28T01:40:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,683
|
py
|
from __future__ import print_function
import sys
import os
path = os.path.dirname(os.path.abspath(__file__)) + '/../..'
sys.path.append(path)
import time
import argparse
import tensorflow as tf
import numpy as np
# for LSTMBlockFusedCell(), https://github.com/tensorflow/tensorflow/issues/23369
tf.contrib.rnn
# for QRNN
try: import qrnn
except ImportError: sys.stderr.write('import qrnn, failed\n')
from embvec import EmbVec
from config import Config
from token_eval import TokenEval
from chunk_eval import ChunkEval
from input import Input
def load_frozen_graph(frozen_graph_filename, prefix='prefix'):
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
op_dict=None,
producer_op_list=None,
name=prefix,
)
return graph
def inference(config, frozen_pb_path):
"""Inference for bucket
"""
# load graph
graph = load_frozen_graph(frozen_pb_path)
for op in graph.get_operations():
sys.stderr.write(op.name + '\n')
# create session with graph
# if graph is optimized by tensorRT, then
# from tensorflow.contrib import tensorrt as trt
# gpu_ops = tf.GPUOptions(per_process_gpu_memory_fraction = 0.50)
gpu_ops = tf.GPUOptions()
    # session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_ops)
session_conf = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False,
gpu_options=gpu_ops,
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
sess = tf.Session(graph=graph, config=session_conf)
# mapping placeholders and tensors
p_is_train = graph.get_tensor_by_name('prefix/is_train:0')
p_sentence_length = graph.get_tensor_by_name('prefix/sentence_length:0')
p_input_data_pos_ids = graph.get_tensor_by_name('prefix/input_data_pos_ids:0')
p_input_data_chk_ids = graph.get_tensor_by_name('prefix/input_data_chk_ids:0')
p_input_data_word_ids = graph.get_tensor_by_name('prefix/input_data_word_ids:0')
p_input_data_wordchr_ids = graph.get_tensor_by_name('prefix/input_data_wordchr_ids:0')
t_logits_indices = graph.get_tensor_by_name('prefix/logits_indices:0')
t_sentence_lengths = graph.get_tensor_by_name('prefix/sentence_lengths:0')
num_buckets = 0
total_duration_time = 0.0
bucket = []
while 1:
try: line = sys.stdin.readline()
except KeyboardInterrupt: break
if not line: break
line = line.strip()
if not line and len(bucket) >= 1:
start_time = time.time()
# Build input data
inp = Input(bucket, config, build_output=False)
feed_dict = {p_input_data_pos_ids: inp.example['pos_ids'],
p_input_data_chk_ids: inp.example['chk_ids'],
p_is_train: False,
p_sentence_length: inp.max_sentence_length}
feed_dict[p_input_data_word_ids] = inp.example['word_ids']
feed_dict[p_input_data_wordchr_ids] = inp.example['wordchr_ids']
if 'elmo' in config.emb_class:
feed_dict[p_elmo_input_data_wordchr_ids] = inp.example['elmo_wordchr_ids']
if 'bert' in config.emb_class:
feed_dict[p_bert_input_data_token_ids] = inp.example['bert_token_ids']
feed_dict[p_bert_input_data_token_masks] = inp.example['bert_token_masks']
feed_dict[p_bert_input_data_segment_ids] = inp.example['bert_segment_ids']
if 'elmo' in config.emb_class:
feed_dict[p_bert_input_data_elmo_indices] = inp.example['bert_elmo_indices']
logits_indices, sentence_lengths = sess.run([t_logits_indices, t_sentence_lengths], feed_dict=feed_dict)
tags = config.logit_indices_to_tags(logits_indices[0], sentence_lengths[0])
for i in range(len(bucket)):
if 'bert' in config.emb_class:
j = inp.example['bert_wordidx2tokenidx'][0][i]
out = bucket[i] + ' ' + tags[j]
else:
out = bucket[i] + ' ' + tags[i]
sys.stdout.write(out + '\n')
sys.stdout.write('\n')
bucket = []
duration_time = time.time() - start_time
out = 'duration_time : ' + str(duration_time) + ' sec'
sys.stderr.write(out + '\n')
num_buckets += 1
total_duration_time += duration_time
if line : bucket.append(line)
if len(bucket) != 0:
start_time = time.time()
# Build input data
inp = Input(bucket, config, build_output=False)
        # reuse the frozen-graph placeholders mapped above
        feed_dict = {p_input_data_pos_ids: inp.example['pos_ids'],
                     p_input_data_chk_ids: inp.example['chk_ids'],
                     p_is_train: False,
                     p_sentence_length: inp.max_sentence_length}
        feed_dict[p_input_data_word_ids] = inp.example['word_ids']
        feed_dict[p_input_data_wordchr_ids] = inp.example['wordchr_ids']
        if 'elmo' in config.emb_class:
            feed_dict[p_elmo_input_data_wordchr_ids] = inp.example['elmo_wordchr_ids']
        if 'bert' in config.emb_class:
            feed_dict[p_bert_input_data_token_ids] = inp.example['bert_token_ids']
            feed_dict[p_bert_input_data_token_masks] = inp.example['bert_token_masks']
            feed_dict[p_bert_input_data_segment_ids] = inp.example['bert_segment_ids']
            if 'elmo' in config.emb_class:
                feed_dict[p_bert_input_data_elmo_indices] = inp.example['bert_elmo_indices']
logits_indices, sentence_lengths = sess.run([t_logits_indices, t_sentence_lengths], feed_dict=feed_dict)
tags = config.logit_indices_to_tags(logits_indices[0], sentence_lengths[0])
for i in range(len(bucket)):
if 'bert' in config.emb_class:
j = inp.example['bert_wordidx2tokenidx'][0][i]
out = bucket[i] + ' ' + tags[j]
else:
out = bucket[i] + ' ' + tags[i]
sys.stdout.write(out + '\n')
sys.stdout.write('\n')
duration_time = time.time() - start_time
out = 'duration_time : ' + str(duration_time) + ' sec'
tf.logging.info(out)
num_buckets += 1
total_duration_time += duration_time
out = 'total_duration_time : ' + str(total_duration_time) + ' sec' + '\n'
out += 'average processing time / bucket : ' + str(total_duration_time / num_buckets) + ' sec'
tf.logging.info(out)
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--emb_path', type=str, help='path to word embedding vector + vocab(.pkl)', required=True)
parser.add_argument('--wrd_dim', type=int, help='dimension of word embedding vector', required=True)
parser.add_argument('--word_length', type=int, default=15, help='max word length')
parser.add_argument('--frozen_path', type=str, help='path to frozen model(ex, ./exported/ner_frozen.pb)', required=True)
args = parser.parse_args()
tf.logging.set_verbosity(tf.logging.INFO)
args.restore = None
config = Config(args, is_training=False, emb_class='glove', use_crf=True)
inference(config, args.frozen_path)
|
[
"hazzling@gmail.com"
] |
hazzling@gmail.com
|
1a55c8829671cc809a85bf7ae583aba04d596824
|
7ffbc81ef48926c45e5789c13d167b235c12b150
|
/rept.py
|
920953c9307d144d688061474229b57988486515
|
[] |
no_license
|
s16009/PythonTutorial
|
b214e2880e17fedd08cfdbf9bca7d45bde94a7c9
|
04fc1c5bea0f26ea0b7681de37451e1fc4f49f63
|
refs/heads/master
| 2020-12-24T05:40:28.395630
| 2016-08-09T03:03:56
| 2016-08-09T03:03:56
| 63,661,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
for x in range(1, 11):
# print(repr(x).rjust(2),repr(x*x).rjust(3), end=' ')
    #print(repr(x*x*x).rjust(4))
print('{0:2d} {1:3d} {2:4d}'.format(x,x*x,x*x*x))
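# For reference (not part of the original file), the loop prints a padded
# table of x, x**2, x**3:
#    1   1    1
#    2   4    8
#   ...
#   10 100 1000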
|
[
"s16009@std.it-college.ac.jp"
] |
s16009@std.it-college.ac.jp
|
a71b79fa0a6d31dce4cb2c05890195cf13440df7
|
e03351079da9ca55bacdebb1c94612bdcf45b5a5
|
/allrest/apps.py
|
ab786fe241e2b1da78b3b5bf0c358e9efb632d61
|
[] |
no_license
|
Turza006/food
|
e3a5f202b90cfe2dbfeca3c1bec5008ffa151eed
|
1609bef7d67dee2a07190fadc8e67560a4a40ec5
|
refs/heads/master
| 2020-06-28T10:33:03.468298
| 2019-08-02T09:51:07
| 2019-08-02T09:51:07
| 200,210,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
from django.apps import AppConfig
class AllrestConfig(AppConfig):
name = 'allrest'
|
[
"probaldattaturza@gmail.com"
] |
probaldattaturza@gmail.com
|
2af46188232c9dc83f27d7c1db55e7fba17bb0fc
|
719da91704a59ec2d64c0ef29a37eade897e566c
|
/Coffee2Go/Coffee/Lib/site-packages/genopandas/plotting/genomic.py
|
3adaf95bca2f792ad0efdbfd6c247bdac9f3c2d4
|
[] |
no_license
|
FeliciaWilliamson/Coffee2Go
|
7fa42ed3604fd9717f1ff68794ab7c7046b20bdd
|
f02c5d0232003c15a571fcadb528268bd4ff1c5b
|
refs/heads/master
| 2023-09-01T01:43:09.642547
| 2021-10-19T14:58:37
| 2021-10-19T14:58:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,924
|
py
|
"""Module containing functions for plotting data along a genomic axis."""
from itertools import chain
import numpy as np
import pandas as pd
import toolz
from .base import scatter_plot, step_plot, apply_palette
def genomic_scatter_plot(data,
y,
hue=None,
hue_order=None,
palette=None,
color=None,
chromosomes=None,
legend=True,
legend_kws=None,
ax=None,
style_axis=True,
**kwargs):
"""Plots genomic data along a chromosomal axis.
Parameters
----------
data : GenomicDataFrame
Genomic data to plot.
y, hue : str
Columns to use for plotting. ``y`` determines what is drawn on the
y-axis. If given, ``hue`` points are colored according to the
(categorical) values of the respective column. If hue == 'chromosome'
points are colored per chromosome.
hue_order : List[str]
Order to plot the categorical hue levels in.
palette : List[str] or Dict[Any, str]
Colors to use for the different levels of the hue variable. Can either
be a dictionary mapping values to specific colors, or a list of colors
to use.
color : matplotlib color
Color to use for all elements. Overrides palette if given.
chromosomes: List[str]
List of chromosomes to plot. Can be used to select a subset of
chromosomes or to specify a specific order.
legend : bool
Whether to draw a legend for the different hue levels.
(Only used if hue is given.)
legend_kws : Dict[str, Any]
Dictionary of additional keyword arguments to pass to ax.legend
when drawing the legend.
ax : AxesSubplot
Axis to use for drawing.
kwargs : Dict[str, Any]
Other keyword arguments are passed through to ``ax.plot`` at draw time.
Returns
-------
AxesSubplot
Axis on which the data was drawn.
"""
if chromosomes is not None:
data = data.gloc[chromosomes]
# Assemble plot data.
plot_data = pd.DataFrame({
'chromosome': data.gloc.chromosome.values,
'position': data.gloc.position_offset.values,
'y': data[y].values
}) # yapf: disable
if hue is not None and hue not in plot_data:
plot_data[hue] = data[hue]
# Order hue by data chromosome order if hue == "chromosome"
# and no specific order is given.
if hue == 'chromosome' and hue_order is None:
hue_order = data.gloc.chromosomes
# Plot using scatter.
default_plot_kws = {'markersize': 1}
plot_kws = toolz.merge(default_plot_kws, kwargs)
ax = scatter_plot(
data=plot_data,
x='position',
y='y',
hue=hue,
hue_order=hue_order,
palette=palette,
color=color,
legend=legend,
legend_kws=legend_kws,
ax=ax,
**plot_kws)
if style_axis:
# Style axis.
_draw_dividers(data.gloc.chromosome_offsets, ax=ax)
ax.set_xlabel('Chromosome')
ax.set_ylabel(y)
return ax
def _draw_dividers(chrom_offsets, ax):
"""Draws chromosome dividers at offsets to indicate chromosomal boundaries.
The chrom_offsets argument is expected to include _END_ marker (which is
included by default in GenomicDataFrames).
Parameters
----------
chrom_offsets : Dict[str, int]
Position offsets at which to draw boundaries for the
respective chromosomes.
ax : AxesSubplot
Axis to use for drawing.
"""
positions = np.array(list(chrom_offsets.values()))
# Draw dividers.
for loc in positions[1:-1]:
ax.axvline(loc, color='grey', lw=0.5, zorder=5)
# Draw xtick labels.
ax.set_xticks((positions[:-1] + positions[1:]) / 2)
ax.set_xticklabels(chrom_offsets.keys())
# Set xlim to boundaries.
ax.set_xlim(0, chrom_offsets['_END_'])
def genomic_step_plot(data,
y,
hue=None,
hue_order=None,
palette=None,
color=None,
chromosomes=None,
legend=True,
legend_kws=None,
ax=None,
style_axis=True,
**kwargs):
if chromosomes is not None:
data = data.gloc[chromosomes]
# We need to include both start/end positions in the dataframe.
# To do so, we basically create two copies of the original df
# (one with start, one with end positions), concat these two frames
# and then sort the concatenated frame by original index and position.
# Create initial frame (with start positions).
plot_data = pd.DataFrame({
'chromosome': data.gloc.chromosome.values,
'position': data.gloc.start_offset.values,
'y': data[y].values,
'index': np.arange(len(data[y]))
})
if hue is not None:
plot_data[hue] = data[hue]
# Merge with copy containing end positions.
plot_data = pd.concat(
[plot_data,
plot_data.assign(position=data.gloc.end_offset.values)],
axis=0)
# Sort by original row order.
plot_data = plot_data.sort_values(by=['index', 'position'])
    plot_data = plot_data.drop(columns='index')
# Order hue by data chromosome order if hue == "chromosome" and
# no specific order is given.
if hue == 'chromosome' and hue_order is None:
hue_order = data.gloc.chromosomes
# Plot using step.
default_step_kws = {'where': 'post'}
step_kws = toolz.merge(default_step_kws, kwargs)
ax = step_plot(
data=plot_data,
x='position',
y='y',
hue=hue,
hue_order=hue_order,
palette=palette,
color=color,
legend=legend,
legend_kws=legend_kws,
ax=ax,
**step_kws)
if style_axis:
# Style axis.
_draw_dividers(data.gloc.chromosome_offsets, ax=ax)
ax.set_xlabel('Chromosome')
ax.set_ylabel(y)
return ax
def genomic_region_plot(data,
y=None,
hue=None,
hue_order=None,
palette=None,
color=None,
chromosomes=None,
ax=None,
style_axis=True,
**kwargs):
"""Plots highlighted regions along a genomic axis.
Parameters
----------
data : pandas.DataFrame
Tidy ('long-form'') dataframe where each column is a variable and
each row is an observation. Should contain specified
{chrom,start,end}_col columns (which default to 'chromosome', 'start'
and 'end', respectively).
hue : str
Column to color the data points by.
hue_order:
Order to plot the categorical levels in, otherwise the levels
are inferred from the data objects.
palette:
Colors to use for the different levels of the hue variable.
color:
Color for all of the elements. Only used when hue is not specified.
chromosomes: List[str]
List of chromosomes to plot. Can be used to select a subset of
chromosomes or to specify a specific order.
ax : matplotlib Axes, optional
Axes object to draw the plot onto.
style_axis : bool
Whether to style axes with dividers, labels etc. If False, leaves the
axis unchanged. Useful for combining with other functions
(such as plot_genomic) to avoid double drawing of annotations.
kwargs : Dict[str, Any]
Other keyword arguments are passed through to ``PatchCollection``
(when drawing with y values) or ``ax.axvspan`` (when drawing without
y values) at draw time.
Returns
-------
matplotlib.Axes
The Axes object containing the plot.
"""
from matplotlib import pyplot as plt
if chromosomes is not None:
data = data.gloc[chromosomes]
# Default axes.
if ax is None:
_, ax = plt.subplots()
# Assemble plot data.
plot_data = pd.DataFrame({
'chromosome': data.gloc.chromosome.values,
'start': data.gloc.start_offset.values,
'end': data.gloc.end_offset.values
})
if y is not None:
plot_data['value'] = data[y].values
draw_func = _draw_region_patches
else:
draw_func = _draw_region_spans
if hue is None:
draw_func(plot_data, ax=ax, color=color, **kwargs)
else:
plot_data['hue'] = data[hue]
plot_data = plot_data.assign(_color=apply_palette(
plot_data[hue], palette, order=hue_order))
for (_, color), grp in plot_data.groupby(['hue', '_color']):
draw_func(grp, ax=ax, color=color, **kwargs)
if style_axis:
# Style axis.
_draw_dividers(data.gloc.chromosome_offsets, ax=ax)
ax.set_xlabel('Chromosome')
if y is not None:
ax.set_ylabel(y)
# TODO: Set ylim?
return ax
def _draw_region_patches(grp, ax, color=None, **kwargs):
from matplotlib import patches as mpl_patches
from matplotlib import collections as mpl_collections
grp = grp.assign(width=grp['end'] - grp['start'])
patches = mpl_collections.PatchCollection(
(mpl_patches.Rectangle(
xy=(tup.start, 0), width=tup.width, height=tup.value)
for tup in grp.itertuples()),
facecolor=color,
edgecolor=color,
**kwargs)
ax.add_collection(patches)
def _draw_region_spans(grp, ax, color=None, **kwargs):
for tup in grp.itertuples():
ax.axvspan(tup.start, tup.end, color=color, **kwargs)
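# Hedged usage sketch (not part of the original file), assuming 'gdf' is a
# GenomicDataFrame with a numeric 'ratio' column:
#
#   ax = genomic_scatter_plot(gdf, y='ratio', hue='chromosome')
#   ax.figure.savefig('ratio.png')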
|
[
"FeliciaWilliamson@users.noreply.github.com"
] |
FeliciaWilliamson@users.noreply.github.com
|
005264f15500de2b29d4e16c24c16ff4bd7582da
|
38fcefd669647388d8aa58089cbecc18e0059f35
|
/LM/PYTHON/Ejercicios secuenciales/Ejercicio 13.py
|
87a5d5a13653cd69dcec8829e6c4e78f211d7312
|
[] |
no_license
|
ManuelLoraRoman/Apuntes-1-ASIR
|
42309fa4effc576cf1d475c96156a58a1e7af879
|
bcbba6ada3b398720261d4a083aa45b670a61af1
|
refs/heads/main
| 2023-01-20T05:44:35.218782
| 2020-11-28T21:44:07
| 2020-11-28T21:44:07
| 316,726,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
#Read a number and display its square root and its cube root.
#Python3 has no predefined function to compute the cube root. How can it be calculated?
import math
num = int(input("Dime un número:"))
raizcua = math.sqrt(num)
raizcub = num ** (1 / 3)  # parentheses required: ** binds tighter than /
print("La raíz cuadrada del número",num,"es",raizcua,"y la raíz cúbica es",raizcub,".")
|
[
"manuelloraroman@gmail.com"
] |
manuelloraroman@gmail.com
|
8329fa5bea57d4f6278bd16ce249d56f50672bc7
|
2c872fedcdc12c89742d10c2f1c821eed0470726
|
/pyNet/day06/code/test_poll.py
|
5f18a3bc99a060b68f525c9f9e72ac31b6e740a3
|
[] |
no_license
|
zuigehulu/AID1811
|
581c3c7a37df9fa928bc632e4891fc9bafe69201
|
10cab0869875290646a9e5d815ff159d0116990e
|
refs/heads/master
| 2020-04-19T16:33:04.174841
| 2019-01-30T07:58:24
| 2019-01-30T07:58:24
| 168,307,918
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
from multiprocessing import Pool
from time import sleep
L = [1,2,3,4,5]
def a(ne):
print(ne**2)
sleep(2)
pool = Pool()
for i in L:
pool.apply_async(a,(i,))
pool.close()
pool.join()
|
[
"442315617@qq.com"
] |
442315617@qq.com
|
bf179785fa2e221dfcdd53d62e123b18762c8136
|
4610d4365583979b11ec3861c611ac88b4dff67d
|
/Oefenopdrachten/Oefenopdracht3_3.py
|
a197ec8d00c25d69bb60abca1f7a6bca9429c756
|
[] |
no_license
|
NielsRisseeuw/PROG_Huiswerk
|
bdd9cc7db78dfdc23796bc7214143f2df1f6db5b
|
0349f1575c31f837f39fcde750c502993c8c3796
|
refs/heads/master
| 2021-08-08T15:04:11.039430
| 2017-11-10T14:52:07
| 2017-11-10T14:52:07
| 103,146,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
leeftijd = int(input('Wat is je leeftijd: '))
paspoort = input('Heb je een Nederlands paspoort: ')
if leeftijd >= 18 and paspoort == 'ja':
print('Gefeliciteerd, je mag stemmen!')
else:
print('Jammer, je mag niet stemmen')
|
[
"niels.risseeuw@student.hu.nl"
] |
niels.risseeuw@student.hu.nl
|
04ffe19889c2d4c88c5284eee383406c5351f78b
|
c5321690a4c96bc8f1cade3c8ca6ce681b1fe8f2
|
/project_hila/questions/urls.py
|
860a185491e428db4ec154d123f224cae59214a9
|
[] |
no_license
|
ManchiGitHub/project_hila
|
93f90d4787a6276d113b6982960c4489144840b9
|
91885f06f42511f304ac4dcf33ab41f3a0742937
|
refs/heads/master
| 2023-09-02T14:20:40.393068
| 2021-06-23T08:59:16
| 2021-06-23T08:59:16
| 375,990,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path('create_question/', views.create_question_view, name="create_question"),
path('questions_repository', views.questions_repository_view, name="questions_repository"),
path('create_questionnaire/<str:key>/', views.create_questionnaire, name="create_questionnaire"),
path('send_questionnaire/',views.send_questionnaire, name="send_questionnaire"),
]
|
[
"66803951+ManchiGitHub@users.noreply.github.com"
] |
66803951+ManchiGitHub@users.noreply.github.com
|
9fab7862569f3b81e8858813a45e4e303cf5f617
|
62ea30dc6f7c36417e0edfaca73fc27141ca43a3
|
/user.py
|
049fc6ca49d2a55d34cb7baa5c96771c722d17ce
|
[] |
no_license
|
chulinx/PythonStudy
|
2571b484cc182f2568593c1787b0ee2aa0802fba
|
178c38ec5abfcb3e952e7c2c9ea94a411014633a
|
refs/heads/master
| 2020-12-24T20:10:13.684778
| 2017-04-04T12:04:33
| 2017-04-04T12:04:33
| 86,238,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
#!/usr/bin/python3
users=[]
zhuce=input("Do you want sign up my web? if you want ,please input Y else input N: ")
while zhuce == 'Y':
name=input("Input your name: ")
if name == 'exit':
break
else:
users.append(name)
print(users)
|
[
"chulinx@163.com"
] |
chulinx@163.com
|
da0de991295a250dbfc4238a27b5f8573f7770a8
|
48c6b58e07891475a2c60a8afbbbe6447bf527a7
|
/src/tests/control/test_orders.py
|
74c7007fa3a989b7c5f5133361a5abbc1a3dcc33
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
awg24/pretix
|
a9f86fe2dd1f3269734ed39b3ea052ef292ff110
|
b1d67a48601838bac0d4e498cbe8bdcd16013d60
|
refs/heads/master
| 2021-01-15T23:40:28.582518
| 2015-08-27T14:05:18
| 2015-08-27T14:05:18
| 42,126,402
| 1
| 0
| null | 2015-09-08T16:58:52
| 2015-09-08T16:58:51
| null |
UTF-8
|
Python
| false
| false
| 3,084
|
py
|
from datetime import timedelta
from decimal import Decimal
import pytest
from django.utils.timezone import now
from pretix.base.models import (
Event, EventPermission, Item, Order, OrderPosition, Organizer,
OrganizerPermission, User,
)
@pytest.fixture
def env():
o = Organizer.objects.create(name='Dummy', slug='dummy')
event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(), plugins='pretix.plugins.banktransfer'
)
user = User.objects.create_user('dummy@dummy.dummy', 'dummy@dummy.dummy', 'dummy')
EventPermission.objects.create(
event=event,
user=user,
can_view_orders=True,
can_change_orders=True
)
o = Order.objects.create(
code='FOO', event=event,
user=user, status=Order.STATUS_PENDING,
datetime=now(), expires=now() + timedelta(days=10),
total=0, payment_provider='banktransfer'
)
ticket = Item.objects.create(event=event, name='Early-bird ticket',
category=None, default_price=23,
admission=True)
event.settings.set('attendee_names_asked', True)
OrderPosition.objects.create(
order=o,
item=ticket,
variation=None,
price=Decimal("14"),
attendee_name="Peter"
)
return event, user, o
@pytest.mark.django_db
def test_order_list(client, env):
client.login(identifier='dummy@dummy.dummy', password='dummy')
response = client.get('/control/event/dummy/dummy/orders/')
assert 'FOO' in response.rendered_content
response = client.get('/control/event/dummy/dummy/orders/?user=peter')
assert 'FOO' not in response.rendered_content
response = client.get('/control/event/dummy/dummy/orders/?user=dummy')
assert 'FOO' in response.rendered_content
response = client.get('/control/event/dummy/dummy/orders/?status=p')
assert 'FOO' not in response.rendered_content
response = client.get('/control/event/dummy/dummy/orders/?status=n')
assert 'FOO' in response.rendered_content
@pytest.mark.django_db
def test_order_detail(client, env):
client.login(identifier='dummy@dummy.dummy', password='dummy')
response = client.get('/control/event/dummy/dummy/orders/FOO/')
assert 'Early-bird' in response.rendered_content
assert 'Peter' in response.rendered_content
@pytest.mark.django_db
def test_order_transition_cancel(client, env):
client.login(identifier='dummy@dummy.dummy', password='dummy')
client.post('/control/event/dummy/dummy/orders/FOO/transition', {
'status': 'c'
})
o = Order.objects.current.get(identity=env[2].identity)
assert o.status == Order.STATUS_CANCELLED
@pytest.mark.django_db
def test_order_transition_to_paid_success(client, env):
client.login(identifier='dummy@dummy.dummy', password='dummy')
client.post('/control/event/dummy/dummy/orders/FOO/transition', {
'status': 'p'
})
o = Order.objects.current.get(identity=env[2].identity)
assert o.status == Order.STATUS_PAID
|
[
"mail@raphaelmichel.de"
] |
mail@raphaelmichel.de
|
e51792809cd4a793497d9faddab15f893349e70b
|
cb93a5de78aebd1fdff3ac8cddae5164ba1bced6
|
/ctmc/birth_death.py
|
54ea3e2ece1684df5fe36fe56a53a8bcf3391799
|
[] |
no_license
|
tseller/ctmc
|
5e64770ff77c09107cf47fd9e45a41ee4f87e1a0
|
48159c6062e5a2ef010e00edeb1fdeef25e72b7a
|
refs/heads/dev
| 2016-09-06T10:08:48.573210
| 2015-12-04T21:39:30
| 2015-12-04T21:39:30
| 28,284,893
| 1
| 2
| null | 2015-12-04T21:40:19
| 2014-12-21T01:34:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
from ctmc import CTMC
import numpy as np
class BirthDeath(CTMC):
''' Birth-Death Process '''
def __init__(
self,
num_states,
forward, # forward rate
backward, # backward rate
):
# turn scalars into arrays
if isinstance(forward, (int, long, float)) and isinstance(backward, (int, long, float)):
# forward and backward are scalars
forward = forward * np.ones(num_states)
backward = backward * np.ones(num_states)
elif isinstance(forward, (int, long ,float)):
# backward is an array, forward is not
forward = forward * np.ones(len(backward))
else:
# forward is an array, backward is not
backward = backward * np.ones(len(forward))
# set the final element of the forward array and the first element of the backward array to 0
self.forward = np.append(np.asarray(forward)[:-1], 0)
self.backward = np.append(0, np.asarray(backward)[1:])
if (self.forward < 0).any() or (self.backward < 0).any():
raise ValueError('forward and backward may not be negative.')
Q = - np.diag(np.append(self.forward[:-1], 0)) \
- np.diag(np.append(0, self.backward[1:])) \
+ np.diag(self.forward[:-1], 1) \
+ np.diag(self.backward[1:], -1)
super(BirthDeath, self).__init__(
Q=Q
)
self.add_metric('population', np.arange(num_states))
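# Illustrative sketch (not part of the original file): for BirthDeath(3, 1.0, 2.0)
# the generator assembled above is the tridiagonal matrix
#
#   Q = [[-1,  1,  0],
#        [ 2, -3,  1],
#        [ 0,  2, -2]]
#
# with every row summing to zero, as required of a CTMC generator.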
|
[
"tseller@gmail.com"
] |
tseller@gmail.com
|
56ac5f435c1505b586a07d2bf83a64eff2564702
|
60b5a9a8b519cb773aca004b7217637f8a1a0526
|
/customer/urls.py
|
2bf028b3dc13eed4b4e9793741b3f478b4d5d355
|
[] |
no_license
|
malep2007/dag-bragan-erp-backend
|
76ce90c408b21b0bda73c6dd972e2f77b7f21b1f
|
e98182af2848a6533ddd28c586649a8fee1dc695
|
refs/heads/master
| 2021-08-11T01:29:27.864747
| 2019-01-15T17:46:26
| 2019-01-15T17:46:26
| 151,831,965
| 0
| 0
| null | 2021-06-10T20:56:21
| 2018-10-06T11:10:12
|
Python
|
UTF-8
|
Python
| false
| false
| 437
|
py
|
from django.urls import path, reverse
from . import views
urlpatterns = [
path('', views.CustomerListView.as_view(), name='index'),
path('<int:pk>/', views.CustomerDetailView.as_view(), name="detail"),
path('edit/<int:pk>/', views.CustomerUpdateView.as_view(), name='edit'),
path('add/', views.CustomerCreateView.as_view(), name='add'),
path('delete/<int:pk>/', views.CustomerDeleteView.as_view(), name='delete'),
]
|
[
"ephraim.malinga@gmail.com"
] |
ephraim.malinga@gmail.com
|
0a5a579dd0a6d232526835dc574518dcbab6e108
|
8393f28f390e222b0429fc4f3f07590f86333d8d
|
/linux-stuff/bin/svn-merge-meld
|
7c7beb77f18c8c2b9a4dbfe1bd016a679f58f12d
|
[] |
no_license
|
jmangelson/settings
|
fe118494252da35b175d159bbbef118f22b189fb
|
df9291f8947ba1ceb7c83a731dfbe9e775ce5add
|
refs/heads/master
| 2021-01-16T17:39:24.105679
| 2015-02-20T01:17:26
| 2015-02-20T01:17:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
#!/usr/bin/env python
# svn merge-tool python wrapper for meld
import sys
import subprocess
# path to meld ($ which meld)
meld = "/usr/bin/meld"
log = False
f = open('/tmp/svn-merge-meld.log', 'a')
def main():
if log:
f.write("call: %r\n" % sys.argv)
# file paths
base = sys.argv[1]
theirs = sys.argv[2]
mine = sys.argv[3]
merged = sys.argv[4]
partial = sys.argv[5]
# the call to meld
cmd = [meld, mine, theirs, merged]
# Call meld, making sure it exits correctly
subprocess.check_call(cmd)
try:
main()
except Exception as e:
print "Oh noes, an error: %r" % e
if log:
f.write("Error: %r\n" % e)
sys.exit(-1)
|
[
"devnull@localhost"
] |
devnull@localhost
|
|
c62f7c54c41cbcdc27f815f27028d52967573ee0
|
c656e7bf15f19d376185adcbc8af68541252b8c1
|
/apilib/exceptions.py
|
81d44460e938389e89b51e47b3f802b3f3c6dad6
|
[] |
no_license
|
UnicycleLabs/apilib
|
43ca79c832a5cc0ed67921e4fd3c3a8e2c7a82eb
|
bf58376c9c4d532ae6a12aa5685089f8e38c3d43
|
refs/heads/master
| 2021-01-10T15:20:31.018677
| 2018-03-31T23:48:14
| 2018-03-31T23:48:14
| 55,811,676
| 1
| 1
| null | 2019-08-07T02:20:13
| 2016-04-08T21:48:36
|
Python
|
UTF-8
|
Python
| false
| false
| 595
|
py
|
class ApilibException(Exception):
pass
class UnknownFieldException(ApilibException):
pass
class ModuleRequired(ApilibException):
pass
class ConfigurationRequired(ApilibException):
pass
class NotInitialized(ApilibException):
pass
class DeserializationError(ApilibException):
def __init__(self, errors):
self.errors = errors
def __str__(self):
return 'DeserializationError:\n %s' % '\n '.join(str(e) for e in self.errors)
class MethodNotFoundException(ApilibException):
pass
class MethodNotImplementedException(ApilibException):
pass
|
[
"jonathan@unicyclelabs.com"
] |
jonathan@unicyclelabs.com
|
c7711333f4d8e8f37e54110c5e0412dd308ca7ea
|
afe7a071a7bf3219df96902837d8f299f9683f11
|
/contrib/seeds/generate-seeds.py
|
4de290d7ccf303e48f7680e9fd8226ab77488815
|
[
"MIT"
] |
permissive
|
ArrayCoreDevelopers/Array
|
96ece6ccab529ea336f08cd64cf8faaf8fc83894
|
dc4d28a14eb1af9235885527fd27c4a1a8bf1a7a
|
refs/heads/main
| 2023-07-14T20:07:34.804029
| 2021-08-12T21:53:20
| 2021-08-12T21:53:20
| 363,210,099
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,379
|
py
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 34260)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 44260)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
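# Illustrative examples (not part of the original file) of parse_spec:
#   parse_spec('1.2.3.4', 34260)      -> (pchIPv4 + bytearray([1, 2, 3, 4]), 34260)
#   parse_spec('1.2.3.4:8333', 34260) -> (pchIPv4 + bytearray([1, 2, 3, 4]), 8333)
#   parse_spec('[::1]:8333', 34260)   -> (16-byte IPv6 loopback address, 8333)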
|
[
"83463254+ArrayCoreDevelopers@users.noreply.github.com"
] |
83463254+ArrayCoreDevelopers@users.noreply.github.com
|
33d846d15a85d3ab4338299fb35838a398963ec3
|
1db60c220307909be3266ae435f230ce9e7625de
|
/TDD_String_Calculator.py
|
e25c7a75e3ce3d5c9dcc8c21187089a1be7824c3
|
[] |
no_license
|
viniciusmvf/String-Calculator
|
ab64a4f2c8562aea510dd9b14ff3ea751950ff22
|
5dff905df13068f6127309cfe28cb3e17d65e317
|
refs/heads/master
| 2020-06-10T22:11:24.228236
| 2019-06-14T07:48:47
| 2019-06-14T07:48:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
import re
def sum_func(num_list):
sum =0
for i in num_list:
if i == '':
num = 0
else:
num = int(i)
if num < 0:
raise ValueError('Negatives not allowed')
elif num > 1000:
num = 0
sum += num
return sum
def add(num):
search1 = re.search('^//(.*)', num)
if num == '':
num_list = []
elif '[' in num:
cnt = 0
for i in num:
if i == '[':
cnt += 1
        delim = r'\[(.*)]' * cnt
search = re.search(f'^//{delim}+', num)
if search:
delimiter = ''
for i in range(cnt):
delimiter += search.groups()[i]
num_list = re.split('['+delimiter+'\n\]\[]', num[4 + len(delimiter):])
elif search1:
delimiter = search1.groups()[0]
num_list = re.split('['+delimiter+'\n]', num[3 + len(delimiter):])
else:
num_list = re.split('[,\n]', num)
return sum_func(num_list)
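# Illustrative expected behaviour (not part of the original file):
#   add('1,2')                -> 3
#   add('//;\n1;2')           -> 3   (custom single-character delimiter)
#   add('//[***]\n1***2***3') -> 6   (bracketed multi-character delimiter)
#   add('1001,2')             -> 2   (numbers greater than 1000 are ignored)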
|
[
"nkumaneandy@gmail.com"
] |
nkumaneandy@gmail.com
|
0067be0794868297778b2c58e8e3f9fe379dba44
|
65f4fe32c15614aec492eceb3e810aba54c9a564
|
/app/quiz/models/true_false.py
|
25a6d7be51440f0c4411d33df6f86d557f922a06
|
[] |
no_license
|
tartieret/django-starter
|
204da20b550ab2725d1831b663255f7138d8c921
|
4c2199af6a938e51ce84bcacb305d43ca398911d
|
refs/heads/master
| 2023-02-22T06:44:37.243636
| 2021-01-21T03:15:25
| 2021-01-21T03:15:25
| 314,928,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from .question import Question
class TFQuestion(Question):
correct = models.BooleanField(
blank=False,
default=False,
help_text=_(
"Tick this if the question " "is true. Leave it blank for" " false."
),
verbose_name=_("Correct"),
)
def check_if_correct(self, guess):
if guess == "True":
guess_bool = True
elif guess == "False":
guess_bool = False
else:
return False
if guess_bool == self.correct:
return True
else:
return False
def get_answers(self):
return [
{"correct": self.check_if_correct("True"), "content": "True"},
{"correct": self.check_if_correct("False"), "content": "False"},
]
def get_answers_list(self):
return [(True, True), (False, False)]
def answer_choice_to_string(self, guess):
return str(guess)
class Meta:
verbose_name = _("True/False Question")
verbose_name_plural = _("True/False Questions")
# ordering = ["category"]
|
[
"thomas.tartiere@panevo.com"
] |
thomas.tartiere@panevo.com
|
356835a4616e8a1b5d15cb8791d60dd3a24ebc76
|
997a9e94260606eb4137af785fbc9b12ff0ce11b
|
/Nueva carpeta (2)/prueba import.py
|
a6c0a92d50101f682c029825fe2974896119d241
|
[] |
no_license
|
GerardoNavaDionicio/Programacion_Orientada_a_Objetos
|
dc741c28223be81ac844a0bae8ab12d9bfa52076
|
331bab7605e4394605c4a87c05e42ddda739069a
|
refs/heads/main
| 2023-07-15T11:35:06.940089
| 2021-08-25T16:18:24
| 2021-08-25T16:18:24
| 368,373,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
from suma import min,max
def sumar (x,y):
return x+y
print (sumar(5,9))
print(min(6,12))
print(max(34,10))
|
[
"77888428+GerardoNavaDionicio@users.noreply.github.com"
] |
77888428+GerardoNavaDionicio@users.noreply.github.com
|
daf9ca0b4ab0f407a65d6d2d6af152c79c81fb7d
|
164ae0c291dde6c1826bd8fb912527777173f594
|
/algorithms/number-of-dice-rolls-with-target-sum.py
|
a55cf4df0045194f5bcbe0b85290772a53c57a7e
|
[] |
no_license
|
victorvg17/leet-code
|
6ba86ce4d52166056ac849c787c5b528d6ae33ed
|
94fcd4c049d1033cb5d3da8902b578eedb7a080e
|
refs/heads/master
| 2023-06-25T14:54:04.470366
| 2021-07-16T09:24:01
| 2021-07-16T09:24:01
| 260,661,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
class Solution:
def numRollsToTarget(self, d: int, f: int, target: int) -> int:
mem = {}
def dp(d: int, target: int) -> int:
if d == 0:
return 0 if target > 0 else 1
if (d, target) in mem:
return mem[(d, target)]
to_return = 0
for i in range(max(0, target - f), target):
to_return += dp(d-1, i)
mem[(d, target)] = to_return
return to_return
return dp(d, target) % (10**9 + 7)
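# Illustrative check (not part of the original file):
#   Solution().numRollsToTarget(2, 6, 7) -> 6
# (the six ordered pairs of two 6-faced dice that sum to 7)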
|
[
"victorgeorge@Victors-MacBook-Pro.local"
] |
victorgeorge@Victors-MacBook-Pro.local
|
d521df7b8411fd72e936546db701ba9a222ee230
|
57cca11dc6c32d5eb134464e34f0a1a4209e8e1f
|
/OpenCV with Python for Image and Video Analysis(sentdex)/Edge Detection and Gradients - 10.py
|
2153521e4307693477b2dd318634f289c1052a4f
|
[] |
no_license
|
21tushar/Python-Tutorials
|
a8d035981f91fd56da0daf9eada4b9a43d1e3439
|
ca9d927fd72a758cff9764a1b21fa9cbced10ee2
|
refs/heads/master
| 2020-03-13T06:14:31.635494
| 2018-07-29T16:46:18
| 2018-07-29T16:46:18
| 131,000,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while True:
_, frame = cap.read()
laplacian = cv2.Laplacian(frame, cv2.CV_64F)
sobelx = cv2.Sobel(frame, cv2.CV_64F, 1, 0, ksize=5)
sobely = cv2.Sobel(frame, cv2.CV_64F, 0, 1, ksize=5)
edges = cv2.Canny(frame, 100, 200)
cv2.imshow('laplacian', laplacian)
cv2.imshow('sobelx', sobelx)
cv2.imshow('sobely', sobely)
cv2.imshow('edges', edges)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
|
[
"noreply@github.com"
] |
noreply@github.com
|
4dca72e67c09881156eab57d557b557a221e3668
|
da9d4e3fe7d7804b53d742e6ad49fc4b655985bd
|
/backend/src/api.py
|
6343628501da022a937c6faa6e9f7494732cf46a
|
[] |
no_license
|
RanaEmad/coffee-shop-full-stack
|
b3c59ecd50ae97e9018ee0874db5af091e499391
|
9f1c979cadbb3a1f401b8288fea0b5d9142387e5
|
refs/heads/master
| 2022-11-28T11:14:52.357419
| 2020-08-08T21:12:23
| 2020-08-08T21:12:23
| 286,028,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,159
|
py
|
import os
from flask import Flask, request, jsonify, abort
from sqlalchemy import exc
import json
from flask_cors import CORS
from .database.models import db_drop_and_create_all, setup_db, Drink
from .auth.auth import AuthError, requires_auth
app = Flask(__name__)
setup_db(app)
CORS(app)
'''
@TODO uncomment the following line to initialize the database
!! NOTE THIS WILL DROP ALL RECORDS AND START YOUR DB FROM SCRATCH
!! NOTE THIS MUST BE UNCOMMENTED ON FIRST RUN
'''
db_drop_and_create_all()
# ROUTES
'''
@TODO implement endpoint
GET /drinks
it should be a public endpoint
it should contain only the drink.short() data representation
returns status code 200 and json {"success": True, "drinks": drinks} where drinks is the list of drinks
or appropriate status code indicating reason for failure
'''
@app.route("/drinks")
def get_drinks():
drinks = Drink.query.all()
formatted_drinks = []
for drink in drinks:
formatted_drinks.append(drink.short())
response = {
"success": True,
"drinks": formatted_drinks
}
return jsonify(response)
'''
@TODO implement endpoint
GET /drinks-detail
it should require the 'get:drinks-detail' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drinks} where drinks is the list of drinks
or appropriate status code indicating reason for failure
'''
@app.route("/drinks-detail")
@requires_auth('get:drinks-detail')
def get_drinks_detail(jwt):
drinks = Drink.query.all()
formatted_drinks = []
for drink in drinks:
formatted_drinks.append(drink.long())
response = {
"success": True,
"drinks": formatted_drinks
}
return jsonify(response)
'''
@TODO implement endpoint
POST /drinks
it should create a new row in the drinks table
it should require the 'post:drinks' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drink} where drink an array containing only the newly created drink
or appropriate status code indicating reason for failure
'''
@app.route('/drinks', methods=['POST'])
@requires_auth('post:drinks')
def add_drink(jwt):
if "title" not in request.get_json() or request.get_json()["title"] == "" or "recipe" not in request.get_json() or request.get_json()["recipe"] == "":
abort(400)
exists = Drink.query.filter_by(
title=request.get_json()["title"]).one_or_none()
if exists:
abort(400)
drink = Drink(title=request.get_json()[
"title"], recipe=json.dumps(request.get_json()["recipe"]))
drink.insert()
response = {
"success": True,
"drinks": [drink.long()]
}
return jsonify(response)
'''
@TODO implement endpoint
PATCH /drinks/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should update the corresponding row for <id>
it should require the 'patch:drinks' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drink} where drink an array containing only the updated drink
or appropriate status code indicating reason for failure
'''
@app.route("/drinks/<id>", methods=['PATCH'])
@requires_auth('patch:drinks')
def update_drink(jwt, id):
drink = Drink.query.filter_by(id=id).one_or_none()
if drink is None:
abort(404)
if "title" in request.get_json():
drink.title = request.get_json()["title"]
if "recipe" in request.get_json():
drink.recipe = json.dumps(request.get_json()["recipe"])
drink.update()
response = {
"success": True,
"drinks": [drink.long()]
}
return jsonify(response)
'''
@TODO implement endpoint
DELETE /drinks/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should delete the corresponding row for <id>
it should require the 'delete:drinks' permission
returns status code 200 and json {"success": True, "delete": id} where id is the id of the deleted record
or appropriate status code indicating reason for failure
'''
@app.route('/drinks/<id>', methods=['DELETE'])
@requires_auth('delete:drinks')
def delete_drink(jwt, id):
drink = Drink.query.filter_by(id=id).one_or_none()
if drink is None:
abort(404)
drink.delete()
response = {
"success": True,
"delete": id
}
return jsonify(response)
# Error Handling
'''
Example error handling for unprocessable entity
'''
@ app.errorhandler(422)
def unprocessable(error):
return jsonify({
"success": False,
"error": 422,
"message": "unprocessable"
}), 422
'''
@TODO implement error handlers using the @app.errorhandler(error) decorator
each error handler should return (with approprate messages):
jsonify({
"success": False,
"error": 404,
"message": "resource not found"
}), 404
'''
'''
@TODO implement error handler for 404
error handler should conform to general task above
'''
@ app.errorhandler(404)
def notfound(error):
return jsonify({
"success": False,
"error": 404,
"message": "Not Found."
}), 404
'''
@TODO implement error handler for AuthError
error handler should conform to general task above
'''
@ app.errorhandler(401)
def unauthorized(error):
return jsonify({
"success": False,
"error": 401,
"message": "Unauthorized Access."
}), 401
@ app.errorhandler(500)
def internal(error):
return jsonify({
"success": False,
"error": 500,
"message": "Internal Server Error."
}), 500
@ app.errorhandler(400)
def invalid(error):
return jsonify({
"success": False,
"error": 400,
"message": "Invalid Data."
}), 400
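# Hedged request sketch (not part of the original file), assuming the app is
# served locally and valid bearer tokens are supplied for protected routes:
#   GET    /drinks        -> {"success": true, "drinks": [...]}   (public)
#   POST   /drinks        -> requires 'post:drinks'; JSON body with title/recipe
#   PATCH  /drinks/<id>   -> requires 'patch:drinks'; 404 if <id> is unknown
#   DELETE /drinks/<id>   -> requires 'delete:drinks'; returns the deleted id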
|
[
"rana3emad@gmail.com"
] |
rana3emad@gmail.com
|
75d1a00d171815f01a3c90a88f85ddabc2422950
|
f77522423e2782b87c22bdd5606b1be339e9223b
|
/lotto/models.py
|
f0893acd3039c393dc39e5fa5956171556aafeee
|
[] |
no_license
|
kwanghee123/project1
|
918936ae36c20f158aa1a99b562c5b7f05e868bb
|
a8f5c7800ed1bc94e3e8d30ba2219b1a3f9cc9f3
|
refs/heads/master
| 2020-04-02T16:58:55.132398
| 2018-10-25T08:41:49
| 2018-10-25T08:41:49
| 154,638,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
from django.db import models
from django.utils import timezone
import random
class GuessNumbers(models.Model):
name = models.CharField(max_length=24)
lottos = models.CharField(max_length=255, default='[1,2,3,4,5,6]')
text = models.CharField(max_length=255)
num_lotto = models.IntegerField(default=5)
update_date = models.DateTimeField()
def __str__(self):
return '%s %s' % (self.name, self.text)
def generate(self):
self.lottos = ""
origin = list(range(1, 46))
for _ in range(0, self.num_lotto):
random.shuffle(origin)
guess = origin[:6]
guess.sort()
self.lottos += str(guess) + '\n'
self.update_date = timezone.now()
self.save()
class Location(models.Model):
lat = models.FloatField()
lng = models.FloatField()
name = models.CharField(max_length=30)
|
[
"ykhzang2@gmail.com"
] |
ykhzang2@gmail.com
|
55b1ef245e9a7cb31d87bfd61a9576b63fdc7fdc
|
244ecfc2017a48c70b74556be8c188e7a4815848
|
/res/scripts/client/gui/scaleform/daapi/view/lobby/cybersport/staticformationunitview.py
|
8a3ccb32d12fe54c88e9e5068baa81fe574abd77
|
[] |
no_license
|
webiumsk/WOT-0.9.12
|
c1e1259411ba1e6c7b02cd6408b731419d3174e5
|
5be5fd9186f335e7bae88c9761c378ff5fbf5351
|
refs/heads/master
| 2021-01-10T01:38:36.523788
| 2015-11-18T11:33:37
| 2015-11-18T11:33:37
| 46,414,438
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 14,220
|
py
|
# 2015.11.18 11:53:51 Central Europe (standard time)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/cyberSport/StaticFormationUnitView.py
import BigWorld
from UnitBase import UNIT_OP
from gui import makeHtmlString
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.shared.formatters import text_styles, icons
from gui.shared.utils.functions import makeTooltip
from gui.Scaleform.daapi.view.lobby.profile.ProfileUtils import ProfileUtils
from gui.Scaleform.daapi.view.lobby.rally.vo_converters import makeVehicleVO
from gui.Scaleform.daapi.view.lobby.rally.ActionButtonStateVO import ActionButtonStateVO
from gui.Scaleform.daapi.view.lobby.rally import vo_converters, rally_dps
from gui.Scaleform.daapi.view.meta.StaticFormationUnitMeta import StaticFormationUnitMeta
from gui.Scaleform.locale.CYBERSPORT import CYBERSPORT
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.clubs import events_dispatcher as club_events
from gui.clubs.club_helpers import ClubListener
from gui.clubs.settings import getLadderChevron64x64, getLadderBackground
from gui.prb_control import settings
from gui.prb_control.context import unit_ctx
from gui.prb_control.settings import REQUEST_TYPE
from gui.shared import g_itemsCache
from gui.shared.view_helpers.emblems import ClubEmblemsHelper
from gui.game_control.battle_availability import isHourInForbiddenList
from helpers import int2roman
class StaticFormationUnitView(StaticFormationUnitMeta, ClubListener, ClubEmblemsHelper):
ABSENT_VALUES = '--'
def __init__(self):
super(StaticFormationUnitView, self).__init__()
self.__extra = self.unitFunctional.getExtra()
self.__clubDBID = self.__extra.clubDBID
def getCoolDownRequests(self):
requests = super(StaticFormationUnitView, self).getCoolDownRequests()
requests.extend((REQUEST_TYPE.CLOSE_SLOT, REQUEST_TYPE.CHANGE_RATED))
return requests
def onClubEmblem64x64Received(self, clubDbID, emblem):
if emblem:
self.as_setTeamIconS(self.getMemoryTexturePath(emblem))
def onClubMembersChanged(self, members):
self.__updateHeader()
self._updateMembersData()
def onClubUpdated(self, club):
self.__updateHeader()
def onAccountClubStateChanged(self, state):
self.__updateHeader()
def onAccountClubRestrictionsChanged(self):
self.__updateHeader()
def onClubNameChanged(self, name):
self.__updateHeader()
def onClubLadderInfoChanged(self, ladderInfo):
self.__updateHeader()
def onClubsSeasonStateChanged(self, seasonState):
self.__updateHeader()
def onStatusChanged(self):
self.__updateHeader()
def __makeLegionnairesCountString(self, unit):
legionnairesString = makeHtmlString('html_templates:lobby/cyberSport/staticFormationUnitView', 'legionnairesCount', {'cur': unit.getLegionaryCount(),
'max': unit.getLegionaryMaxCount()})
return legionnairesString
def onUnitPlayerRolesChanged(self, pInfo, pPermissions):
functional = self.unitFunctional
_, unit = functional.getUnit()
if self._candidatesDP is not None:
self._candidatesDP.rebuild(functional.getCandidates())
self.as_setLegionnairesCountS(False, self.__makeLegionnairesCountString(unit))
self.__updateHeader()
self._updateMembersData()
self.__updateTotalData()
return
def onUnitFlagsChanged(self, flags, timeLeft):
functional = self.unitFunctional
pInfo = functional.getPlayerInfo()
isCreator = pInfo.isCreator()
if isCreator and flags.isOpenedStateChanged():
self.as_setOpenedS(flags.isOpened(), vo_converters.makeStaticFormationStatusLbl(flags))
if flags.isChanged():
self._updateMembersData()
else:
self._setActionButtonState()
def onUnitSettingChanged(self, opCode, value):
if opCode == UNIT_OP.SET_COMMENT:
self.as_setCommentS(self.unitFunctional.getCensoredComment())
elif opCode in [UNIT_OP.CLOSE_SLOT, UNIT_OP.OPEN_SLOT]:
functional = self.unitFunctional
_, unit = functional.getUnit()
unitFlags = functional.getFlags()
slotState = functional.getSlotState(value)
pInfo = functional.getPlayerInfo()
canAssign, vehicles = pInfo.canAssignToSlot(value)
canTakeSlot = not (pInfo.isLegionary() and unit.isClub())
vehCount = len(vehicles)
slotLabel = vo_converters.makeStaticSlotLabel(unitFlags, slotState, pInfo.isCreator(), vehCount, pInfo.isLegionary(), unit.isRated())
if opCode == UNIT_OP.CLOSE_SLOT:
self.as_closeSlotS(value, settings.UNIT_CLOSED_SLOT_COST, slotLabel)
else:
self.as_openSlotS(value, canAssign and canTakeSlot, slotLabel, vehCount)
self.__updateTotalData()
self._setActionButtonState()
def onUnitVehicleChanged(self, dbID, vInfo):
functional = self.unitFunctional
pInfo = functional.getPlayerInfo(dbID=dbID)
if pInfo.isInSlot:
slotIdx = pInfo.slotIdx
if not vInfo.isEmpty():
vehicleVO = makeVehicleVO(g_itemsCache.items.getItemByCD(vInfo.vehTypeCD), functional.getRosterSettings().getLevelsRange())
slotCost = vInfo.vehLevel
else:
slotState = functional.getSlotState(slotIdx)
vehicleVO = None
if slotState.isClosed:
slotCost = settings.UNIT_CLOSED_SLOT_COST
else:
slotCost = 0
self.as_setMemberVehicleS(slotIdx, slotCost, vehicleVO)
self.__updateTotalData()
if pInfo.isCurrentPlayer() or functional.getPlayerInfo().isCreator():
self._setActionButtonState()
return
def onUnitMembersListChanged(self):
functional = self.unitFunctional
_, unit = functional.getUnit()
if self._candidatesDP is not None:
self._candidatesDP.rebuild(functional.getCandidates())
self.as_setLegionnairesCountS(False, self.__makeLegionnairesCountString(unit))
self.__updateHeader()
self._updateMembersData()
self.__updateTotalData()
return
def onUnitExtraChanged(self, extra):
self.__extra = self.unitFunctional.getExtra()
self.__updateHeader()
self._updateMembersData()
self.__updateTotalData()
def onUnitRejoin(self):
super(StaticFormationUnitView, self).onUnitRejoin()
functional = self.unitFunctional
_, unit = functional.getUnit()
if self._candidatesDP is not None:
self._candidatesDP.rebuild(functional.getCandidates())
self.as_setLegionnairesCountS(False, self.__makeLegionnairesCountString(unit))
self.__updateHeader()
self._updateMembersData()
self.__updateTotalData()
return
def toggleStatusRequest(self):
self.requestToOpen(not self.unitFunctional.getFlags().isOpened())
def initCandidatesDP(self):
self._candidatesDP = rally_dps.StaticFormationCandidatesDP()
self._candidatesDP.init(self.app, self.as_getCandidatesDPS(), self.unitFunctional.getCandidates())
def rebuildCandidatesDP(self):
self._candidatesDP.rebuild(self.unitFunctional.getCandidates())
def setRankedMode(self, isRated):
self.sendRequest(unit_ctx.ChangeRatedUnitCtx(isRated, 'prebattle/change_settings'))
def showTeamCard(self):
club_events.showClubProfile(self.__clubDBID)
def onSlotsHighlihgtingNeed(self, databaseID):
functional = self.unitFunctional
availableSlots = list(functional.getPlayerInfo(databaseID).getAvailableSlots(True))
pInfo = functional.getPlayerInfo(dbID=databaseID)
if not pInfo.isInSlot and pInfo.isLegionary():
_, unit = functional.getUnit()
if unit.isRated():
self.as_highlightSlotsS([])
return []
if unit.getLegionaryCount() >= unit.getLegionaryMaxCount():
legionariesSlots = unit.getLegionarySlots().values()
self.as_highlightSlotsS(legionariesSlots)
return legionariesSlots
self.as_highlightSlotsS(availableSlots)
return availableSlots
def _updateRallyData(self):
functional = self.unitFunctional
data = vo_converters.makeStaticFormationUnitVO(functional, unitIdx=functional.getUnitIdx(), app=self.app)
self.as_updateRallyS(data)
def _setActionButtonState(self):
self.as_setActionButtonStateS(ActionButtonStateVO(self.unitFunctional))
def _getVehicleSelectorDescription(self):
return CYBERSPORT.WINDOW_VEHICLESELECTOR_INFO_UNIT
def _populate(self):
super(StaticFormationUnitView, self)._populate()
self.startClubListening(self.__clubDBID)
settings = self.unitFunctional.getRosterSettings()
self._updateVehiclesLabel(int2roman(settings.getMinLevel()), int2roman(settings.getMaxLevel()))
self.__updateHeader()
_, unit = self.unitFunctional.getUnit()
self.as_setLegionnairesCountS(False, self.__makeLegionnairesCountString(unit))
self._updateVehiclesLabel(int2roman(settings.getMinLevel()), int2roman(settings.getMaxLevel()))
self.clubsCtrl.getAvailabilityCtrl().onStatusChanged += self.onStatusChanged
def _dispose(self):
self.ABSENT_VALUES = None
self.__extra = None
self.stopClubListening(self.__clubDBID)
self.clubsCtrl.getAvailabilityCtrl().onStatusChanged -= self.onStatusChanged
super(StaticFormationUnitView, self)._dispose()
return
def __updateHeader(self):
club = self.clubsCtrl.getClub(self.__clubDBID)
canSetRanked = club is not None and club.getPermissions().canSetRanked()
seasonState = self.clubsCtrl.getSeasonState()
modeLabel = ''
modeTooltip = ''
modeTooltipType = ''
isFixedMode = True
isModeTooltip = False
if self.__extra.isRatedBattle:
isFixedMode = not canSetRanked
if canSetRanked:
modeLabel = CYBERSPORT.STATICFORMATION_UNITVIEW_SETUNRANKEDMODE
else:
modeLabel = CYBERSPORT.STATICFORMATION_UNITVIEW_RANKEDMODE
elif seasonState.isSuspended():
modeLabel = CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONPAUSED
isModeTooltip = True
modeTooltipType = TOOLTIPS_CONSTANTS.COMPLEX
modeTooltip = makeTooltip(CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONPAUSEDTOOLTIP_HEADER, CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONPAUSEDTOOLTIP_BODY)
elif seasonState.isFinished():
modeLabel = CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONFINISHED
isModeTooltip = True
modeTooltipType = TOOLTIPS_CONSTANTS.COMPLEX
modeTooltip = makeTooltip(CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONFINISHEDTOOLTIP_HEADER, CYBERSPORT.STATICFORMATION_UNITVIEW_MODECHANGEWARNING_SEASONFINISHEDTOOLTIP_BODY)
elif canSetRanked:
isFixedMode = False
modeLabel = CYBERSPORT.STATICFORMATION_UNITVIEW_SETRANKEDMODE
if len(modeLabel):
if canSetRanked and seasonState.isActive() or self.__extra.isRatedBattle:
modeLabel = text_styles.neutral(modeLabel)
else:
modeLabel = text_styles.standard(modeLabel)
if isHourInForbiddenList(self.clubsCtrl.getAvailabilityCtrl().getForbiddenHours()):
modeLabel = '{0}{1}'.format(icons.alert(), text_styles.main(CYBERSPORT.LADDERREGULATIONS_WARNING))
isFixedMode = True
isModeTooltip = True
modeTooltipType = TOOLTIPS_CONSTANTS.LADDER_REGULATIONS
bgSource = RES_ICONS.MAPS_ICONS_LIBRARY_CYBERSPORT_LEAGUERIBBONS_UNRANKED
battles = self.ABSENT_VALUES
winRate = self.ABSENT_VALUES
leagueIcon = getLadderChevron64x64()
enableWinRateTF = False
if club is not None:
clubTotalStats = club.getTotalDossier().getTotalStats()
battles = BigWorld.wg_getNiceNumberFormat(clubTotalStats.getBattlesCount())
division = club.getLadderInfo().division
leagueIcon = getLadderChevron64x64(division)
winRateValue = ProfileUtils.getValueOrUnavailable(clubTotalStats.getWinsEfficiency())
if winRateValue != ProfileUtils.UNAVAILABLE_VALUE:
enableWinRateTF = True
winRate = ProfileUtils.formatFloatPercent(winRateValue)
else:
winRate = self.ABSENT_VALUES
if self.__extra.isRatedBattle:
bgSource = getLadderBackground(division)
self.requestClubEmblem64x64(club.getClubDbID(), club.getEmblem64x64())
self.as_setHeaderDataS({'clubId': self.__extra.clubDBID,
'teamName': self.__extra.clubName,
'isRankedMode': bool(self.__extra.isRatedBattle),
'battles': battles,
'winRate': winRate,
'enableWinRateTF': enableWinRateTF,
'leagueIcon': leagueIcon,
'isFixedMode': isFixedMode,
'modeLabel': modeLabel,
'modeTooltip': modeTooltip,
'bgSource': bgSource,
'modeTooltipType': modeTooltipType,
'isModeTooltip': isModeTooltip})
return
def __updateTotalData(self):
functional = self.unitFunctional
unitStats = functional.getStats()
canDoAction, restriction = functional.validateLevels(stats=unitStats)
self.as_setTotalLabelS(canDoAction, vo_converters.makeTotalLevelLabel(unitStats, restriction), unitStats.curTotalLevel)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\cybersport\staticformationunitview.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:53:52 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
43e7a13868552bc1df7a5b30ac083213d31dd08d
|
60e43b6e98ce173d3820530d90c7300a26b452e0
|
/Scripts/zls/__init__.py
|
59028eb6790487e1b0a49f22a0322e09d5040b50
|
[] |
no_license
|
zolcsielesosvr/OSVR
|
32945e8294ff1894152469be48dc91a482b2ed7a
|
187054a14265651d8caa20234654851d7845a374
|
refs/heads/master
| 2020-04-24T06:32:39.448864
| 2019-02-24T21:50:06
| 2019-02-24T21:50:06
| 171,768,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
from .SafeDirs import *
from .Platform import *
from .CMake import *
|
[
"zolcsieles@gmail.com"
] |
zolcsieles@gmail.com
|
8bf72af94dd7843e5a21bcb666896146201ea8a4
|
461a9e33aa76cd6f82f7e5ff23a1fafdc9d76e68
|
/apps/departamentos/migrations/0001_initial.py
|
93efdfe77a3549cb8590716eb56320bf2b8ba309
|
[] |
no_license
|
alex7alves/Gestao_rh
|
9bea77a62ca22701555465dd291ee8322da95e16
|
83d7f65cdf1a253cc59e24306874a0d0139ef2ca
|
refs/heads/master
| 2020-08-02T13:52:28.041087
| 2019-10-10T18:37:39
| 2019-10-10T18:37:39
| 211,376,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
# Generated by Django 2.2.5 on 2019-09-25 23:34
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Departamento',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(help_text='Nome do funcionario', max_length=200)),
],
),
]
|
[
"alex7alves10@hotmail.com"
] |
alex7alves10@hotmail.com
|
77babb978bf5214f4c148626cb7267f0b261cb65
|
e151da6d888a3720b608bb40f607e3f9fc6ab16e
|
/code/dm.py
|
41d77a38f7fe147dbb4f24c1ec325671d962d6c2
|
[] |
no_license
|
Bearbobs/Mr-downloader
|
60f7ceb263372df0b8b968dcd68000e5b662b912
|
69ff898152b415bf0c7fcfe59276a1acd0aa6365
|
refs/heads/master
| 2020-05-22T16:05:54.622447
| 2020-01-27T06:34:35
| 2020-01-27T06:34:35
| 84,701,923
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,559
|
py
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
import urllib.request
class downloader(QDialog):
def __init__(self):
QDialog.__init__(self)
layout=QVBoxLayout()
lab1= QLabel(" Hi!!")
lab2= QLabel(" I am Mr. Downloader")
lab3= QLabel(" I can help you with your downloading ")
lab4= QLabel("press ctrl+j to initiate another parallel download")
lab5= QLabel(" copyright Anuj Kapoor 2K17")
self.url = QLineEdit()
self.save_location = QLineEdit()
self.progress = QProgressBar()
browse = QPushButton("Browse")
download = QPushButton("Download")
self.url.setPlaceholderText("URL")
self.save_location.setPlaceholderText("File Save Location")
self.progress.setValue(0)
self.progress.setAlignment(Qt.AlignHCenter)
layout.addWidget(lab1)
layout.addWidget(lab2)
layout.addWidget(lab3)
layout.addWidget(self.url)
layout.addWidget(self.save_location)
layout.addWidget(browse)
layout.addWidget(self.progress)
layout.addWidget(download)
layout.addWidget(lab4)
layout.addWidget(lab5)
self.setLayout(layout)
self.setWindowTitle("Mr.Downloader")
self.setFocus()
browse.clicked.connect(self.browse_file)
download.clicked.connect(self.download)
def browse_file(self):
save_file = QFileDialog.getSaveFileName(self, caption="Save File As", directory=".",
filter="All Files (*.*)")
self.save_location.setText(QDir.toNativeSeparators(save_file))
def download(self):
url = self.url.text()
save_location = self.save_location.text()
try:
urllib.request.urlretrieve(url, save_location, self.report)
except Exception:
QMessageBox.warning(self, "Warning", "The download failed")
return
QMessageBox.information(self, "Information", "The download is complete")
self.progress.setValue(0)
self.url.setText("")
self.save_location.setText("")
def report(self,blocknum,blocksize,totalsize):
a=blocknum*blocksize
if totalsize > 0:
percent=(a/totalsize)*100
self.progress.setValue(int(percent))
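# Worked example of the report() callback above: report(10, 1024, 102400)
# sets the progress bar to 10 (percent), since 10 * 1024 / 102400 * 100 == 10.0.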
app=QApplication(sys.argv)
dialog=downloader()
dialog.show()
app.exec_()
|
[
"noreply@github.com"
] |
noreply@github.com
|
abcc634e9e5d0c71bae568b10d3d3802d2dfd3c8
|
21589bcc4596cebee4dee843da139e7a18cd2209
|
/t1.py
|
e1ee97c0de10a52c4bab1d75e69fa21ff40062df
|
[] |
no_license
|
liangxiaobi/tornado_example
|
3be55b75a3057db4bb1f48c552f5f477a63075d5
|
ef84c24335999a6e6f4f679844a53531d83bbc58
|
refs/heads/master
| 2016-09-06T04:30:10.849272
| 2013-05-21T08:04:31
| 2013-05-21T08:04:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,371
|
py
|
'''
Created on 2013-5-21
@author: lion
'''
from tornado.httpclient import AsyncHTTPClient
import os
import tornado.auth
import tornado.escape
import tornado.ioloop
import tornado.locale
import tornado.web
#
class MainHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
self.write("Hello,world")
self.finish()
class StoryHandler(tornado.web.RequestHandler):
def get(self, story_id):
self.write("You requested the story " + story_id)
class TemplateHandler(tornado.web.RequestHandler):
def get(self):
items = ["Item 1", "Item 2", "Item 3"]
self.render("template.html", title="My title", items=items)
class CookieHandler(tornado.web.RequestHandler):
def get(self):
if not self.get_cookie("mycookie"):
self.set_cookie("mycookie", "myvalue")
self.set_secure_cookie("mysecurecookie", "mysecurecookievalue")
self.write("Your cookie was not set yet,now set cookie mycookie!")
else:
self.write("Your cookie was set: " + self.get_cookie("mycookie")+" securitycookie:"+ self.get_secure_cookie("mysecurecookie"))
class MyFormHandler(tornado.web.RequestHandler):
def get(self):
        # NOTE: user_is_logged_in() is not defined on tornado.web.RequestHandler;
        # a subclass or mixin is assumed to provide it.
        if not self.user_is_logged_in():
raise tornado.web.HTTPError(403)
self.write('<html><body><form action="/myform" method="post">'
'<input type="text" name="message">'
'<input type="submit" value="Submit">'
'</form></body></html>')
def post(self):
self.set_header("Content-Type", "text/plain")
self.write("You wrote " + self.get_argument("message"))
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
user_id = self.get_secure_cookie("user")
if not user_id: return None
return self.backend.get_user_by_id(user_id)
def get_user_locale(self):
if "locale" not in self.current_user.prefs:
# Use the Accept-Language header
return None
return self.current_user.prefs["locale"]
class UserHandler(BaseHandler):
def get(self):
print self.current_user
if not self.current_user:
self.redirect("/login")
return
name = tornado.escape.xhtml_escape(self.current_user)
self.write("Hello, " + name)
class LoginHandler(BaseHandler):
def get(self):
self.write('<html><body><form action="/login" method="post">'
'Name: <input type="text" name="name">'
'<input type="submit" value="Sign in">'
'</form></body></html>')
def post(self):
print self.get_argument("name")
self.set_secure_cookie("user", self.get_argument("name"))
self.redirect("/user")
class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self._on_auth)
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
self.authenticate_redirect()
return
# Save the user with, e.g., set_secure_cookie()
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
"cookie_secret": "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
"login_url": "/login",
"xsrf_cookies": False,
}
application = tornado.web.Application([
(r"/",MainHandler),
(r"/story/([0-9]+)",StoryHandler),
(r"/myform",MyFormHandler),
(r"/template",TemplateHandler),
(r"/cookie",CookieHandler),
(r"/static/tornado-0.2.tar.gz", tornado.web.RedirectHandler,dict(url="https://github.com/downloads/facebook/tornado/tornado-0.2.tar.gz")),
(r"/foo", tornado.web.RedirectHandler, {"url":"/bar", "permanent":False}),
(r"/user", UserHandler),
(r"/login", LoginHandler),
(r"/(apple-touch-icon\.png)", tornado.web.StaticFileHandler,
dict(path=settings['static_path'])),
],**settings)
if __name__ == '__main__':
#AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient')
tornado.locale.load_translations(os.path.join(os.path.dirname(__file__), "translations"))
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
pass
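# One way to replace the cookie_secret placeholder above with a real random
# value (a sketch; any sufficiently long random string works):
#   import base64, uuid
#   base64.b64encode(uuid.uuid4().bytes + uuid.uuid4().bytes)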
|
[
"11315889@qq.com"
] |
11315889@qq.com
|
8f9eece8ab82dc05dfd4a860532dd48acb31d2a9
|
b800b818a4d405b5fc6a152309563db9f68eacdb
|
/fmp-django/menu/migrations/0006_menu.py
|
150770038e1a15c77e14436bae2c6079984c327f
|
[
"Unlicense"
] |
permissive
|
dave-leblanc/family-menu-planning
|
2bff7e27461c6094c7959817ecc30f6271a300bd
|
00e6b300837f7857a7c0f3de5ef175cf77c7a2e3
|
refs/heads/main
| 2023-02-27T11:56:14.770431
| 2021-02-08T02:27:03
| 2021-02-08T02:27:03
| 333,241,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
# Generated by Django 3.1.5 on 2021-01-30 04:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('menu', '0005_auto_20210127_0200'),
]
operations = [
migrations.CreateModel(
name='Menu',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('recipe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menu.recipe')),
],
),
]
|
[
"dave.leblanc@gmail.com"
] |
dave.leblanc@gmail.com
|
274793a531a7e276d248de7dd9803cdc8f253a90
|
d9d4ebdefcdd271b8710c93d7e8dfddc78877eec
|
/python3/go.py
|
5e60c7dfcc87ac81190b71e74be7a8725f3a79cf
|
[] |
no_license
|
isim95/uniswap-demo
|
e9ff14e4647afe33258839c067b080278fb9502e
|
33d02ba17dd511f945fa47ce0271874ad6f903a6
|
refs/heads/master
| 2022-03-04T21:07:46.061252
| 2019-03-16T20:52:42
| 2019-03-16T20:52:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 729
|
py
|
#!/usr/bin/env python3
from web3.auto import w3
print(w3.eth.blockNumber);
if (w3.eth.blockNumber == 0):
raise Exception("Syncing")
print(w3.eth.getBlock('latest'));
abi = [{"name": "NewExchange", "inputs": [{"type": "address", "name": "token", "indexed": True}, {"type": "address", "name": "exchange", "indexed": True}], "anonymous": False, "type": "event"}]
uniswap = w3.eth.contract('0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95', abi=abi)
past_events = uniswap.events.NewExchange.createFilter(fromBlock=6627917).get_all_entries()
# TODO: subscribe to future events, too
token_exchange = {e.args.token: e.args.exchange for e in past_events}
for token, exchange in token_exchange.items():
print(token, exchange)
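# One possible way to handle the TODO above (a polling sketch; the filter
# variable and the 10-second poll interval are assumptions, not part of the
# original script):
import time
new_exchange_filter = uniswap.events.NewExchange.createFilter(fromBlock='latest')
while True:
    # Pick up NewExchange events emitted since the last poll
    for e in new_exchange_filter.get_new_entries():
        token_exchange[e.args.token] = e.args.exchange
        print(e.args.token, e.args.exchange)
    time.sleep(10)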
|
[
"bryan@stitthappens.com"
] |
bryan@stitthappens.com
|
608cdd8f3ba70d04d61965c29c90aa8f48c1ad16
|
77098dac5eccbc041c68da22f6bf4880318e523d
|
/test_app/urls.py
|
b072be48f857ae9227a96d5a0e3f51239a82c1c7
|
[] |
no_license
|
NineMan/test_app
|
d458fcd6640b015e5ea18745715d6e7388551108
|
bdacb200fd5fd5875da4a210816d709f0bb94d97
|
refs/heads/master
| 2023-03-30T13:10:30.813862
| 2021-04-05T08:17:24
| 2021-04-05T08:17:24
| 354,690,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('pages.urls')),
path('django-rq/', include('django_rq.urls'))
]
|
[
"m.nelyubin@neovox.ru"
] |
m.nelyubin@neovox.ru
|
44dc7ace3c96940a36a7ea468124c78e03900455
|
1620e0af4a522db2bac16ef9c02ac5b5a4569d70
|
/Ekeopara_Praise/Phase 2/LIST/Day44 Tasks/Task4.py
|
e3f303c59954e7bec5cf6fa62b4f49925de56d80
|
[
"MIT"
] |
permissive
|
Ekeopara-Praise/python-challenge-solutions
|
cda07902c9ffc09ba770ae7776e5e01026406a05
|
068b67c05524b5c5a0d6084315eca3424c768421
|
refs/heads/master
| 2022-12-15T15:29:03.031583
| 2020-09-25T06:46:27
| 2020-09-25T06:46:27
| 263,758,530
| 2
| 0
| null | 2020-05-13T22:37:33
| 2020-05-13T22:37:32
| null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
'''4. Write a Python program to concatenate elements of a list. '''
num = ['1', '2', '3', '4', '5']
print('-'.join(num))
print(''.join(num))
|
[
"ekeoparapraise@gmail.com"
] |
ekeoparapraise@gmail.com
|
f3c023e08f3d224740ac57a5cf8cc6ef2f636636
|
8e25201a060b5acd09670846cd529d005584a4fa
|
/smirp/build_model.py
|
da6e8865b33ee08cfb73096c814b04ee887eb205
|
[] |
no_license
|
CU-BIC/SMIRP
|
2d3e88053d78b9745cec785d05d2d6436284134d
|
9ea46ab4133b85e817d3c683c03f0e62b93202b3
|
refs/heads/master
| 2020-04-07T19:24:10.399804
| 2018-11-22T14:48:27
| 2018-11-22T14:48:27
| 158,647,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
import sys
import getopt
from subprocess import call
from classes.FeatureSet import FeatureSet
# Parameters:
#
# -p: File name for positive feature set (any file type)
# -n: File name for negative feature set (any file type)
# -o: Name of output LibSVM model
opts, extraparams = getopt.getopt(sys.argv[1:], 'o:p:n:')
for o,p in opts:
if o == '-p':
posPath = p
if o == '-n':
negPath = p
if o == '-o':
outPath = p
# Aggregate inputs, export to libsvm file
fs = FeatureSet()
fs.load('data/'+posPath, patternClass = 'real')
fs.add_instances('data/'+negPath, patternClass = 'pseudo')
fs.weka_smote()
fs.libsvm_scale(paramOut = 'models/'+outPath+'.scale')
fs.export('tmp.libsvm')
# Build model
call('svm-train -c 10000000 -d 1 -h 1 -e 0.001 -g 0.0019531 -b 1 tmp.libsvm models/'+outPath+'.model', shell=True)
# Clean up
call('rm tmp.libsvm', shell=True)
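# Example invocation (a sketch; the file names here are hypothetical):
#   python build_model.py -p positives.csv -n negatives.csv -o my_model
# This reads data/positives.csv and data/negatives.csv, then writes
# models/my_model.scale and models/my_model.model alongside the trained SVM.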
|
[
"francoischarih@sce.carleton.com"
] |
francoischarih@sce.carleton.com
|
ef8433f6bae0df2f57342d5ef4f9efcd844ecde0
|
ddf1267a1a7cb01e70e3b12ad4a7bfaf291edb3e
|
/src/search/tasks.py
|
2c428bb843cb84de7aa107d3c9693be9e16496f7
|
[
"MIT"
] |
permissive
|
Garinmckayl/researchhub-backend
|
46a17513c2c9928e51db4b2ce5a5b62df453f066
|
cd135076d9a3b49a08456f7ca3bb18ff35a78b95
|
refs/heads/master
| 2023-06-17T04:37:23.041787
| 2021-05-18T01:26:46
| 2021-05-18T01:26:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,339
|
py
|
from oauth.utils import get_orcid_works, check_doi_in_works
from paper.models import Paper
from paper.utils import download_pdf
from researchhub.celery import app
from utils.orcid import orcid_api
from user.models import Author
from purchase.models import Wallet
VALID_LICENSES = []
@app.task
def download_pdf_by_license(item, paper_id):
try:
licenses = item['license']
for license in licenses:
if license in VALID_LICENSES:
pdf, filename = get_pdf_and_filename(item['links'])
paper = Paper.objects.get(pk=paper_id)
paper.file.save(filename, pdf)
paper.save(update_fields=['file'])
break
except Exception:
pass
def get_pdf_and_filename(links):
for link in links:
if link['content-type'] == 'application/pdf':
return download_pdf(link['URL'])
return None, None
@app.task
def create_authors_from_crossref(crossref_authors, paper_id, paper_doi):
paper = None
try:
paper = Paper.objects.get(pk=paper_id)
except Paper.DoesNotExist:
pass
for crossref_author in crossref_authors:
try:
first_name = crossref_author['given']
last_name = crossref_author['family']
except KeyError:
break
affiliation = None
if len(crossref_author['affiliation']) > 0:
FIRST = 0
affiliation = crossref_author['affiliation'][FIRST]['name']
try:
orcid_id = crossref_author['ORCID'].split('/')[-1]
get_or_create_orcid_author(orcid_id, first_name, last_name, paper)
except KeyError:
orcid_authors = search_orcid_author(
first_name,
last_name,
affiliation
)
for orcid_author in orcid_authors:
works = get_orcid_works(orcid_author)
if (len(works) > 0) and check_doi_in_works(paper_doi, works):
create_orcid_author(orcid_author, paper)
def search_orcid_author(given_names, family_name, affiliation=None):
matches = []
try:
author_name_results = orcid_api.search_by_name(
given_names,
family_name
)
authors = author_name_results.json()['result']
if authors is not None:
for author in authors:
uid = author['orcid-identifier']['path']
author_id_results = orcid_api.search_by_id(uid)
matches.append(author_id_results.json())
except Exception as e:
print(e)
return matches
def create_orcid_author(orcid_author, paper):
name = orcid_author['person']['name']
first_name = name['given-names']['value']
last_name = name['family-name']['value']
orcid_id = orcid_author['orcid-identifier']['path']
get_or_create_orcid_author(orcid_id, first_name, last_name, paper)
def get_or_create_orcid_author(orcid_id, first_name, last_name, paper):
    author, created = Author.objects.get_or_create(
orcid_id=orcid_id,
defaults={
'first_name': first_name,
'last_name': last_name,
}
)
    wallet, _ = Wallet.objects.get_or_create(
author=author
)
if paper is not None:
paper.authors.add(author)
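# Shape of the crossref_authors input assumed by create_authors_from_crossref
# (a sketch; the values are hypothetical):
#   [{'given': 'Ada', 'family': 'Lovelace',
#     'affiliation': [{'name': 'Analytical Engine Co.'}],
#     'ORCID': 'https://orcid.org/0000-0000-0000-0000'}]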
|
[
"lightning.lu7@gmail.com"
] |
lightning.lu7@gmail.com
|
cac775ac5f84f62350398129fb0c01a7f572c352
|
ec633b1281c518f8c30ef8d261b15530872d635c
|
/ptd5.py
|
881d23f5f9aff853e72423cb0cc521765439b14d
|
[] |
no_license
|
tomekgaw/PodstawyTD
|
26e9efac262acfd1a6cc51837fbe41baa505b9c9
|
7f85b6f78a85288fb4adeb622f876847cb53ff79
|
refs/heads/master
| 2021-05-10T14:38:18.782861
| 2018-01-30T20:07:56
| 2018-01-30T20:07:56
| 118,526,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,619
|
py
|
from matplotlib import pyplot as plt
import numpy as np
import ptd4copy as ptd4
from itertools import accumulate
#######PSK
sp = [(np.sin(2 * np.pi * ptd4.fn * b/ ptd4.fs)) for b in range(1500)]
psk = np.multiply(ptd4.zp,sp)
def sumation(x):
xd = suma = 0
xt = []
for _ in np.arange(ptd4.M):
suma = 0
for _ in np.arange(ptd4.fs):
suma += x[xd]
xt.append(suma)
xd = xd + 1
return xt
def demod1(d,h):
s = []
it = np.arange(len(d))
for i in it:
if d[i] >= h:
s.append(1)
else:
s.append(0)
return s
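# Example: demod1([5, 12, 30, 8], 10) returns [0, 1, 1, 0], since only the
# samples >= 10 map to 1.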
# h1 =10
# xt = sumation(psk)
# ss = demod1(xt,h1)
# plt.figure()
# plt.subplot(3,1,1)
# plt.title('demodulacja sygnału PSK dla h = {0}'.format(h1))
# plt.xlabel('t')
# plt.ylabel('x(t)')
# plt.plot(psk)
# plt.subplot(3,1,2)
# plt.xlabel('t')
# plt.ylabel('p(t)')
# plt.plot(xt)
# plt.subplot(3,1,3)
# plt.xlabel('t')
# plt.ylabel('m(t)')
# plt.plot(ss)
# plt.savefig('lab5zad1PSK')
#
# ###############ASK
# sa = [ptd4.A2 * np.sin(2 * np.pi * ptd4.fn * b / ptd4.fs) for b in range(1500)]
# h2 = 35
# ask = np.multiply(ptd4.za,sp)
# s1 = sumation(ask)
# m2 = demod1(s1,h2)
#
# plt.figure()
# plt.subplot(3,1,1)
# plt.title('demodulacja sygnału ASK dla h = {0}'.format(h2))
# plt.xlabel('t')
# plt.ylabel('x(t)')
# plt.plot(ask)
# plt.subplot(3,1,2)
# plt.xlabel('t')
# plt.ylabel('p(t)')
# plt.plot(s1)
# plt.subplot(3,1,3)
# plt.xlabel('t')
# plt.ylabel('m(t)')
# plt.plot(m2)
# plt.savefig('lab5zad1ASK')
#
# ################################FSK
#
# sn1 = [np.sin(2 * np.pi * ptd4.fn1 * b / ptd4.fs) for b in range(1500)]
# sn2 = [np.sin(2 * np.pi * ptd4.fn2 * b / ptd4.fs) for b in range(1500)]
# xt1 = np.multiply(ptd4.zf,sn1)
# xt2 = np.multiply(ptd4.zf,sn2)
# px1 = sumation(xt1)
# px2 = sumation(xt2)
#
# pt = px1 + px2
# h3 = 23
# m3 = demod1(pt,h3)
#
# plt.figure()
# plt.subplot(2,1,1)
# plt.title('FSK x1(t) i px1(t)')
# plt.xlabel('t')
# plt.ylabel('x1(t)')
# plt.plot(xt1)
# plt.subplot(2,1,2)
# plt.xlabel('t')
# plt.ylabel('px2(t)')
# plt.plot(px1)
# plt.savefig('lab5FSKxt')
# plt.figure()
# plt.subplot(2,1,1)
# plt.title('FSK x2(t) i px2(t)')
# plt.xlabel('t')
# plt.ylabel('x1(t)')
# plt.plot(xt2)
# plt.subplot(2,1,2)
# plt.xlabel('t')
# plt.ylabel('px2(t)')
# plt.plot(px2)
# plt.savefig('lab5FSKpxt')
# plt.figure()
#
# plt.subplot(2,1,1)
# plt.title('demodulacja sygnału FSK dla h = {0}'.format(h3))
# plt.xlabel('t')
# plt.ylabel('p(t)')
# plt.plot(pt)
# plt.subplot(2,1,2)
# plt.xlabel('t')
# plt.ylabel('m(t)')
# plt.plot(m3)
# plt.savefig('lab5FSKmtpt')
#
# plt.show()
|
[
"tomekgaw@gmailc.com"
] |
tomekgaw@gmailc.com
|
07feee452428ecf97bd5edc3add50468a4a465d2
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_309/ch26_2019_08_19_14_12_37_089425.py
|
8dadd9e96c3304269c01917bc7478e247c45840a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
dias = int(input("dias:"))
horas = int(input("horas:"))
minutos = int(input("minutos:"))
segundos = int(input("segundos:"))
print(dias*86400 + horas*3600 + minutos*60 + segundos)
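# Sanity check: dias=1, horas=2, minutos=3, segundos=4
# -> 1*86400 + 2*3600 + 3*60 + 4 = 93784 segundos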
|
[
"you@example.com"
] |
you@example.com
|
46b56de9bf7ead1838fe58206ae4c91ce5bcfbb2
|
00792a90bfa302af8614f4a5f955c071ed320acf
|
/apps/control_params/tests.py
|
c93cb528dd87a34cb25ffb1ba372511159998b42
|
[] |
no_license
|
elcolie/HT6MInterface
|
dceb8f5e9b501b8836904559bd40259ccfe49085
|
04abf3cc73618c1cf059fa67da8a043ec9fb43b3
|
refs/heads/master
| 2022-02-04T08:02:49.023460
| 2021-01-21T06:55:39
| 2021-01-21T06:55:39
| 123,398,906
| 0
| 0
| null | 2022-01-21T20:20:29
| 2018-03-01T07:30:16
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,395
|
py
|
from django.test import TestCase
from apps.control_params.api.serializers import ControlParameterSerializer
from apps.control_params.models import ControlParameter
from apps.heating_params.models import HeatingParameter
class TestControlParameter(TestCase):
def setUp(self):
pass
def test_blank_initial_data(self):
"""If no supply then serializer will be stuck at `heating_params`"""
serializer = ControlParameterSerializer(data={})
assert False is serializer.is_valid()
def test_control_param_serializer_number_mismatch(self):
data = {
'no_break_point': 3,
'max_run_time': 10,
'heating_params': [
{
'break_point_number': 2,
'breakpoint_time': 0,
'timestep': 0.01,
'particle_species': "E",
'rate_of_particle_source': 0,
'radial_position': 0,
'radial_width': 0.5,
'nbi_power': 0,
'nbi_radial_position': 0,
'nbi_radial_width': 0.5,
'icrf_power': 0,
'icrf_radial': 0,
'icrf_radial_width': 0.5,
},
{
'break_point_number': 2,
'breakpoint_time': 0,
'timestep': 0.01,
'particle_species': "H",
'rate_of_particle_source': 0,
'radial_position': 0,
'radial_width': 0.5,
'nbi_power': 0,
'nbi_radial_position': 0,
'nbi_radial_width': 0.5,
'icrf_power': 0,
'icrf_radial': 0,
'icrf_radial_width': 0.5,
},
]
}
serializer = ControlParameterSerializer(data=data)
detail = f"Heating params count is mismatch with given number of break point"
assert False is serializer.is_valid()
assert detail == str(serializer.errors.get('heating_params')[0])
def test_control_param_serializer(self):
data = {
'no_break_point': 3,
'max_run_time': 10,
'heating_params': [
{
'break_point_number': 2,
'breakpoint_time': 0,
'timestep': 0.01,
'particle_species': "E",
'rate_of_particle_source': 0,
'radial_position': 0,
'radial_width': 0.5,
'nbi_power': 0,
'nbi_radial_position': 0,
'nbi_radial_width': 0.5,
'icrf_power': 0,
'icrf_radial': 0,
'icrf_radial_width': 0.5,
},
{
'break_point_number': 2,
'breakpoint_time': 0,
'timestep': 0.01,
'particle_species': "H",
'rate_of_particle_source': 0,
'radial_position': 0,
'radial_width': 0.5,
'nbi_power': 0,
'nbi_radial_position': 0,
'nbi_radial_width': 0.5,
'icrf_power': 0,
'icrf_radial': 0,
'icrf_radial_width': 0.5,
},
{
'break_point_number': 2,
'breakpoint_time': 0,
'timestep': 0.01,
'particle_species': "E",
'rate_of_particle_source': 0,
'radial_position': 0,
'radial_width': 0.5,
'nbi_power': 0,
'nbi_radial_position': 0,
'nbi_radial_width': 0.5,
'icrf_power': 0,
'icrf_radial': 0,
'icrf_radial_width': 0.5,
},
]
}
serializer = ControlParameterSerializer(data=data)
is_valid = serializer.is_valid()
serializer.save()
assert is_valid is serializer.is_valid()
assert 3 == HeatingParameter.objects.count()
assert 1 == ControlParameter.objects.count()
|
[
"sarit@elcolie.com"
] |
sarit@elcolie.com
|
b4a8f51c98e4d0b78246cec00a5ac088829be5c0
|
1e7b1e719fa9ab0694994575a7add8bf659e4808
|
/booksys/books/migrations/0001_initial.py
|
73fc1a974897ea558b6aac06fd35d8d41ce7fa66
|
[] |
no_license
|
IHaveGoal/Django
|
766092c6724435a39926b8e30af73961df4ca1af
|
84d0cb7065e63e481a8149da68eb69fe514fdaa7
|
refs/heads/master
| 2020-05-07T21:39:47.428977
| 2019-06-24T23:37:58
| 2019-06-24T23:37:58
| 180,912,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
# Generated by Django 2.1.7 on 2019-05-18 14:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=64, unique=True)),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=64, unique=True)),
],
),
migrations.CreateModel(
name='Publisher',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=64, unique=True)),
('addr', models.CharField(max_length=128)),
],
),
migrations.AddField(
model_name='book',
name='publisher',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.Publisher'),
),
migrations.AddField(
model_name='author',
name='book',
field=models.ManyToManyField(to='books.Book'),
),
]
|
[
"1043260502@qq.com"
] |
1043260502@qq.com
|
c59d88b870e80d47624f1902072e46caf9ed8d9d
|
889442ea4f90b6199763c1e973058c04a76d178e
|
/exceltodocx.py
|
6a675480a2b82be91a2b13b18b12c1a275614088
|
[] |
no_license
|
El-Ghiffari/SimpleTools
|
c49c08dddd3308949fb1ab79c24e58505d0439c7
|
f6edcf42c59c66d6acf8d7e6b6e80caddbf25eb0
|
refs/heads/master
| 2023-06-09T08:35:59.620817
| 2023-05-25T09:31:43
| 2023-05-25T09:31:43
| 293,282,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,424
|
py
|
import pandas as pd
import docx
from docx import Document
from docx.shared import Pt
#from docx.oxml.ns import nsdecls
#from docx.oxml import parse_xml
def create_vulnerability_tables(excel_file, docx_file):
# Read the Excel file into a pandas DataFrame
df = pd.read_excel(excel_file)
# Create a new Word document
doc = Document()
# Register Montserrat font in the document
doc.styles['Normal'].font.name = 'Montserrat'
# Iterate over the rows of the DataFrame
for _, row in df.iterrows():
# Get the vulnerability details from the row
vulnerability = row['Vulnerability']
severity = row['Severity']
cvss = row['CVSS']
impact = row['Impact']
no = row['No']
time = row['Time']
ip_domain = row['IP/DOMAIN']
endpoint = row['Endpoint']
platform = row['Platform']
description = row['Description']
poc = row['PoC']
recommendation = row['Recommendation']
references = row['References']
status = row['Status']
# Add the vulnerability name as Heading 2 with Montserrat font
doc.add_heading(vulnerability, level=2).style.font.name = 'Montserrat'
# Create a table with two columns
table = doc.add_table(rows=14, cols=2)
table.style = 'Table Grid'
table.autofit = True
# Set the column widths
# table.columns[0].width = Pt(100)
# table.columns[1].width = Pt(400)
# Set the cell values for each row
table.cell(0, 0).paragraphs[0].add_run('No').bold = True
table.cell(0, 1).text = str(no)
table.cell(1, 0).paragraphs[0].add_run('Time').bold = True
table.cell(1, 1).text = str(time)
table.cell(2, 0).paragraphs[0].add_run('IP/DOMAIN').bold = True
table.cell(2, 1).text = str(ip_domain)
table.cell(3, 0).paragraphs[0].add_run('Endpoint').bold = True
table.cell(3, 1).text = str(endpoint)
table.cell(4, 0).paragraphs[0].add_run('Platform').bold = True
table.cell(4, 1).text = str(platform)
table.cell(5, 0).paragraphs[0].add_run('Vulnerability').bold = True
table.cell(5, 1).text = str(vulnerability)
table.cell(6, 0).paragraphs[0].add_run('Description').bold = True
table.cell(6, 1).text = str(description)
table.cell(7, 0).paragraphs[0].add_run('Severity').bold = True
table.cell(7, 1).text = str(severity)
table.cell(8, 0).paragraphs[0].add_run('CVSS').bold = True
table.cell(8, 1).text = str(cvss)
table.cell(9, 0).paragraphs[0].add_run('Impact').bold = True
table.cell(9, 1).text = str(impact)
table.cell(10, 0).paragraphs[0].add_run('PoC').bold = True
poc_cell = table.cell(10, 1)
poc_hyperlink = poc_cell.paragraphs[0].add_run()
poc_hyperlink.text = str(poc)
poc_hyperlink.font.name = 'Montserrat'
poc_hyperlink.font.underline = True
poc_hyperlink.font.color.rgb = docx.shared.RGBColor(0x00, 0x00, 0xFF)
        # NOTE: Run objects have no 'hyperlink' attribute in python-docx; this
        # assignment is a silent no-op (see the add_hyperlink sketch below).
        poc_hyperlink.hyperlink = poc
table.cell(11, 0).paragraphs[0].add_run('Recommendation').bold = True
table.cell(11, 1).text = str(recommendation)
table.cell(12, 0).paragraphs[0].add_run('References').bold = True
references_cell = table.cell(12, 1)
references_hyperlink = references_cell.paragraphs[0].add_run()
references_hyperlink.text = str(references)
references_hyperlink.font.name = 'Montserrat'
references_hyperlink.font.underline = True
references_hyperlink.font.color.rgb = docx.shared.RGBColor(0x00, 0x00, 0xFF)
        # NOTE: as above, this 'hyperlink' assignment is a silent no-op.
        references_hyperlink.hyperlink = references
table.cell(13, 0).paragraphs[0].add_run('Status').bold = True
table.cell(13, 1).text = str(status)
# Apply Montserrat font to the table
for row in table.rows:
for cell in row.cells:
for paragraph in cell.paragraphs:
for run in paragraph.runs:
run.font.name = 'Montserrat'
# Save the Word document
doc.save(docx_file)
# Example usage
excel_file_path = input("Your path to excel file : ")
docx_file_path = 'report.docx'
create_vulnerability_tables(excel_file_path, docx_file_path)
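# The 'hyperlink' assignments above have no effect in python-docx. A working
# alternative is the widely used oxml recipe sketched below (add_hyperlink is
# a local helper defined here, not a python-docx API):
from docx.opc.constants import RELATIONSHIP_TYPE
from docx.oxml.shared import OxmlElement, qn
def add_hyperlink(paragraph, url, text):
    # Register the URL as an external relationship on the document part
    r_id = paragraph.part.relate_to(url, RELATIONSHIP_TYPE.HYPERLINK, is_external=True)
    hyperlink = OxmlElement('w:hyperlink')
    hyperlink.set(qn('r:id'), r_id)
    run = OxmlElement('w:r')
    text_element = OxmlElement('w:t')
    text_element.text = text
    run.append(text_element)
    hyperlink.append(run)
    paragraph._p.append(hyperlink)
    return hyperlink
# e.g. add_hyperlink(poc_cell.paragraphs[0], str(poc), str(poc)) instead of the
# add_run / .hyperlink lines above.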
|
[
"noreply@github.com"
] |
noreply@github.com
|
6f402e70790e857a892dd3c982f152b9d2b8fa41
|
b84c89d0ade21bf8c2df9d0cf8f94d7a27c2824b
|
/py2neo/text/table.py
|
6af607df167d03cbce805799244802ff457d8034
|
[
"Apache-2.0"
] |
permissive
|
srlabUsask/py2neo
|
931b06678561201d56a36ec10da7ad4614ab6c87
|
80d3cf1ab0b4cfb03b7824fd7a407b33c95a1e8f
|
refs/heads/master
| 2022-11-16T21:17:42.319698
| 2020-07-12T23:00:29
| 2020-07-12T23:00:29
| 279,281,481
| 0
| 0
|
Apache-2.0
| 2020-07-13T11:17:53
| 2020-07-13T11:17:50
| null |
UTF-8
|
Python
| false
| false
| 10,917
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2020, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, unicode_literals
from io import StringIO
from py2neo.compat import integer_types, numeric_types, string_types, ustr
from py2neo.cypher import cypher_repr, cypher_str
def html_escape(s):
return (s.replace(u"&", u"&")
.replace(u"<", u"<")
.replace(u">", u">")
.replace(u'"', u""")
.replace(u"'", u"'"))
class Table(list):
""" Immutable list of records.
"""
def __init__(self, records, keys=None):
super(Table, self).__init__(map(tuple, records))
if keys:
k = list(map(ustr, keys))
else:
try:
k = records.keys()
except AttributeError:
raise ValueError("Missing keys")
width = len(k)
t = [set() for _ in range(width)]
o = [False] * width
for record in self:
for i, value in enumerate(record):
if value is None:
o[i] = True
else:
t[i].add(type(value))
f = []
for i, _ in enumerate(k):
f.append({
"type": t[i].copy().pop() if len(t[i]) == 1 else tuple(t[i]),
"numeric": all(t_ in numeric_types for t_ in t[i]),
"optional": o[i],
})
self._keys = k
self._fields = f
def __repr__(self):
s = StringIO()
self.write(file=s, header=True)
return s.getvalue()
def _repr_html_(self):
""" Return a string containing an HTML representation of this table.
This method is used by Jupyter notebooks to display the table natively within a browser.
Internally, this method calls :meth:`.write_html` with `header=True`, writing the output into an ``io.StringIO`` instance.
"""
s = StringIO()
self.write_html(file=s, header=True)
return s.getvalue()
def keys(self):
""" Return a list of field names for this table.
"""
return list(self._keys)
def field(self, key):
""" Return a dictionary of metadata for a given field.
The metadata includes the following values:
`type`
Single class or tuple of classes representing the
field values.
`numeric`
:const:`True` if all field values are of a numeric
type, :const:`False` otherwise.
`optional`
:const:`True` if any field values are :const:`None`,
:const:`False` otherwise.
"""
if isinstance(key, integer_types):
return self._fields[key]
elif isinstance(key, string_types):
try:
index = self._keys.index(key)
except ValueError:
raise KeyError(key)
else:
return self._fields[index]
else:
raise TypeError(key)
def _range(self, skip, limit):
if skip is None:
skip = 0
if limit is None or skip + limit > len(self):
return range(skip, len(self))
else:
return range(skip, skip + limit)
def write(self, file=None, header=None, skip=None, limit=None, auto_align=True,
padding=1, separator=u"|", newline=u"\r\n"):
""" Write data to a human-readable ASCII art table.
:param file: file-like object capable of receiving output
:param header: boolean flag for addition of column headers
:param skip: number of records to skip before beginning output
:param limit: maximum number of records to include in output
:param auto_align: if :const:`True`, right-justify numeric values
:param padding: number of spaces to include between column separator and value
:param separator: column separator character
:param newline: newline character sequence
:return: the number of records included in output
"""
space = u" " * padding
widths = [1 if header else 0] * len(self._keys)
def calc_widths(values, **_):
strings = [cypher_str(value).splitlines(False) for value in values]
for i, s in enumerate(strings):
w = max(map(len, s)) if s else 0
if w > widths[i]:
widths[i] = w
def write_line(values, underline=u""):
strings = [cypher_str(value).splitlines(False) for value in values]
height = max(map(len, strings)) if strings else 1
for y in range(height):
line_text = u""
underline_text = u""
for x, _ in enumerate(values):
try:
text = strings[x][y]
except IndexError:
text = u""
if auto_align and self._fields[x]["numeric"]:
text = space + text.rjust(widths[x]) + space
u_text = underline * len(text)
else:
text = space + text.ljust(widths[x]) + space
u_text = underline * len(text)
if x > 0:
text = separator + text
u_text = separator + u_text
line_text += text
underline_text += u_text
if underline:
line_text += newline + underline_text
line_text += newline
print(line_text, end=u"", file=file)
def apply(f):
count = 0
for count, index in enumerate(self._range(skip, limit), start=1):
if count == 1 and header:
f(self.keys(), underline=u"-")
f(self[index])
return count
apply(calc_widths)
return apply(write_line)
def write_html(self, file=None, header=None, skip=None, limit=None, auto_align=True):
""" Write data to an HTML table.
:param file: file-like object capable of receiving output
:param header: boolean flag for addition of column headers
:param skip: number of records to skip before beginning output
:param limit: maximum number of records to include in output
:param auto_align: if :const:`True`, right-justify numeric values
:return: the number of records included in output
"""
def write_tr(values, tag):
print(u"<tr>", end="", file=file)
for i, value in enumerate(values):
if tag == "th":
template = u'<{}>{}</{}>'
elif auto_align and self._fields[i]["numeric"]:
template = u'<{} style="text-align:right">{}</{}>'
else:
template = u'<{} style="text-align:left">{}</{}>'
print(template.format(tag, html_escape(cypher_str(value)), tag), end="", file=file)
print(u"</tr>", end="", file=file)
count = 0
print(u"<table>", end="", file=file)
for count, index in enumerate(self._range(skip, limit), start=1):
if count == 1 and header:
write_tr(self.keys(), u"th")
write_tr(self[index], u"td")
print(u"</table>", end="", file=file)
return count
def write_separated_values(self, separator, file=None, header=None, skip=None, limit=None,
newline=u"\r\n", quote=u"\""):
""" Write data to a delimiter-separated file.
:param separator: field separator character
:param file: file-like object capable of receiving output
:param header: boolean flag or string style tag, such as 'i' or 'cyan',
for addition of column headers
:param skip: number of records to skip before beginning output
:param limit: maximum number of records to include in output
:param newline: newline character sequence
:param quote: quote character
:return: the number of records included in output
"""
escaped_quote = quote + quote
quotable = separator + newline + quote
def header_row(names):
from pansi import ansi
if isinstance(header, string_types):
if hasattr(ansi, header):
template = "{%s}{}{_}" % header
else:
t = [tag for tag in dir(ansi) if
not tag.startswith("_") and isinstance(getattr(ansi, tag), str)]
raise ValueError("Unknown style tag %r\n"
"Available tags are: %s" % (header, ", ".join(map(repr, t))))
else:
template = "{}"
for name in names:
yield template.format(name, **ansi)
def data_row(values):
for value in values:
if value is None:
yield ""
continue
if isinstance(value, string_types):
value = ustr(value)
if any(ch in value for ch in quotable):
value = quote + value.replace(quote, escaped_quote) + quote
else:
value = cypher_repr(value)
yield value
count = 0
for count, index in enumerate(self._range(skip, limit), start=1):
if count == 1 and header:
print(*header_row(self.keys()), sep=separator, end=newline, file=file)
print(*data_row(self[index]), sep=separator, end=newline, file=file)
return count
def write_csv(self, file=None, header=None, skip=None, limit=None):
""" Write the data as RFC4180-compatible comma-separated values.
This is a customised call to :meth:`.write_separated_values`.
"""
return self.write_separated_values(u",", file, header, skip, limit)
def write_tsv(self, file=None, header=None, skip=None, limit=None):
""" Write the data as tab-separated values.
This is a customised call to :meth:`.write_separated_values`.
"""
return self.write_separated_values(u"\t", file, header, skip, limit)
|
[
"noreply@github.com"
] |
noreply@github.com
|
0a551818b8e85dd12f84691ab34b3df1f13c138e
|
14ddda0c376f984d2a3f7dcd0ca7aebb7c49648d
|
/bnn_mcmc_examples/examples/mlp/noisy_xor/setting2/mcmc/metropolis_hastings/pilot_visual_summary.py
|
34d2bb1186d2c1da6be83394dc47fe6431283c68
|
[
"MIT"
] |
permissive
|
papamarkou/bnn_mcmc_examples
|
62dcd9cc0cf57cda39aa46c2f2f237bbcd2d35bb
|
7bb4ecfb33db4c30a8e61e31f528bda0efb24e3d
|
refs/heads/main
| 2023-07-12T20:51:28.302981
| 2021-08-22T13:06:17
| 2021-08-22T13:06:17
| 316,554,634
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
# %% Import packages
import kanga.plots as ps
from kanga.chains import ChainArray
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.mcmc.constants import diagnostic_iter_thres
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.mcmc.metropolis_hastings.constants import sampler_output_pilot_path
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting2.model import model
# %% Load chain array
chain_array = ChainArray.from_file(keys=['sample', 'accepted'], path=sampler_output_pilot_path)
# %% Drop burn-in samples
chain_array.vals['sample'] = chain_array.vals['sample'][diagnostic_iter_thres:, :]
chain_array.vals['accepted'] = chain_array.vals['accepted'][diagnostic_iter_thres:]
# %% Plot traces of simulated chain
for i in range(model.num_params()):
ps.trace(
chain_array.get_param(i),
title=r'Traceplot of $\theta_{{{}}}$'.format(i+1),
xlabel='Iteration',
ylabel='Parameter value'
)
# %% Plot running means of simulated chain
for i in range(model.num_params()):
ps.running_mean(
chain_array.get_param(i),
title=r'Running mean plot of parameter $\theta_{{{}}}$'.format(i+1),
xlabel='Iteration',
ylabel='Running mean'
)
# %% Plot histograms of marginals of simulated chain
for i in range(model.num_params()):
ps.hist(
chain_array.get_param(i),
bins=30,
density=True,
title=r'Histogram of parameter $\theta_{{{}}}$'.format(i+1),
xlabel='Parameter value',
ylabel='Parameter relative frequency'
)
|
[
"theodore.papamarkou@gmail.com"
] |
theodore.papamarkou@gmail.com
|
dd64f7eec6f6c0a278fb130622ec660cb9b77db2
|
7626985c23e2e0ee1361c7c4aaed8a2ed36567ce
|
/coursera.py
|
b766fa2478de671dbca69c619803d6f21ee3cfe9
|
[] |
no_license
|
fabiogeraci/Coursera
|
fff49b1b52beb64e47a75dc84fd9386c28a1362e
|
fba7aad40689443a750d7ee02474fd5e607a1181
|
refs/heads/main
| 2023-03-25T16:29:27.290311
| 2021-03-14T21:53:53
| 2021-03-14T21:53:53
| 332,873,168
| 0
| 0
| null | 2021-02-04T11:41:47
| 2021-01-25T20:23:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
class MusicSchool:
students = {"Gino": [15, "653-235-345", ["Piano", "Guitar"]],
"Talina": [28, "555-765-452", ["Cello"]],
"Eric": [12, "583-356-223", ["Singing"]]}
def __init__(self, working_hours, revenue):
self.working_hours = working_hours
self.revenue = revenue
# Add your methods below this line
def print_student_data(self):
for key, value in MusicSchool.students.items():
self.print_student(key, value)
def print_student(self, k, v):
print('Student: ' + k + ' who is ' + str(v[0]) + ' years old and is taking ' + str(v[2]))
def add_student(self, new_student, age, contact, instrument):
MusicSchool.students[new_student] = [age, contact, instrument]
return MusicSchool.students
# Create the instance
my_school = MusicSchool(8, 15000)
my_school.print_student_data()
# Call the methods
my_school.add_student('Fabio', 45, '5555555', ['guitar'])
with open('my_school_records.txt', 'w') as data:
for key, value in MusicSchool.students.items():
data.write(str(key) + ': ' + str(value) + '\n')
|
[
"fabio.geraci@gmail.com"
] |
fabio.geraci@gmail.com
|
d9b0b103fe9af36c303a318f55ab2bc223d3655d
|
e4f5eec71bed0406bed437aad71099177548baa5
|
/test/test_gensim.py
|
55036181ab4962a359da22075c0e2e97a48504b9
|
[
"Apache-2.0"
] |
permissive
|
smilelight/lightText
|
23c2c858635d791bb36c597ea620d23f59c1d8d7
|
b015d0e3524722fb5a8ee5ea83b7fbbd7408f797
|
refs/heads/master
| 2021-08-15T21:17:08.495864
| 2021-07-06T05:38:04
| 2021-07-06T05:38:04
| 168,789,109
| 14
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,958
|
py
|
# -*- coding: utf-8 -*-
from typing import List
import jieba.posseg as pseg
from gensim import corpora, models, similarities
NUM_TOPICS = 350
MODEL_PATH = 'saves/model'
DIC_PATH = 'saves/dic'
def tokenize(text: str) -> List[str]:
    # Stop flags: punctuation, conjunctions, particles, adverbs, prepositions,
    # time morphemes, the particle "de", numerals, locatives, pronouns
# {'x', 'c', 'u', 'd', 'p', 't', 'uj', 'm', 'f', 'r'}
stop_flags = {'x', 'c', 'u', 'd', 'p', 't', 'uj', 'm', 'f', 'r'}
stop_words = {'nbsp', '\u3000', '\xa0'}
words = pseg.cut(text)
return [word for word, flag in words if flag not in stop_flags and word not in stop_words]
def get_dic_corpus(contents: List[str]):
texts = [tokenize(content) for content in contents]
dic = corpora.Dictionary(texts)
corpus = [dic.doc2bow(text) for text in texts]
return dic, corpus
def get_lsi_model(corpus, dic, num_topics: int = NUM_TOPICS):
    lsi = models.LsiModel(corpus, id2word=dic, num_topics=num_topics)
index = similarities.MatrixSimilarity(lsi[corpus])
return lsi, index
def get_tfidf_model(corpus, dic):
model = models.TfidfModel(corpus, id2word=dic)
print(dic.token2id, type(dic.token2id))
index = similarities.MatrixSimilarity(model[corpus], num_features=len(dic.token2id))
return model, index
def get_lda_model(corpus, dic, num_topics: int = NUM_TOPICS):
    model = models.LdaModel(corpus, id2word=dic, num_topics=num_topics)
    index = similarities.MatrixSimilarity(model[corpus], num_features=num_topics)
return model, index
def get_test_mtx(texts: List[str], dic, model):
corpus = [dic.doc2bow(tokenize(text)) for text in texts]
    idx = similarities.MatrixSimilarity(model[corpus], num_features=model.num_topics)
return idx
if __name__ == '__main__':
text = "测试曹操去东北,然后hello world!"
print(tokenize(text))
contents = [
'乔布斯极力推崇自己家的苹果手机',
'这苹果又大又圆又甜,还便宜',
'这年头,谁还用安卓手机,要么是苹果,要么是鸿蒙'
]
others = [
'许多超市里都有卖苹果的',
'比尔盖茨打算收购乔布斯的苹果手机'
]
dic, corpus = get_dic_corpus(contents)
text = '苹果手机还是小米手机呢?'
text_vec = dic.doc2bow(tokenize(text))
print(text_vec)
# 获取tfidf模型
# model, idx = get_tfidf_model(corpus, dic)
# 获取lsi模型
# model, idx = get_lsi_model(corpus, dic)
# print(model.print_topics())
# 获取lda模型
model, idx = get_lda_model(corpus, dic)
print(model.print_topics())
model.save(MODEL_PATH)
dic.save(DIC_PATH)
model = models.LdaModel.load(MODEL_PATH)
dic = corpora.Dictionary.load(DIC_PATH)
test_mtx = get_test_mtx(others, dic, model)
# sims = idx[model[text_vec]]
sims = test_mtx[model[text_vec]]
print(list(enumerate(sims)))
|
[
"iamlightsmile@qq.com"
] |
iamlightsmile@qq.com
|
b79dff80f9ba01f7effb651281466d39bbff7351
|
b6df7630818b1a96f695f9f781f2d4aee3b12c67
|
/spacy_cosine_similarity.py
|
50a8457d1245a52b1b5642ee0ae441fd6e137d6a
|
[
"Apache-2.0"
] |
permissive
|
JBAhire/semantic-similarity
|
6ad4903377297b3c4f4bbaa54c3c263e56421a46
|
2a5d93cde033d5fa18da2aa4ff3fc5d94bcd652e
|
refs/heads/master
| 2020-08-04T05:03:34.541160
| 2019-10-01T14:12:45
| 2019-10-01T14:12:45
| 212,016,418
| 4
| 1
|
Apache-2.0
| 2019-10-01T14:14:52
| 2019-10-01T05:04:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
import spacy
import pandas as pd
from common import similarity_test
nlp = spacy.load('en_core_web_md')
'''
common = similarity_test()
data = pd.read_csv('data/sick_dev.csv')
sent_1_list = list(data.sent_1)
data = common.normalize(data, ['sim'])
sim_list = list(data.sim)
sent_2_list = list(data.sent_2)
min_list1 = sent_1_list[:10]
min_list2 = sent_2_list[:10]
min_list3 = sim_list[:10]
for text1, text2 in zip(min_list1, min_list2):
search_doc = nlp(text1)
main_doc = nlp(text2)
search_doc_no_stop_words = nlp(' '.join([str(t) for t in search_doc if not t.is_stop]))
main_doc_no_stop_words = nlp(' '.join([str(t) for t in main_doc if not t.is_stop]))
print(search_doc_no_stop_words.similarity(main_doc_no_stop_words))
print('========================================================================')
'''
class spacy_semantic:
def vectorise(self, sentence):
text = nlp(sentence)
vector = nlp(' '.join([str(t) for t in text if not t.is_stop]))
return vector
def predict_similarity(self, text1, text2):
vector1 = self.vectorise(text1)
vector2 = self.vectorise(text2)
return vector1.similarity(vector2)
if __name__ == "__main__":
pass
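# Example usage (a sketch; the score depends on the en_core_web_md vectors):
#   ss = spacy_semantic()
#   print(ss.predict_similarity("A man is riding a horse",
#                               "A person rides an animal"))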
|
[
"noreply@github.com"
] |
noreply@github.com
|
103c9cd7adacc5f7038df27ad25a009d5dbde33f
|
f4c2b016818be9b60825818af7f8a1fb77179069
|
/Protocols/Socket.py
|
fcb82f5a65676f532c72b30dd08a0b71561c83ae
|
[] |
no_license
|
werisonfernandes/python-course
|
f74f310fc1a16598014680fd7812eeae14e90c60
|
711e3c7e7fd6c0bc5a8d4d61953d1e941455561e
|
refs/heads/main
| 2023-04-18T11:46:09.003617
| 2021-04-29T14:54:46
| 2021-04-29T14:54:46
| 362,198,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
import socket
resp = "S"
while resp == "S":
    url = input("Digite uma url: ")
    ip = socket.gethostbyname(url)
    print("O IP referente a url informada eh: ", ip)
    resp = input("Digite <s> para continuar: ").upper()
|
[
"waparecf@everis.com"
] |
waparecf@everis.com
|
d82be73e49649c2629a116486ba147d0066c196c
|
a5924186cb3b16a96b682357d0bec2af10d0b5a1
|
/model_evaluation/run_FedAvg_all.py
|
09d3f15f05a5c9417edc35c8340ebe3246609c28
|
[
"MIT"
] |
permissive
|
thesuperorange/deepMI3
|
b7cb8dca34e5e00fc932c0dc9d07d28f7d7acf01
|
e0dc57b7e3863d3ed1061b9562984e8d8f403cc5
|
refs/heads/master
| 2022-08-28T22:25:42.161702
| 2022-08-16T18:18:33
| 2022-08-16T18:18:33
| 246,145,167
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,297
|
py
|
import os
import pascalvoc
import shutil
if __name__ == '__main__':
scene_list = ['Pathway','Doorway']#['Room','Staircase','Bus'] #,'Pathway','Doorway''Staircase']
mode = 'FedWCD' #FedAvg
DATASET='MI3'
ROUND_NUM = 10
NET='vgg16'
wcd_all=True
#isAll = True
for isAll in (True, False):
# dataset_list = ['campus','road','downtown']
# for dataset in dataset_list:
for i,scene in enumerate(scene_list):
sub_folder=mode+'_no'+scene
if wcd_all:
sub_folder = sub_folder+'_all'
for r in range(1,ROUND_NUM+1):
output_folder = sub_folder+'/'+DATASET+'_fasterRCNN_'+NET+'-AVG_'+str(r)+'/'
# for epoch in range(3):
# gtFolder = "/home/superorange5/data/KAIST/test_annotations/visible/campus"
detFolder = "/home/superorange5/Research/FedWCD/output/"+output_folder
if isAll:
detFolder = detFolder+"detection_results/"
gtFolder = "/home/superorange5/MI3_dataset/MI3_dataset/Ann_txt/"
else:
input_dir = detFolder+"detection_results/"
detFolder = detFolder+scene+"/"
gtFolder = "/home/superorange5/MI3_dataset/MI3_dataset_bydataset/"+scene+"_txt/"
#------ copy target data to another folder--------------
for file in os.listdir(input_dir):
if scene in file:
if not os.path.exists(detFolder):
os.makedirs(detFolder)
#print("copy {} to {}".format(os.path.join(input_dir, file),os.path.join(output_dir, file)))
shutil.copy2(os.path.join(input_dir, file), os.path.join(detFolder, file))
#---------------------------#
gtformat = 'xyrb'
detformat = 'xyrb'
confidence_TH = 0
iou_TH = 0.5
output_str = pascalvoc.evaluation(gtFolder, detFolder, iou_TH, gtformat, detformat, None, confidence_TH=confidence_TH, range=None)
print(scene+','+str(isAll)+','+str(r)+',' + output_str)
|
[
"1503054@narlabs.org.tw"
] |
1503054@narlabs.org.tw
|
75f7352215afcb05eba0feab15b9bd1c6ea0fb35
|
ba0487cc0850502b7c09a16f886a7ecc406582af
|
/sender.py
|
7ac29e850782f8d77852616e378c1e2be24a331f
|
[
"MIT"
] |
permissive
|
CristianRod21/YOLO_vs_RCNN_qiBullet
|
0f7cd5a587b5e8605773db605b6a16bfc0ea88eb
|
6f8222e87e7d59920c1370ae1b3d1693f2ec27e2
|
refs/heads/master
| 2022-11-10T16:09:27.311053
| 2020-07-01T17:19:29
| 2020-07-01T17:19:29
| 267,651,272
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
from npsocket import SocketNumpyArray
import cv2
import sys
import pybullet
import pybullet_data
import time
from qibullet import SimulationManager
from qibullet import NaoVirtual
from qibullet import PepperVirtual
from qibullet import RomeoVirtual
import time
sim_manager = SimulationManager()
client = sim_manager.launchSimulation(gui=True)
# client_direct_1 = sim_manager.launchSimulation(gui=False)
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())
print(pybullet_data.getDataPath())
pybullet.loadURDF('plane.urdf')
table = pybullet.loadURDF('table/table.urdf',
basePosition= [0.5,2,0],
physicsClientId=client)
item_in_table = pybullet.loadURDF('teddy_vhacd.urdf',
basePosition= [0.5,2,1],
globalScaling = 5.0,
physicsClientId=client)
item_in_table2 = pybullet.loadURDF('bicycle/bike.urdf',
basePosition= [0.5,2.5,1],
globalScaling = 0.7,
physicsClientId=client)
#walls2 = pybullet.loadURDF('C:\\Users\\cjrs2\\Downloads\\keras-yolo3\\proyecto_robo\\walls2.urdf',
# physicsClientId=client)
# nao = sim_manager.spawnNao(
# client,
# translation=[0.5,2,1],
# quaternion=pybullet.getQuaternionFromEuler([0, 0, 3]))
pepper = sim_manager.spawnPepper(
client,
translation=[0, -2, 0],
quaternion=pybullet.getQuaternionFromEuler([0, 0, 1.5]))
# nao.goToPosture('StandInit', 1)
pepper.goToPosture("Stand", 1)
sock_sender = SocketNumpyArray()
sock_sender.initialize_sender('localhost', 9995)
# nao.setAngles('HeadPitch', 0.25, 1)
handle = pepper.subscribeCamera(PepperVirtual.ID_CAMERA_TOP )
print('Retriving camera frame')
#x = 0
frames = 0
# while True:
while True:
start_time = time.time()
frame = pepper.getCameraFrame(handle)
print('sending frame')
sock_sender.send_numpy_array(frame)
pepper.unsuscribeCamera(PepperVirtual.ID_CAMERA_TOP)
|
[
"noreply@github.com"
] |
noreply@github.com
|
cde77cd5f408ae6c900695c6a8911f4f533f2713
|
e3f9b7a583d38e8c4460754d7d01b87d145ebec3
|
/accounts/forms.py
|
2fa5e55849b3f4f21a8b6ff0223b09f2c871bceb
|
[] |
no_license
|
Kanurow/zuri_crud
|
3a82021e6ab07f9d6ae91d15dd68e0e17fa67078
|
045abf09473caa6610fcafa2a5517977fab058a6
|
refs/heads/main
| 2023-04-27T15:58:15.008751
| 2021-05-03T23:55:56
| 2021-05-03T23:55:56
| 361,911,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
class RegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
|
[
"kanurowland92@gmail.com"
] |
kanurowland92@gmail.com
|
ca13fd7a6424d211800459fd5ffd7561b385d1a8
|
a26f4238d17b3cc93edf1cfecaea735e7e905a22
|
/problemSheets/flask/hello.py
|
f41203779f9bc35e00e84e27eeff8e94968d5413
|
[] |
no_license
|
Ryan-Gordon/Data-Rep-and-Querying
|
b513a6746dbe7503412e4d4cdf19afde10d5bb13
|
864dbf6adfdef1c44850fa74f67ded83f71e99e2
|
refs/heads/master
| 2021-06-09T15:28:50.710120
| 2016-12-22T11:34:54
| 2016-12-22T11:34:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,206
|
py
|
from flask import Flask, render_template, request, current_app
app = Flask(__name__)
# Hello
@app.route("/")
def hello():
return "Hello World!"
# Simple hello function
@app.route("/name")
def name():
return "Hello Ryan!"
# Route takes input based on URL provide /name/Ryan provides the param user
@app.route("/name/<user>")
def helloName(user):
return "Hello Student {}".format(user)
@app.route("/signup")
def signup():
email = request.form['email']
print("The email address is '" + email + "'")
# Loads a page from the templates folder called index.html
@app.route("/<string:page_name>/")
def template_page(page_name):
return render_template('%s.html' % page_name)
@app.route("/")
def static_page():
return current_app.send_static_file('index.html')
@app.route('/student')
def student():
return render_template('student.html')
@app.route('/result',methods = ['POST', 'GET'])
def result():
if request.method == 'POST':
result = request.form
return render_template("result.html",result = result)
# app.run() must come after all the route definitions; it blocks, so nothing
# placed below it will execute while the server is running
if __name__ == "__main__":
app.run()
|
[
"ryangordon210@gmail.com"
] |
ryangordon210@gmail.com
|
3888d78296a2ddbc02eb9eceebb9ee68652b9c3c
|
0a89b8ecca5912cbf086f17de25d543c17d225a0
|
/LABS/Pickle/pickle-email.py
|
fc67297735e8d99054ff71c70b7c34096eddaef7
|
[] |
no_license
|
meighanv/05-Python-Programming
|
0072051149b8ed3b668250b2c1d4d5503715b0b6
|
5493fc2adfbf5fbf5a03a1e027c269646b639332
|
refs/heads/master
| 2020-08-29T14:21:19.646870
| 2020-03-13T18:36:07
| 2020-03-13T18:36:07
| 218,059,331
| 0
| 0
| null | 2019-10-28T13:58:16
| 2019-10-28T13:58:16
| null |
UTF-8
|
Python
| false
| false
| 5,622
|
py
|
"""
8. Name and Email Addresses
Write a program that keeps names and email addresses in a dictionary as key-value pairs.
The program should display a menu that lets the user look up a person’s email address, add
a new name and email address, change an existing email address, and delete an existing
name and email address. The program should pickle the dictionary and save it to a file
when the user exits the program. Each time the program starts, it should retrieve the dictionary from the file and unpickle it.
"""
import pickle
from os import path
#define main
def main():
#output_file = open('email.dat', 'wb')
#List of valid menu options
menuOptions = ['l','m','a','d','x']
#setting filename variable for check
filename = 'email.dat'
#Checking for existence of file; if it's missing it will create prompting for first entry
if path.exists(filename) == False:
freshStart(filename)
#Init selection for while loop
selection = 'b'
while selection.lower() != 'x':
selection = menu(menuOptions)
#lowers the input of to match against selections
if selection.lower() == 'l':
lookupEntry(filename)
elif selection.lower() == 'm':
modEntry(filename)
elif selection.lower() == 'a':
addEntry(filename)
elif selection.lower() == 'd':
removeEntry(filename)
elif selection.lower() == 'x':
#Print exit message
print('Good bye!')
else:
#Print error message if input doesn't match option
print('Incorrect Selection')
#This function is meant to address the first use of the program where the email file doesn't exist
def freshStart(filename):
#Opens filename in binary write mode
email_file = open(filename, 'wb+')
#Create empty dictionary
email_dict = {}
#Let's the user know why they are being prompted for initial entries
print('The file {} is not detected. Starting fresh; please provide the first entry: '.format(filename))
#Prompts for initial entry
email_dict.update({(input('Name: ')).lower(): (input('Email: ')).lower()})
#writes dictionary to binary file
pickle.dump(email_dict, email_file)
#close file
email_file.close()
def menu(options):
#Printing program menu
print('PROGRAM MENU')
print('E-mail Lookup (press L)')
print('Add an entry (press A)')
print('Modify an entry (press M)')
print('Delete an entry (press D)')
print('EXIT (press X)')
print('\n\n')
# Getting user input for menu option
selection = input('What would you like to do?')
# Input validation for menu selection
while selection.lower() not in options:
selection = input('Invalid selection. What would you like to do?\n')
print('\n')
return selection
#Function to add entry to existing binary data file
def addEntry(filename):
# Calling the readBinary function to read in the file as a dictionary
email_dict = readBinary(filename)
#Prompts for entry
email_dict.update({(input('Name: ')).lower(): (input('Email: ')).lower()})
#Opens the file on disk for writing
email_file = open(filename, 'wb')
#Dump data to file
pickle.dump(email_dict, email_file)
#close file
email_file.close()
def modEntry(filename):
# Calling the readBinary function to read in the file as a dictionary
email_dict = readBinary(filename)
# Print keys as options
print('Names\n-----------')
for i in email_dict:
print(i)
# Gets user input for entry they wish to change
query = input('Provide the name from above to change:\n')
# Prompts for email entry to modify
email_dict.update({query.lower(): (input('Email: ')).lower()})
# Opens the file on disk for writing
email_file = open(filename, 'wb')
# Dump data to file
pickle.dump(email_dict, email_file)
# close file
email_file.close()
def lookupEntry(filename):
# Calling the readBinary function to read in the file as a dictionary
email_dict = readBinary(filename)
# Gets user input for entry they wish to lookup
query = input('Provide the name to lookup:\n')
# Prints the email for the query or lets them know it's not found
print(email_dict.get(query.lower(), 'Name not found'))
print()
def removeEntry(filename):
# Calling the readBinary function to read in the file as a dictionary
email_dict = readBinary(filename)
# Print keys as options for removal
print('Names\n-----------')
for i in email_dict:
print(i)
# Gets user input for entry they wish to lookup
    query = input('Provide the name to remove:\n').lower()
    # Deletes entry from dictionary; keys are stored lowercased, so lowercase
    # the query and guard against a name that is not present
    if query in email_dict:
        del email_dict[query]
    # Verifies to the user that the entry was removed
    print(email_dict.get(query, 'Information successfully removed.\n'))
print()
# Opens the file on disk for writing
email_file = open(filename, 'wb')
# Dump data to file
pickle.dump(email_dict, email_file)
# close file
email_file.close()
def readBinary(filename):
# Opening the file in read mode
email_file = open(filename, 'rb')
# Setting EOF to false
end_of_file = False
#Setting while loop to get each object in binary file
while not end_of_file:
try:
#unpickle next object
dictionary = pickle.load(email_file)
return dictionary
except EOFError:
#Set flag to indicate EOF reached
end_of_file = True
email_file.close()
main()
|
[
"jubei421@gmail.com"
] |
jubei421@gmail.com
|
c6a165b964fc2334873f89175bfb321c4dc5ec28
|
167b69b2cd1827a2226dbeb048377e6d21ba7dcd
|
/DL_24_class_function.py
|
98f9a58d0160c4fe611f370c11aba104abaece7b
|
[] |
no_license
|
Drunk-Mozart/card
|
67068f6c709531deecaa7fde020858f3cf193106
|
30e43e327721cf09ece5f531fe2a170e8dd7bf8c
|
refs/heads/master
| 2020-09-12T23:37:37.868822
| 2019-12-03T15:40:03
| 2019-12-03T15:40:03
| 222,594,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
# If a method does not need instance attributes, you can define it as a
# class method or a static method instead
# A class method (@classmethod) that accesses class attributes
class Tool(object):
    count = 0
    @classmethod
    def show_tool_count(cls):
        print("Number of tool objects: %d" % cls.count)
    def __init__(self, name):
        self.name = name
        Tool.count += 1
tool = Tool("axe")
Tool.show_tool_count()
# A static method
class Dog(object):
    @staticmethod
    def run():
        print("The puppy wants to run")
Dog.run()
|
[
"longhr@mail2.sysu.edu.cn"
] |
longhr@mail2.sysu.edu.cn
|
a93859abc033a0d689816b491aef203030b77bc6
|
6571e0bda7da047d221dd635d668469d62057d02
|
/wort/core/config_registry.py
|
15e97aae4398be66ac40414f1f9e2e5e1c62a266
|
[] |
no_license
|
tttthomasssss/wort
|
9a3e1991cb2493d568f46912897beeb7e40e46e5
|
2d7f7ebeac7520a6124273477e7c7f07bf8aa22c
|
refs/heads/master
| 2021-04-09T17:42:34.758312
| 2016-05-12T08:46:33
| 2016-05-12T08:46:33
| 44,681,903
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,815
|
py
|
__author__ = 'thomas'
import os
import sqlite3
import uuid
class ConfigRegistry(object):
def __init__(self, path, min_frequency, lowercase, stop_words, encoding, max_features, preprocessor, tokenizer,
analyzer, token_pattern, decode_error, strip_accents, input, ngram_range, random_state, subsampling_rate,
wort_white_list, window_size, context_window_weighting, binary, weighting, cds, sppmi_shift):
self.db_path_ = os.path.join(path, 'wort_config_registry.sqlite')
self.min_frequency_ = min_frequency
self.lowercase_ = lowercase
self.stop_words_ = stop_words
self.encoding_ = encoding
self.max_features_ = max_features
self.preprocessor_ = preprocessor
self.tokenizer_ = tokenizer
self.analyzer_ = analyzer
self.token_pattern_ = token_pattern
self.decode_error_ = decode_error
self.strip_accents_ = strip_accents
self.input_ = input
self.ngram_range_ = ngram_range
self.random_state_ = random_state
self.subsampling_rate_ = subsampling_rate
self.wort_white_list_ = wort_white_list
self.window_size_ = window_size
self.context_window_weighting_ = context_window_weighting
self.binary_ = binary
self.weighting_ = weighting
self.cds_ = cds
self.sppmi_shift_ = sppmi_shift
self._setup()
def _setup(self):
if (not os.path.exists(self.db_path_)):
conn = sqlite3.connect(self.db_path_)
cursor = conn.cursor()
vocab_table = """
CREATE TABLE IF NOT EXISTS Vocab (
id INTEGER PRIMARY KEY AUTOINCREMENT,
min_frequency INTEGER,
lowercase INTEGER,
stop_words TEXT,
encoding TEXT,
max_features INTEGER,
preprocessor TEXT,
tokenizer TEXT,
analyzer TEXT,
token_pattern TEXT,
decode_error TEXT,
strip_accents TEXT,
input TEXT,
ngram_range TEXT,
random_state TEXT,
subsampling_rate FLOAT,
wort_white_list TEXT,
sub_folder TEXT
);
"""
cooc_table = """
CREATE TABLE IF NOT EXISTS Cooccurrence_Matrix (
id INTEGER PRIMARY KEY AUTOINCREMENT,
min_frequency INTEGER,
lowercase INTEGER,
stop_words TEXT,
encoding TEXT,
max_features INTEGER,
preprocessor TEXT,
tokenizer TEXT,
analyzer TEXT,
token_pattern TEXT,
decode_error TEXT,
strip_accents TEXT,
input TEXT,
ngram_range TEXT,
random_state TEXT,
subsampling_rate FLOAT,
wort_white_list TEXT,
window_size TEXT,
context_window_weighting TEXT,
binary INTEGER,
sub_folder TEXT
);
"""
pmi_table = """
CREATE TABLE IF NOT EXISTS PMI_Matrix (
id INTEGER PRIMARY KEY AUTOINCREMENT,
min_frequency INTEGER,
lowercase INTEGER,
stop_words TEXT,
encoding TEXT,
max_features INTEGER,
preprocessor TEXT,
tokenizer TEXT,
analyzer TEXT,
token_pattern TEXT,
decode_error TEXT,
strip_accents TEXT,
input TEXT,
ngram_range TEXT,
random_state TEXT,
subsampling_rate FLOAT,
wort_white_list TEXT,
window_size TEXT,
context_window_weighting TEXT,
binary INTEGER,
weighting TEXT,
cds FLOAT,
sppmi_shift INTEGER,
sub_folder TEXT
);
"""
cursor.execute(vocab_table)
cursor.execute(cooc_table)
cursor.execute(pmi_table)
conn.commit()
conn.close()
def vocab_cache_folder(self):
conn = sqlite3.connect(self.db_path_)
cursor = conn.cursor()
stmt = """
SELECT sub_folder FROM Vocab
WHERE
min_frequency = ? AND
lowercase = ? AND
stop_words = ? AND
encoding = ? AND
max_features = ? AND
preprocessor = ? AND
tokenizer = ? AND
analyzer = ? AND
token_pattern = ? AND
decode_error = ? AND
strip_accents = ? AND
input = ? AND
ngram_range = ? AND
random_state = ? AND
subsampling_rate = ? AND
wort_white_list = ?;
"""
cursor.execute(stmt, (self.min_frequency_, 1 if self.lowercase_ else 0, str(self.stop_words_), self.encoding_,
-1 if self.max_features_ is None else self.max_features_, str(self.preprocessor_),
str(self.tokenizer_), str(self.analyzer_), self.token_pattern_, self.decode_error_,
str(self.strip_accents_), self.input_, str(self.ngram_range_), str(self.random_state_),
0.0 if self.subsampling_rate_ is None else self.subsampling_rate_,
str(self.wort_white_list_)))
result = cursor.fetchone()
conn.close()
return result if not isinstance(result, tuple) else result[0]
def register_vocab(self):
conn = sqlite3.connect(self.db_path_)
cursor = conn.cursor()
stmt = """
INSERT INTO Vocab (min_frequency, lowercase, stop_words, encoding, max_features, preprocessor, tokenizer, analyzer,
token_pattern, decode_error, strip_accents, input, ngram_range, random_state,
subsampling_rate, wort_white_list, sub_folder)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
"""
sub_folder = str(uuid.uuid1())
cursor.execute(stmt, (self.min_frequency_, 1 if self.lowercase_ else 0, str(self.stop_words_), self.encoding_,
-1 if self.max_features_ is None else self.max_features_, str(self.preprocessor_),
str(self.tokenizer_), str(self.analyzer_), self.token_pattern_, self.decode_error_,
str(self.strip_accents_), self.input_, str(self.ngram_range_), str(self.random_state_),
0.0 if self.subsampling_rate_ is None else self.subsampling_rate_,
str(self.wort_white_list_), sub_folder))
conn.commit()
conn.close()
return sub_folder
def cooccurrence_matrix_folder(self):
conn = sqlite3.connect(self.db_path_)
cursor = conn.cursor()
stmt = """
SELECT sub_folder FROM Cooccurrence_Matrix
WHERE
min_frequency = ? AND
lowercase = ? AND
stop_words = ? AND
encoding = ? AND
max_features = ? AND
preprocessor = ? AND
tokenizer = ? AND
analyzer = ? AND
token_pattern = ? AND
decode_error = ? AND
strip_accents = ? AND
input = ? AND
ngram_range = ? AND
random_state = ? AND
subsampling_rate = ? AND
wort_white_list = ? AND
window_size = ? AND
context_window_weighting = ? AND
binary = ?;
"""
cursor.execute(stmt, (self.min_frequency_, 1 if self.lowercase_ else 0, str(self.stop_words_), self.encoding_,
-1 if self.max_features_ is None else self.max_features_, str(self.preprocessor_),
str(self.tokenizer_), str(self.analyzer_), self.token_pattern_, self.decode_error_,
str(self.strip_accents_), self.input_, str(self.ngram_range_), str(self.random_state_),
0.0 if self.subsampling_rate_ is None else self.subsampling_rate_,
str(self.wort_white_list_), str(self.window_size_), self.context_window_weighting_,
1 if self.binary_ else 0))
result = cursor.fetchone()
conn.close()
return result if not isinstance(result, tuple) else result[0]
def register_cooccurrence_matrix(self):
conn = sqlite3.connect(self.db_path_)
cursor = conn.cursor()
stmt = """
INSERT INTO Cooccurrence_Matrix (min_frequency, lowercase, stop_words, encoding, max_features, preprocessor, tokenizer,
analyzer, token_pattern, decode_error, strip_accents, input, ngram_range, random_state,
subsampling_rate, wort_white_list, window_size, context_window_weighting, binary, sub_folder)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
"""
sub_folder = str(uuid.uuid1())
cursor.execute(stmt, (self.min_frequency_, 1 if self.lowercase_ else 0, str(self.stop_words_), self.encoding_,
-1 if self.max_features_ is None else self.max_features_, str(self.preprocessor_),
str(self.tokenizer_), str(self.analyzer_), self.token_pattern_, self.decode_error_,
str(self.strip_accents_), self.input_, str(self.ngram_range_), str(self.random_state_),
0.0 if self.subsampling_rate_ is None else self.subsampling_rate_,
str(self.wort_white_list_), str(self.window_size_), self.context_window_weighting_,
1 if self.binary_ else 0, sub_folder))
conn.commit()
conn.close()
return sub_folder
def pmi_matrix_folder(self):
conn = sqlite3.connect(self.db_path_)
cursor = conn.cursor()
stmt = """
SELECT sub_folder FROM PMI_Matrix
WHERE
min_frequency = ? AND
lowercase = ? AND
stop_words = ? AND
encoding = ? AND
max_features = ? AND
preprocessor = ? AND
tokenizer = ? AND
analyzer = ? AND
token_pattern = ? AND
decode_error = ? AND
strip_accents = ? AND
input = ? AND
ngram_range = ? AND
random_state = ? AND
subsampling_rate = ? AND
wort_white_list = ? AND
window_size = ? AND
context_window_weighting = ? AND
binary = ? AND
weighting = ? AND
cds = ? AND
sppmi_shift = ?;
"""
cursor.execute(stmt, (self.min_frequency_, 1 if self.lowercase_ else 0, str(self.stop_words_), self.encoding_,
-1 if self.max_features_ is None else self.max_features_, str(self.preprocessor_),
str(self.tokenizer_), str(self.analyzer_), self.token_pattern_, self.decode_error_,
str(self.strip_accents_), self.input_, str(self.ngram_range_), str(self.random_state_),
0.0 if self.subsampling_rate_ is None else self.subsampling_rate_,
str(self.wort_white_list_), str(self.window_size_), self.context_window_weighting_,
1 if self.binary_ else 0, self.weighting_, self.cds_, self.sppmi_shift_))
result = cursor.fetchone()
conn.close()
return result if not isinstance(result, tuple) else result[0]
def register_pmi_matrix(self):
conn = sqlite3.connect(self.db_path_)
cursor = conn.cursor()
stmt = """
INSERT INTO PMI_Matrix (min_frequency, lowercase, stop_words, encoding, max_features, preprocessor, tokenizer,
analyzer, token_pattern, decode_error, strip_accents, input, ngram_range, random_state,
subsampling_rate, wort_white_list, window_size, context_window_weighting, binary, weighting,
cds, sppmi_shift, sub_folder)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
"""
sub_folder = str(uuid.uuid1())
cursor.execute(stmt, (self.min_frequency_, 1 if self.lowercase_ else 0, str(self.stop_words_), self.encoding_,
-1 if self.max_features_ is None else self.max_features_, str(self.preprocessor_),
str(self.tokenizer_), str(self.analyzer_), self.token_pattern_, self.decode_error_,
str(self.strip_accents_), self.input_, str(self.ngram_range_), str(self.random_state_),
0.0 if self.subsampling_rate_ is None else self.subsampling_rate_,
str(self.wort_white_list_), str(self.window_size_), self.context_window_weighting_,
1 if self.binary_ else 0, self.weighting_, self.cds_, self.sppmi_shift_, sub_folder))
conn.commit()
conn.close()
return sub_folder
|
[
"th0mas.ko6er@gmail.com"
] |
th0mas.ko6er@gmail.com
|
fc863595d297b8d3b60291baa8e91d86337567af
|
650c61c25574996faad47a655d68da9169b0e539
|
/main/Parenthesis_matching.py
|
64b3883a172bdd77d268488c42b4df34bea5c3a6
|
[] |
no_license
|
shubhmshr/problem_solving
|
26459272b9998281fc549e10cf18675455b9542c
|
c26d7c7136873bf96864328aac2c9d0ceb563eec
|
refs/heads/master
| 2023-05-23T00:22:24.314804
| 2021-06-06T18:50:11
| 2021-06-06T18:50:11
| 374,407,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
from main.stack_adt import ArrayStack
def parenthesis_check(para):
open_list = '({['
close_list = ')}]'
s = ArrayStack()
for ele in para:
if ele in open_list:
#print("pushing",ele)
s.push(ele)
elif ele in close_list:
if s.is_empty():
print("Stack is empty")
return False
elif open_list.index(s.top()) != close_list.index(ele):
return False
else:
s.pop()
return s.is_empty()
if __name__ == '__main__':
pass
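    # Hypothetical smoke test, kept commented out; it assumes ArrayStack from
    # main.stack_adt is importable:
    # print(parenthesis_check('({[]})'))  # expected: True
    # print(parenthesis_check('({['))     # expected: False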
|
[
"shubh.mshr+gitkraken@gmail.com"
] |
shubh.mshr+gitkraken@gmail.com
|
8eccec346ef0c583b129cf4ce4d3a124129b917a
|
327b662d9af2fe0647d4d4471bfb1ba65431e6f9
|
/marketsim/marketsim.py
|
43b7a8ba4da9e91615473e80a1730e751ec98baf
|
[] |
no_license
|
fords/Machine_Learning_Trade
|
414c3fc1ff22251695c414dd7ed5f33344759d55
|
020dedcc93f7a85235f8c0f7ae6d24504d812997
|
refs/heads/master
| 2021-09-13T09:29:09.980469
| 2018-04-27T20:14:17
| 2018-04-27T20:14:17
| 118,845,905
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,044
|
py
|
"""MC2-P1: Market simulator.
Copyright 2017, Georgia Tech Research Corporation
Atlanta, Georgia 30332-0415
All Rights Reserved
"""
import pandas as pd
import numpy as np
import datetime as dt
import os
from util import get_data, plot_data
def author():
return 'zwin3'
def compute_portvals(orders_file = "./orders/orders.csv", start_val = 1000000, commission=9.95, impact=0.005):
# this is the function the autograder will call to test your code
# NOTE: orders_file may be a string, or it may be a file object. Your
# code should work correctly with either input
# In the template, instead of computing the value of the portfolio, we just
# read in the value of IBM over 6 months
start_date = dt.datetime(2008,1,1)
end_date = dt.datetime(2008,6,1)
portvals = get_data(['IBM'], pd.date_range(start_date, end_date))
portvals = portvals[['IBM']] # remove SPY
rv = pd.DataFrame(index=portvals.index, data=portvals.as_matrix())
orders_df = pd.read_csv(orders_file, index_col='Date', \
parse_dates=True, na_values=['nan'],\
#names=['Date', 'SYMBOL', 'ORDER', 'SHARES'],\
#skiprows=[0]\
)
orders_df.sort_index(inplace=True)
dates = pd.date_range(min(orders_df.index), max(orders_df.index))
symbols = [] # array
symbols = list(set(orders_df['Symbol']))
prices = get_data(symbols, dates)
cash_df = pd.DataFrame( index = dates, columns = ['Cash'])
cash_df = cash_df.fillna(1.000)
prices = prices.join(cash_df)
trades = pd.DataFrame(.0, columns = prices.columns, index = prices.index)
comission_df = pd.DataFrame(index = prices.index, columns = ['Commission'])
comission_df = comission_df.fillna(.00)
impact_df = pd.DataFrame(index = prices.index, columns = ['Impact'])
impact_df = impact_df.fillna(.00)
for i, iterrows in orders_df.iterrows():
shares = iterrows['Shares']
symbols = iterrows['Symbol']
if (iterrows['Order'] == 'SELL'):
trades.loc[i][symbols] = trades.loc[i][symbols] + (-1 * shares)
elif (iterrows['Order'] == 'BUY'):
trades.loc[i][symbols] = trades.loc[i][symbols] + (1 * shares)
comission_df.loc[i]['Commission'] = comission_df.loc[i]['Commission'] + commission
impact_df.loc[i]['Impact'] = impact_df.loc[i]['Impact'] + (prices.loc[i][symbols] * shares * impact)
temp_df = prices * trades
trades['Cash'] = -1.0 * temp_df.sum(axis = 1)
trades['Cash'] = trades['Cash'] - comission_df['Commission'] - impact_df['Impact']
holdings = pd.DataFrame( .0, columns = trades.columns, index = trades.index)
holdings.loc[min(trades.index), 'Cash'] = start_val # start_date = min(trades.index)
holdings = holdings + trades
portvals = (prices * holdings.cumsum()).sum(axis = 1)
#return rv
return portvals
def assess_portfolio(portvals, rfr=0.0, sf=245.0, \
gen_plot=False):
# Get portfolio statistics (note: std_daily_ret = volatility)
#cr = compute_cumu_returns(port_val)
cr = (portvals.ix[-1,:]/portvals.ix[0,:]) -1
daily_rets = []
daily_rets = compute_daily_returns(portvals)
daily_rets = daily_rets[1:] # daily returns
adr = daily_rets.mean()
sddr = daily_rets.std() # standard deviation daily return
difference = daily_rets - rfr
mean_val = difference.mean()
sr = np.sqrt(sf)* mean_val/sddr #sharpe ratio
# Compare daily portfolio value with SPY using a normalized plot
if gen_plot:
# add code to plot here
df_temp = pd.concat([port_val, prices_SPY], keys=['Portfolio', 'SPY'], axis=1)
plot_data( df_temp, title= "Plot" , ylabel= "Prices")
pass
ev = portvals.ix[-1,:]
#print ("ev",ev)
return cr, adr, sddr, sr
def compute_daily_returns(df):
daily_returns = df.copy()
daily_returns[1:] = (df[1:]/ df[:-1].values) -1
daily_returns = daily_returns[1:]
return daily_returns
def compute_cumu_returns(df):
""" Compute and return the daily cumulative return values"""
cumulative_df = df.copy()
cumulative_df[0:] = (df[0:]/ df.ix[0,:]) -1
return cumulative_df
def test_code(start_date= dt.datetime(2011,01,14), end_date = dt.datetime(2011,12,14), of= "./orders/orders2.csv" ):
# this is a helper function you can use to test your code
# note that during autograding his function will not be called.
# Define input parameters
#of = "./orders/orders2.csv"
sv = 1000000
# Process orders
portvals = compute_portvals(orders_file = of, start_val = sv)
if isinstance(portvals, pd.DataFrame):
portvals = portvals[portvals.columns[0]] # just get the first column
    else:
        print "warning, code did not return a DataFrame"
# Get portfolio stats
# Here we just fake the data. you should use your code from previous assignments.
#start_date = dt.datetime(2011,01,14)
#end_date = dt.datetime(2011,12,14)
cum_ret, avg_daily_ret, std_daily_ret, sharpe_ratio = assess_portfolio(portvals, sf =245)
prices_SPX = get_data(['$SPX'] , pd.date_range(start_date, end_date))
prices_SPX = prices_SPX[['$SPX']]
portvals_SPX = (prices_SPX/prices_SPX.ix[0,:]).sum(axis=1)
cum_ret_SPY, avg_daily_ret_SPY, std_daily_ret_SPY, sharpe_ratio_SPY = assess_portfolio(portvals_SPX,sf =252)
#cum_ret_SPY, avg_daily_ret_SPY, std_daily_ret_SPY, sharpe_ratio_SPY = [0.2,0.01,0.02,1.5]
daily_returns = portvals[1:].values / portvals[:-1] - 1
cum_ret = portvals[-1] / portvals[0] - 1
avg_daily_ret = daily_returns.mean()
std_daily_ret = daily_returns.std()
sharpe_ratio = np.sqrt(245) * avg_daily_ret / std_daily_ret #sf = 245
daily_returns_SPY = portvals_SPX[1:].values / portvals_SPX[:-1] - 1
cum_ret_SPY = portvals_SPX[-1] / portvals_SPX[0] - 1
avg_daily_ret_SPY = daily_returns_SPY.mean()
std_daily_ret_SPY = daily_returns_SPY.std()
sharpe_ratio_SPY = np.sqrt(252) * avg_daily_ret_SPY / std_daily_ret_SPY #sf = 252
# Compare portfolio against $SPX
print "Date Range: {} to {}".format(start_date, end_date)
print
print "Sharpe Ratio of Fund: {}".format(sharpe_ratio)
print "Sharpe Ratio of SPY : {}".format(sharpe_ratio_SPY)
print
print "Cumulative Return of Fund: {}".format(cum_ret)
print "Cumulative Return of SPY : {}".format(cum_ret_SPY)
print
print "Standard Deviation of Fund: {}".format(std_daily_ret)
print "Standard Deviation of SPY : {}".format(std_daily_ret_SPY)
print
print "Average Daily Return of Fund: {}".format(avg_daily_ret)
print "Average Daily Return of SPY : {}".format(avg_daily_ret_SPY)
print
print "Final Portfolio Value: {}".format(portvals[-1])
if __name__ == "__main__":
start_date= dt.datetime(2011,01,14)
end_date = dt.datetime(2011,12,14)
of= "./orders/orders2.csv"
test_code(start_date,end_date, of)
#test_code(dt.datetime(2011,01,10), dt.datetime(2011,12,20), "./orders/orders.csv")
|
[
"fords@users.noreply.github.com"
] |
fords@users.noreply.github.com
|
8dcf5d6849f04540c24cb60627bdd958716b733d
|
518e9cbf940b5addb2194a96f277050c44963121
|
/Python/Practices/linked_list/sum_lists.py
|
883c6a3a1c50252dfa36caced8e265b0494ad9dd
|
[] |
no_license
|
Mr-Perfection/coding_practice
|
58322192b76a2ab70c4ae11cd05b3bf993272c52
|
41df85292a151eef3266b01545124aeb4e831286
|
refs/heads/master
| 2021-01-11T02:24:26.234486
| 2017-04-11T00:21:10
| 2017-04-11T00:21:10
| 70,965,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
"""
Sum Lists: You have two numbers represented by a linked list, where each node contains a single digit.
The digits are stored in reverse order, such that the 1's digit is at the head of the list.
Write a function that adds the two numbers and returns the sum as a linked list.
Suppose the digits are stored in forward order. Repeat the above problem. EXAMPLE
Input: (6 -> 1 -> 7) + (2 -> 9 -> 5). That is, 617 + 295. Output: 9 -> 1 -> 2. That is, 912.
6 -> 1 -> 7
2 -> 9 -> 5
= 617 + 295 = 912
returns 9 -> 1 -> 2
Approach #1: reverse lists and add with carry (O(n) time)
7 -> 1 -> 6
5 -> 9 -> 2
Approach #2: make them integers and add them. Then, make a linked-list
"""
|
[
"sungsoolee0127@gmail.com"
] |
sungsoolee0127@gmail.com
|
4aeb56fe0cc5beeb4e26eadb50619cb4dddf1437
|
a6b2d9c522db6c9b47633efc5df8d630ba5be6e2
|
/paper_blitz/survey/create_survey.py
|
93bcc78421d679c188ab2afe79660c45c2601822
|
[] |
no_license
|
Noezor/paper_blitz
|
be64bbfee6969f928362a6793141a26837df6e08
|
1c4e601fcbf6b64078e8ec217d184ee7384afc9f
|
refs/heads/master
| 2020-12-12T16:08:27.691158
| 2020-02-12T20:40:55
| 2020-02-12T20:40:55
| 234,169,822
| 0
| 0
| null | 2020-02-12T20:40:57
| 2020-01-15T20:45:26
|
Python
|
UTF-8
|
Python
| false
| false
| 765
|
py
|
from paper_blitz.stack.model import Participant, Stack, Group
from paper_blitz.articles.article import Article
from paper_blitz.config import db
from datetime import datetime, timedelta
class Article_Selector_Survey(object):
def select_articles(self, group : Group):
non_presented_articles = group.non_presented_articles()
articles_current_week = [article for article in non_presented_articles if self.is_submission_current_week(article)]
old_articles = [article for article in non_presented_articles if article not in articles_current_week]
return articles_current_week, old_articles
def is_submission_current_week(self, article : Stack):
return (article.added + timedelta(days = 7)) > datetime.now()
|
[
"noe.pion@gmail.com"
] |
noe.pion@gmail.com
|
3fa9322ab882012f8dd6fc64efa180bbd27ec444
|
f0856e60a095ce99ec3497b3f27567803056ac60
|
/keras/keras19~31[scaler, CNN(GAP,DNN)]/keras31_cifar100_3_Djsull.py
|
9150dcc697bf72ace67e7f4f1b9da8a5c55e6d9b
|
[] |
no_license
|
hjuju/TF_Study-HAN
|
dcbac17ce8b8885f5fb7d7f554230c2948fda9ac
|
c0faf98380e7f220868ddf83a9aaacaa4ebd2c2a
|
refs/heads/main
| 2023-09-04T09:13:33.212258
| 2021-10-27T08:00:49
| 2021-10-27T08:00:49
| 384,371,952
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,418
|
py
|
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler, QuantileTransformer,MaxAbsScaler, PowerTransformer, OneHotEncoder
from tensorflow.keras.datasets import cifar100
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input, Conv2D, Flatten, MaxPool2D
from keras.utils import to_categorical
from tensorflow.keras.callbacks import EarlyStopping
from icecream import ic
import time
#1. Data preprocessing
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train = x_train.reshape(50000, 32 * 32 * 3)
x_test = x_test.reshape(10000, 32 * 32 * 3) # reshape to 2D
ic(x_train)
# # x_train = x_train/255.
# # x_test = x_test/255.
# y_train = to_categorical(y_train)
# y_test = to_categorical(y_test)
# scaler = StandardScaler()
# x_train = scaler.fit_transform(x_train) # usable only on x_train; combines x_train = scaler.fit(x_train) and x_train = scaler.transform(x_train) into one line
# x_test = scaler.transform(x_test)
# x_train = x_train.reshape(50000, 32, 32, 3)
# x_test = x_test.reshape(10000, 32 ,32, 3) # back to 4D after scaling
# # print(np.unique(y_train)) # [0 1 2 3 4 5 6 7 8 9]
# # one = OneHotEncoder() # the input shape must be 2D
# # y_train = y_train.reshape(-1,1) # change to 2D
# # y_test = y_test.reshape(-1,1)
# # one.fit(y_train)
# # y_train = one.transform(y_train).toarray() # (50000, 100)
# # y_test = one.transform(y_test).toarray() # (10000, 100)
# # to_categorical -> even if the labels are 3,4,6,8, classes 0,1,2 are generated automatically (more flexible about shape)
# # labels 3,4,5,6,7 stay as 3,4,5,6,7 (the input shape must be 2D)
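# # (Illustrative aside, not from the original run: to_categorical([3, 4, 6])
# # yields one-hot rows of length 7 covering classes 0..6 automatically, while
# # OneHotEncoder needs y.reshape(-1, 1) first and produces one column per
# # class actually observed -- here 3 columns.)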
# #2. Modeling
# model = Sequential()
# model.add(Conv2D(filters=128, kernel_size=(2, 2), padding='valid', activation='relu', input_shape=(32, 32, 3)))
# model.add(Conv2D(128, (2, 2), padding='same', activation='relu'))
# model.add(MaxPool2D())
# model.add(Conv2D(128, (2, 2), padding='valid', activation='relu'))
# model.add(Conv2D(128, (2, 2), padding='same', activation='relu'))
# model.add(MaxPool2D())
# model.add(Conv2D(64, (2, 2), activation='relu'))
# model.add(Conv2D(64, (2, 2), padding='same', activation='relu')) # unless the input is large, kernel sizes rarely go up to 4x4
# model.add(MaxPool2D()) # 556 outputs / check the outgoing data size to set the node counts of the following layers
# model.add(Flatten())
# model.add(Dense(128, activation='relu'))
# model.add(Dense(128, activation='relu'))
# model.add(Dense(128, activation='relu'))
# model.add(Dense(100, activation='softmax'))
# #3. Compile, train
# es = EarlyStopping(monitor='val_loss', patience=10, mode='auto', verbose=1)
# model.compile(loss='categorical_crossentropy', optimizer='adam',
# metrics=['acc'])
# start = time.time()
# hist = model.fit(x_train, y_train, epochs=100, batch_size=64,
# validation_split=0.25, callbacks=[es])
# 걸린시간 = round((time.time() - start) /60,1)
# #4. evaluating, prediction
# loss = model.evaluate(x_test, y_test, batch_size=128)
# print('loss = ', loss[0])
# print('accuracy = ', loss[1])
# ic(f'{걸린시간}분')
# import matplotlib.pyplot as plt
# plt.figure(figsize=(9,5))
# #1
# plt.subplot(2,1,1) # drawing 2 plots: this is the first position
# plt.plot(hist.history['loss'], marker='.', c='red', label='loss')
# plt.plot(hist.history['val_loss'], marker='.', c='blue', label='val_loss')
# plt.grid()
# plt.title('loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(loc='upper right')
# #2
# plt.subplot(2,1,2) # drawing 2 plots: this is the second position
# plt.plot(hist.history['acc'])
# plt.plot(hist.history['val_acc'])
# plt.grid()
# plt.title('acc')
# plt.ylabel('acc')
# plt.xlabel('epoch')
# plt.legend(['acc', 'val_acc'])
# plt.show
# '''
# loss = 3.0406737327575684
# accuracy = 0.3928000032901764
# batch_size=64, validation_split=0.25
# loss = 5.080616474151611
# accuracy = 0.33799999952316284
# ic| f'{걸린시간}분': '3.5분'
# model revised / patience=7, epochs=100, batch_size=64, validation_split=0.25
# loss = 2.777371406555176
# accuracy = 0.376800000667572
# '''
|
[
"tkackeowjs@naver.com"
] |
tkackeowjs@naver.com
|
cdaba21d2292e0aee6015161688ac6fee5b5c47a
|
5b1a2638cfff6cb15854461eafd36ef5895a2ee4
|
/calculating_pi.py
|
9d3614cee03723bd481277bf618ad62bfa678941
|
[] |
no_license
|
NataliaGuruleva/NataliaGurAU
|
883f69c078b107687f656b5dc1e7f119e43ac463
|
587d844ad647abb6d0089aba6dbaa7850d03944e
|
refs/heads/master
| 2023-04-29T21:11:37.749483
| 2021-05-18T00:53:07
| 2021-05-18T00:53:07
| 298,563,783
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
import random
import time
import multiprocessing
n = 100000
curr = 100
def calculate_pi(q):
count = 0
for i in range(q):
x=random.uniform(0, 1)
y=random.uniform(0, 1)
if x * x + y * y <= 1:
count = count + 1
return count
def points(pool):
l = pool.map(calculate_pi, [n]*curr)
return l
if __name__=='__main__':
    t0 = time.time()  # start the timer before the work; originally it was started after
    pool = multiprocessing.Pool()
    N = sum(points(pool))
    # the fraction of points landing inside the quarter circle, times 4, estimates pi
    pi = (N / (n * curr)) * 4
    print('pi:', pi)
    print("Time spent:", time.time() - t0)
|
[
"noreply@github.com"
] |
noreply@github.com
|
fabc61328f4c24c15543f48a5cc31cedcdf76354
|
4a6c5b4a04946d2b4b4599a68f957ac9c70fb87c
|
/code-examples/pytest/foo/bar/bar1.py
|
efd641db20c3b21d90f24bb4b9fe146165676bd9
|
[] |
no_license
|
weichuntsai0217/work-note
|
29d2da3b7af7431a2ee67039ad801e0441ed4039
|
425528b59d6b22a8ad54cadcbacaa21d69bb6242
|
refs/heads/master
| 2021-06-14T10:57:52.025803
| 2021-01-31T10:25:28
| 2021-02-06T07:04:44
| 163,857,958
| 0
| 0
| null | 2021-03-25T23:00:32
| 2019-01-02T15:28:01
|
Python
|
UTF-8
|
Python
| false
| false
| 56
|
py
|
from ..foo1 import f1
def b1(x):
return f1(x) * 10
|
[
"jimmy_tsai@trendmicro.com"
] |
jimmy_tsai@trendmicro.com
|
c97e9f32dd8b94b6bb3365179ef73965eccd8be5
|
bedae10cbaf676d8f309fa593028558d9a6e9c6b
|
/Algorithm/Easy/1000+/1206NextGreaterElementI.py
|
dfcffec1d8a09564dfb341ed4eb30870284fee73
|
[
"MIT"
] |
permissive
|
MartinYan623/Lint-Code
|
5800d61a54f87306c25ff2e3d535145312b42c66
|
57d2fa441d6496234615736e3f55d0b71aaa51dc
|
refs/heads/master
| 2021-06-06T13:51:19.587424
| 2021-04-21T12:23:19
| 2021-04-21T12:23:19
| 139,412,536
| 0
| 0
| null | 2020-08-08T10:28:52
| 2018-07-02T08:18:11
|
Python
|
UTF-8
|
Python
| false
| false
| 627
|
py
|
class Solution:
"""
@param nums1: an array
@param nums2: an array
@return: find all the next greater numbers for nums1's elements in the corresponding places of nums2
"""
def nextGreaterElement(self, nums1, nums2):
# Write your code here
for i in range(len(nums1)):
index=nums2.index(nums1[i])
flag=False
for j in range(index+1,len(nums2)):
if nums2[j]>nums1[i]:
flag=True
nums1[i]=nums2[j]
break
if flag==False:
nums1[i]=-1
return nums1
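# A hypothetical O(len(nums1) + len(nums2)) alternative using a monotonic stack,
# shown for comparison; it is not part of the original solution.
def next_greater_element_stack(nums1, nums2):
    greater = {}  # maps each value in nums2 to its next greater element
    stack = []
    for x in nums2:
        # every smaller value still on the stack has x as its next greater element
        while stack and stack[-1] < x:
            greater[stack.pop()] = x
        stack.append(x)
    return [greater.get(x, -1) for x in nums1]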
|
[
"Maitong_y@163.com"
] |
Maitong_y@163.com
|
b85e78725562d0b612aa34719ff3eb0a496140ca
|
f0505f73cc535ce7abb3c3fcf1791830271444eb
|
/Problems/Linkedlists/remove_dup.py
|
4b245100ce96d769684376d4cc1eefe98df6bf23
|
[] |
no_license
|
ravitejamallozala/DataStructures
|
13921efc19919c1b7796e13e8362b1874c488c1e
|
a7ca3cf4da40e71ee0bfc7a3d2b657c28bd827a9
|
refs/heads/master
| 2023-07-16T03:09:00.941236
| 2021-09-03T17:23:58
| 2021-09-03T17:23:58
| 103,118,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,883
|
py
|
"""
Remove duplicates from an unsorted linked list
Given an unsorted linked list of N nodes. The task is to remove duplicate elements from this unsorted Linked List. When a value appears in multiple nodes, the node which appeared first should be kept, all others duplicates are to be removed.
Example 1:
Input:
N = 4
value[] = {5,2,2,4}
Output: 5 2 4
Explanation:Given linked list elements are
5->2->2->4, in which 2 is repeated only.
So, we will delete the extra repeated
elements 2 from the linked list and the
resultant linked list will contain 5->2->4
GFG
"""
# User function Template for python3
'''
# Node Class
class Node:
def __init__(self, data): # data -> value stored in node
self.data = data
self.next = None
'''
class Solution:
# Function to remove duplicates from unsorted linked list.
def removeDuplicates(self, head):
# code here
# return head after editing list
ele = set()
temp = head
prev = None
        while (temp):
            if temp.data in ele:  # fixed: Node stores its value in .data, not .data_Arr
                temp = temp.next
                prev.next = temp
            else:
                ele.add(temp.data)
                prev = temp
                temp = temp.next
return head
# {
# Driver Code Starts
# Initial Template for Python 3
# Contributed by : Nagendra Jha
import atexit
import io
import sys
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
@atexit.register
def write():
sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
# Node Class
class Node:
def __init__(self, data): # data -> value stored in node
self.data = data
self.next = None
# Linked List Class
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
# creates a new node with given value and appends it at the end of the linked list
def append(self, new_value):
new_node = Node(new_value)
if self.head is None:
self.head = new_node
self.tail = new_node
return
self.tail.next = new_node
self.tail = new_node
# prints the elements of linked list starting with head
def printList(self):
if self.head is None:
print(' ')
return
curr_node = self.head
while curr_node:
            print(curr_node.data, end=" ")
curr_node = curr_node.next
print(' ')
if __name__ == '__main__':
t = int(input())
for cases in range(t):
n = int(input())
a = LinkedList() # create a new linked list 'a'.
nodes = list(map(int, input().strip().split()))
for x in nodes:
a.append(x)
a.head = Solution().removeDuplicates(a.head)
a.printList()
# } Driver Code Ends
|
[
"raviteja.mallozala@ibigroup.com"
] |
raviteja.mallozala@ibigroup.com
|
ac30c543b6bdf9cc9594a8b2d469493f5c9941a5
|
121135b3585ec70b1d0b68962b413b48f75270f8
|
/sem_2/task050318/task_2/homo_gramma.py
|
0d4e2b224527c8683bed350bdda993210a66d6fd
|
[] |
no_license
|
Ananaskelly/data_analysis_course_hw
|
80d05488bb9c7df114c3b8131971ea9ad04df9a8
|
519d1a66f1056e5b6612206c2de631e24a35699b
|
refs/heads/master
| 2021-09-16T06:07:36.319910
| 2018-06-17T21:36:42
| 2018-06-17T21:36:42
| 106,135,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
import nltk
from nltk import Nonterminal, nonterminals, Production, CFG
def run():
    # Parses the classically ambiguous Russian sentence
    # 'я видел его семью своими глазами' ("I saw his family with my own eyes");
    # the loop below prints every parse tree the grammar licenses.
    cool_grammar = nltk.CFG.fromstring("""
S -> NP VP
PP -> Det N
NP -> Det N | PP PP | 'я'
VP -> V NP | VP PP
Det -> 'его' | 'своими'
N -> 'семью' | 'глазами'
V -> 'видел'
""")
sent = ['я', 'видел', 'его', 'семью', 'своими', 'глазами']
parser = nltk.ChartParser(cool_grammar)
for tree in parser.parse(sent):
print(tree)
|
[
"anastasiya_johnni@mail.ru"
] |
anastasiya_johnni@mail.ru
|
4cf32a4ed4b7852980136c686edbf100e8a3da2b
|
67fac9a29db0715b9e55519d952bc595d9807c56
|
/05_DataStructures/5.3_TuplesAndSequences.py
|
8ae68d8bf49db87d3e1ece82e4f486cff2cfb7fc
|
[] |
no_license
|
aritrac/SolidPython
|
ee2b2f14a33e577e1c0728a6bc2649e27ef8c2c2
|
92a788a81afdcaf38fa5d13d8890764a7b62ae47
|
refs/heads/master
| 2022-12-08T07:40:22.104382
| 2020-09-09T07:50:05
| 2020-09-09T07:50:05
| 293,191,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
t = 12345, 54321, 'hello!'
print(t[0])
print(t)
# Tuples may be nested:
u = t, (1, 2, 3, 4, 5)
print(u)
#Tuples are immutable:
#t[0] = 88888 #This will error out
#but they can contain mutable objects:
v = ([1, 2, 3], [3, 2, 1])
print(v)
empty = ()
singleton = 'hello', # <-- note trailing comma, so it is not a string
print(len(empty))
print(len(singleton))
print(singleton)
#t = 12345, 54321, 'hello!' #Tuple packing
#Tuple unpacking
x,y,z = t
print(x)
print(y)
print(z)
|
[
"aritrachatterjee2007@gmail.com"
] |
aritrachatterjee2007@gmail.com
|
2a961b54a6287de52ac25425a39ede9c95574aba
|
a00c4056e2dfcc43a0c91cfae76cbff7ec5aa5f3
|
/models/models.py
|
28a69988b2bed0a414dfd239e69ae5c48ed52518
|
[
"MIT"
] |
permissive
|
renfan1/I2V-GAN
|
72ce038f9b1ba817074614116d7676eccdbde684
|
0fb2a79548cbbd5952913f9975110c15d01559aa
|
refs/heads/main
| 2023-08-22T05:39:15.643111
| 2021-10-26T04:38:33
| 2021-10-26T04:38:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
def create_model(opt):
model = None
# print(opt.model)
if opt.model == 'cycle_gan':
assert(opt.dataset_mode == 'unaligned')
from .cycle_gan_model import CycleGANModel
model = CycleGANModel()
elif opt.model == 'i2vgan':
assert(opt.dataset_mode == 'unaligned_triplet')
from .i2v_gan_model import I2VGAN
model = I2VGAN()
else:
raise ValueError("Model [%s] not recognized." % opt.model)
model.initialize(opt)
print("model [%s] was created" % (model.name()))
return model
|
[
"3120190999@bit.edu.cn"
] |
3120190999@bit.edu.cn
|
c618c313ac7c0543f07fe46761091bec8660f401
|
f6087d2a5ddaf930a0b693460f2abea8655c2d2a
|
/mathematics/sieve_of_eratosthenes/efficient_method.py
|
ff317ae4dd4be0189e040c1f735cc27a3940fadd
|
[] |
no_license
|
pragatij17/Algorithms
|
b9797d9ee9dfa838d4de67a478e3aa9950cd6015
|
ef6577e82cbc8e372d066784a5479b9b86463581
|
refs/heads/master
| 2023-04-22T20:10:29.478963
| 2021-05-14T12:29:16
| 2021-05-14T12:29:16
| 332,750,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
def SieveOfEratosthenes(n):
    # prime[i] stays True until i is crossed out as a multiple of a smaller prime
    prime = [True for i in range(n+1)]
    p = 2
    while (p * p <= n):
        if (prime[p] == True):
            # cross out every multiple of p, starting from p*p
            for i in range(p * p, n+1, p):
                prime[i] = False
        p += 1
    for p in range(2, n+1):
        if prime[p]:
            print(p)
n=int(input("Enter the number:"))
SieveOfEratosthenes(n)  # the function prints the primes itself; wrapping it in print() also printed None
|
[
"pj.pragatijain09@gmail.com"
] |
pj.pragatijain09@gmail.com
|
964de307289972354a1b551f7c32d12f000e98d4
|
95ec5d4d14516be1a1fdcc8bd1fb29279dfaff3c
|
/settings.py
|
513790faf5a80aaabdf246439d26eae875211e35
|
[] |
no_license
|
gitter-badger/dev4gov.org
|
16e25621a81552a6458cdd21cb96f17c7e222350
|
a03165e921d0e76ad4283c970b3e0540f7d53c75
|
refs/heads/master
| 2021-01-18T06:52:15.360799
| 2011-08-21T09:01:55
| 2011-08-21T09:01:55
| 41,620,266
| 0
| 0
| null | 2015-08-30T07:25:32
| 2015-08-30T07:25:32
| null |
UTF-8
|
Python
| false
| false
| 5,041
|
py
|
# Django settings for dev4gov_org project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'f4x@q6h+!nk6&=nf#ro5hh(p-%!ohxm_s70dyd7e@1@7@t)s3g'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'dev4gov_org.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"sirmmo@gmail.com"
] |
sirmmo@gmail.com
|
aa550ba6970d764ed8604ac766040589e7cf3368
|
8cf35198f7428541afd2344e329d5555d2331d2e
|
/battleship_for_34.py
|
bb6943bed8a6308a266be7ab5a835b22fa9e8ea7
|
[] |
no_license
|
CrazeeIvan/Python
|
9023efb647d2557fdb727e9099fcfa021548d2d0
|
ba9defd812cb670f9d4d7b8c775368c9df9cfdcc
|
refs/heads/master
| 2021-01-10T17:59:07.999948
| 2016-03-29T23:03:37
| 2016-03-29T23:03:37
| 52,006,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
from random import randint
#initialise empty board
board = []
#add list to list to create 2-dimensional array
#["O", "O", "O", "O", "O"]
#["O", "O", "O", "O", "O"]
#["O", "O", "O", "O", "O"]
#["O", "O", "O", "O", "O"]
#["O", "O", "O", "O", "O"]
for x in range(5):
board.append(["O"] * 5)
#draw board function
def print_board(board):
for row in board:
print(" ".join(row))
print("Let's play Battleship!")
#select a random row using randint
def random_row(board):
return randint(0, len(board) - 1)
#select a random column using randint
def random_col(board):
return randint(0, len(board[0]) - 1)
ship_row = random_row(board)
ship_col = random_col(board)
#for testing/debugging
print(ship_row)
print(ship_col)
#beginning of our gameloop
for turn in range(4):
#print the turn to the player
print("Turn", turn + 1)
#print a message to the user asking for input and wait
guess_row = int(input("Guess Row:"))
guess_col = int(input("Guess Col:"))
#if the player guess correctly, reward + print the board, end the game loop
if guess_row == ship_row and guess_col == ship_col:
print("Congratulations! You sunk my battleship!")
#draw a B to the gameboard array, to show where the enemy is
board[ship_row][ship_col]="B"
print_board(board)
break
else:
#if the player guesses outside of the board size, advise them
if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4):
print("Oops, that's not even in the ocean.")
#if the player guesses a location they have already selected, advise them
elif(board[guess_row][guess_col] == "X"):
print("You guessed that one already.")
else:
#else, they missed, advise them
print("You missed my battleship!")
board[guess_row][guess_col] = "X"
#if the turns equals 3, the game is over and the player is a loser
if (turn == 3):
#draw a B to the gameboard array, to show where the enemy is
board[ship_row][ship_col]="B"
print("Game Over")
print_board(board)
|
[
"ciaranmaher@gmail.com"
] |
ciaranmaher@gmail.com
|
e53f90e1a1b7c9990dec171bf0d4195e10c3acdc
|
12ca5c99d39c409bc636e30ad52e5b3bb309bbb9
|
/5_oblig/kode_uke6/velkomst.py
|
5478baf91a1fe90245fe36b60363bdb8444b6ff6
|
[] |
no_license
|
emelleby/in1000
|
6b4ee4d6505800b5d4b06ff12a4cca1b69d5c20d
|
613115cee2cd33c56df63d3dbb20c60127126fba
|
refs/heads/master
| 2021-07-04T15:58:14.466735
| 2021-05-19T18:09:49
| 2021-05-19T18:09:49
| 239,628,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 78
|
py
|
def velkomst(navn):
print("Hei " + navn)
gk = "Geir Kjetil"
velkomst(gk)
|
[
"emelleby@gmail.com"
] |
emelleby@gmail.com
|
ccf04a495abae668443c87f8364c24d6438f2ec7
|
13866adfde5a56fc4061f1598d70a02e7c2c960e
|
/Planck_MMF/analysis/modules/simulate/sim_fs_data.py
|
6b0c4cb9a0e176bd0f909806b858be1bfd1ad0db
|
[] |
no_license
|
adityarotti/MMF-filters-for-rSZ-cluster-detection
|
4d8bcdd859f992b0a32e155eb99a66bacceea21b
|
d79998f2c1bdebc832e4212cd98cbddbcfc0be90
|
refs/heads/master
| 2023-03-08T12:47:44.231905
| 2021-02-18T15:46:06
| 2021-02-18T15:46:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,525
|
py
|
##################################################################################################
# Author: Aditya Rotti, Jodrell Bank Center for Astrophysics, University of Manchester #
# Date created: 15 January September 2019 #
# Date modified: 16 March 2019 #
##################################################################################################
import numpy as np
import healpy as h
from scipy.interpolate import interp1d
from astropy.io import fits
from modules.settings import global_mmf_settings as gset
from spectral_template import sz_spec as szsed
from spatial_template import sim_cluster as sc
from spatial_template import sz_pressure_profile as szp
from flat_sky_codes import flat_sky_analysis as fsa
from flat_sky_codes import tangent_plane_analysis as tpa
def sim_cluster(clus_prop={},write_alm=True,cutoff=10.,profile="GNFW"):
if clus_prop=={}:
clus_prop={}
clus_prop[0]={}
clus_prop[0]["injy"]=0.0002 ; clus_prop[0]["theta500"]=8. # arcminutes
clus_prop[0]["lon"]=-4. ; clus_prop[0]["lat"]=60.
clus_prop[2]={}
clus_prop[2]["injy"]=0.0004 ; clus_prop[2]["theta500"]=12. # arcminutes
clus_prop[2]["lon"]=4. ; clus_prop[2]["lat"]=62.
clusters=np.zeros(h.nside2npix(gset.mmfset.nside),float)
for k in clus_prop.keys():
#print clus_prop[k].keys()
inj_y=clus_prop[k]['injy']
theta500=clus_prop[k]['theta500']
lon=clus_prop[k]['lon']
lat=clus_prop[k]['lat']
rhop=np.linspace(0.001*theta500,1.2*cutoff*theta500,500)  # num must be an int
yprofile=np.zeros(np.size(rhop),float)
if profile=="GNFW":
yprofile=szp.gnfw_2D_pressure_profile(rhop,theta500)
elif profile=="beta":
yprofile=szp.analytical_beta_2D_profile_profile(rhop,theta500)
fn_yprofile=interp1d(rhop,yprofile,kind="cubic",bounds_error=False,fill_value=(yprofile[0],yprofile[-1]))
vec=h.ang2vec(lon,lat,lonlat=True)
cpix=h.vec2pix(gset.mmfset.nside,vec[0],vec[1],vec[2])
theta0,phi0=h.pix2ang(gset.mmfset.nside,cpix)
spix=h.query_disc(gset.mmfset.nside,vec,(theta500*cutoff/60.)*np.pi/180.,inclusive=True,fact=2)
theta1,phi1=h.pix2ang(gset.mmfset.nside,spix)
cosbeta=np.sin(theta0)*np.sin(theta1)*np.cos(phi1-phi0)+np.cos(theta0)*np.cos(theta1)
beta=np.arccos(cosbeta)*180./np.pi*60
clusters[cpix]=fn_yprofile(0.)*inj_y
for i,pix in enumerate(spix):
clusters[pix]=fn_yprofile(beta[i])*inj_y
filename=gset.mmfset.paths["clusters"] + "clusters.fits"
h.write_map(filename,clusters,overwrite=True)
if write_alm:
filename=gset.mmfset.paths["clusters"] + "clusters_alm.fits"
cluster_alm=h.map2alm(clusters,lmax=3*gset.mmfset.nside)
h.write_alm(filename,cluster_alm,overwrite=True)
def gen_multi_freq_cluster_map(T=0.):
plbp_sz_spec=szsed.return_planck_bp_sz_spec(T=T)
filename=gset.mmfset.paths["clusters"] + "clusters_alm.fits"
cluster_alm=h.read_alm(filename)
for ch in gset.mmfset.channels:
fwhm=(gset.mmfset.fwhm[ch]/60.)*np.pi/180.
bl=h.gauss_beam(fwhm,lmax=3*gset.mmfset.nside)
if gset.mmfset.pwc:
pwc=h.pixwin(gset.mmfset.nside)[:np.size(bl)]
bl=bl*pwc
almp=h.almxfl(cluster_alm,bl)*plbp_sz_spec[ch]
cluster=h.alm2map(almp,gset.mmfset.nside,verbose=False)
filename=gset.mmfset.paths["clusters"] + "cluster_" + str(int(ch)) + "GHz.fits"
h.write_map(filename,cluster,overwrite=True)
print(ch)
def sim_multi_frequency_cmb_map():
cl=h.read_cl(gset.mmfset.cmb_spectra)[0]
ell=np.arange(np.size(cl),dtype="float")
cmbalm=h.synalm(cl,lmax=3*gset.mmfset.nside)
for ch in gset.mmfset.channels:
fwhm=(gset.mmfset.fwhm[ch]/60.)*np.pi/180.
bl=h.gauss_beam(fwhm,lmax=3*gset.mmfset.nside)
if gset.mmfset.pwc:
pwc=h.pixwin(gset.mmfset.nside)[:np.size(bl)]
bl=bl*pwc
almp=h.almxfl(cmbalm,bl)
cmb=h.alm2map(almp,gset.mmfset.nside,verbose=False)*1e-6
filename=gset.mmfset.paths["cmb"] + "cmb_" + str(int(ch)) + "GHz.fits"
h.write_map(filename,cmb,overwrite=True)
print(ch)
def extract_tangent_planes(latlon=[],rescale_y=1.):
pc=tpa.tangent_plane_setup(gset.mmfset.nside,gset.mmfset.xsize,60.,-4.,rescale=1.)
if latlon==[]:
p1=tpa.tangent_plane_setup(gset.mmfset.nside,gset.mmfset.xsize,60.,51.,rescale=1.)
else:
p1=tpa.tangent_plane_setup(gset.mmfset.nside,gset.mmfset.xsize,latlon[0],latlon[1],rescale=1.)
cmb=np.zeros((np.size(gset.mmfset.channels),gset.mmfset.npix,gset.mmfset.npix),float)
noise=np.zeros((np.size(gset.mmfset.channels),gset.mmfset.npix,gset.mmfset.npix),float)
cmbfrg=np.zeros((np.size(gset.mmfset.channels),gset.mmfset.npix,gset.mmfset.npix),float)
injclus=np.zeros((np.size(gset.mmfset.channels),gset.mmfset.npix,gset.mmfset.npix),float)
for i,ch in enumerate(gset.mmfset.channels):
# These maps are in uK_RJ units and we are converting them to K_CMB.
filename=gset.mmfset.paths["psm_sims"] + "group2_map_" + str(int(ch)) + "GHz.fits"
tempmap=h.read_map(filename,verbose=False)*gset.mmfset.conv_KRJ_KCMB[ch]*1e-6
cmbfrg[i,:,:]=p1.get_tangent_plane(tempmap)
# These maps are in uK_RJ units and we are converting them to K_CMB.
filename=gset.mmfset.paths["psm_sims"] + "group8_map_" + str(int(ch)) + "GHz.fits"
tempmap=h.read_map(filename,verbose=False)*gset.mmfset.conv_KRJ_KCMB[ch]*1e-6
noise[i,:,:]=p1.get_tangent_plane(tempmap)
filename=gset.mmfset.paths["clusters"] + "cluster_" + str(int(ch)) + "GHz.fits"
tempmap=h.read_map(filename,verbose=False)
injclus[i,:,:]=pc.get_tangent_plane(tempmap)*rescale_y
#filename=gset.mmfset.paths["cmb"] + "cmb_" + str(int(ch)) + "GHz.fits"
filename=gset.mmfset.paths["psm_sims"] + "group3_map_" + str(int(ch)) + "GHz.fits"
tempmap=h.read_map(filename,verbose=False)*gset.mmfset.conv_KRJ_KCMB[ch]*1e-6
cmb[i,:,:]=p1.get_tangent_plane(tempmap)
hdu = fits.ImageHDU()
hdu.header["EXTNAME"]="SZ + CMB"
hdu.data=injclus + cmb + noise/1000.
filename=gset.mmfset.paths["tplanes"] + "sz_cmb.fits"
hdu.writeto(filename,overwrite=True)
hdu = fits.ImageHDU()
hdu.header["EXTNAME"]="SZ + CMB + NOISE"
hdu.data=injclus + cmb + noise
filename=gset.mmfset.paths["tplanes"] + "sz_cmb_noise.fits"
hdu.writeto(filename,overwrite=True)
hdu = fits.ImageHDU()
hdu.header["EXTNAME"]="SZ + CMB + FRG"
hdu.data=injclus + cmbfrg + noise/1000.
filename=gset.mmfset.paths["tplanes"] + "sz_cmb_frg.fits"
hdu.writeto(filename,overwrite=True)
hdu = fits.ImageHDU()
hdu.header["EXTNAME"]="SZ + CMB + FRG + NOISE"
hdu.data=injclus + cmbfrg + noise
filename=gset.mmfset.paths["tplanes"] + "sz_cmb_frg_noise.fits"
hdu.writeto(filename,overwrite=True)
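# Hedged usage sketch (not part of the original file): assuming the global
# settings in gset.mmfset are initialised elsewhere (nside, xsize, channels,
# paths, ...), the functions above compose in this order:
#   sim_cluster()                     # write the cluster y-map (+ alms)
#   gen_multi_freq_cluster_map(T=0.)  # beam-convolved per-channel cluster maps
#   sim_multi_frequency_cmb_map()     # per-channel CMB realisations
#   extract_tangent_planes()          # cut flat-sky tangent planes to FITS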
|
[
"adityarotti@gmail.com"
] |
adityarotti@gmail.com
|
c15becfa4319da5909966337a26fc1120853e50b
|
a9cdab74360416d3fde84b2ad471f1f952cee2d0
|
/experiments/icml2020/support/mexlet/jupyter.py
|
a5f6291bb0f9c32cbf93f9c91f6d4566c59c4c7c
|
[
"Apache-2.0"
] |
permissive
|
SoldierY/few-shot-domain-adaptation-by-causal-mechanism-transfer
|
4a025f1537b6523f9b183c031af12b9fac3419ce
|
2878ced51cfe473aad8fbc1886e2b65dfc9fc060
|
refs/heads/master
| 2023-01-23T14:38:20.316244
| 2020-07-03T00:26:34
| 2020-07-03T00:26:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
def print_df(df):
from IPython.display import display
display(df)
return df
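# Usage note: returning the frame keeps the helper chainable inside a
# notebook pipeline, e.g. (illustrative):
#   df = print_df(load_data())  # renders the frame, then keeps working with it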
|
[
"takeshi.diadochos@gmail.com"
] |
takeshi.diadochos@gmail.com
|
0acbc4230d3ac700fcfc7b8fc86e10326b0de7c7
|
64c204458891a9185d5646979050db05440f6496
|
/api/nest/throttles.py
|
0700aad27ce996af9bd2a82dda0c0bbd22b8050f
|
[] |
no_license
|
radmar2002/data-engineering-exercise
|
136565621922bb9fd807fafee39946326ae5c9a1
|
354729ac69a6e3d96a47f95f54f907b9658d16aa
|
refs/heads/main
| 2023-05-02T17:54:25.721339
| 2021-05-20T19:39:23
| 2021-05-20T19:39:23
| 368,662,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
from rest_framework.throttling import UserRateThrottle
# Custom Throttle classes
class LimitedRateThrottle(UserRateThrottle):
scope = 'limited'
class BurstRateThrottle(UserRateThrottle):
scope = 'burst'
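# Hedged usage sketch: the scopes above only take effect once rates are wired
# up in settings.py; the module path and rate values below are illustrative,
# not taken from the original repository.
#
# REST_FRAMEWORK = {
#     'DEFAULT_THROTTLE_CLASSES': ['nest.throttles.BurstRateThrottle'],
#     'DEFAULT_THROTTLE_RATES': {
#         'burst': '60/min',
#         'limited': '100/day',
#     },
# }
#
# A single view can also opt in directly:
#
# class ExampleView(APIView):
#     throttle_classes = [LimitedRateThrottle]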
|
[
"radu_marius_florin@yahoo.com"
] |
radu_marius_florin@yahoo.com
|
ce3bf83c1f038e8882a5125b033b0f0a361cc05c
|
5c5cac2b58b572cb7e9db7f8d842c42763190c55
|
/schemas/data.py
|
56853dc7625c43dd396ec91713ec9df14d4ae34e
|
[] |
no_license
|
JudeMolloy/munchy
|
0d54593f55000cbeb1dd0bf3a78f7b5ee90af396
|
3b91a62707ceb64abc2d0fafb87a6ac247dd65f7
|
refs/heads/master
| 2023-03-31T00:23:01.561307
| 2021-04-04T02:05:03
| 2021-04-04T02:05:03
| 306,475,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
from ma import ma
from models.data import ClipDataModel
# Not sure I need this, but I'll keep it for the time being.
class ClipDataSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = ClipDataModel
# load_only = ("name", "bio")
        dump_only = ("id",)  # one-element tuple; a bare string would be iterated per character
load_instance = True
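# Hedged usage sketch (names are illustrative): with load_instance = True,
# marshmallow-sqlalchemy deserializes straight to a model instance, but
# load() then needs a SQLAlchemy session.
#
# schema = ClipDataSchema()
# payload = schema.dump(clip)                   # ClipDataModel -> dict
# clip = schema.load(payload, session=session)  # dict -> ClipDataModel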
|
[
"JudeMolloy07@hotmail.com"
] |
JudeMolloy07@hotmail.com
|
ab5d8fbd62d3448fb69cf6581a66121ca6459a25
|
459929ce79538ec69a6f8c32e608f4e484594d68
|
/venv/Lib/site-packages/virtualbox/__about__.py
|
600822f21eb32a6edbdfa087453d0b2e1ea10fc2
|
[
"Apache-2.0"
] |
permissive
|
yychai97/Kubernetes
|
ec2ef2a98a4588b7588a56b9d661d63222278d29
|
2955227ce81bc21f329729737b5c528b02492780
|
refs/heads/master
| 2023-07-02T18:36:41.382362
| 2021-08-13T04:20:27
| 2021-08-13T04:20:27
| 307,412,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
__title__ = "virtualbox"
__author__ = "Michael Dorman"
__author_email__ = "mjdorma+pyvbox@gmail.com"
__maintainer__ = "Seth Michael Larson"
__maintainer_email__ = "sethmichaellarson@gmail.com"
__version__ = "2.1.1"
__license__ = "Apache-2.0"
__url__ = "https://github.com/sethmlarson/virtualbox-python"
|
[
"49704239+yychai97@users.noreply.github.com"
] |
49704239+yychai97@users.noreply.github.com
|
645f7220a627c6fd08ebb1622817b66a7a41d832
|
601ac0c9f7138b3e506c0511d4a3e7f60a499305
|
/src/pykeen/datasets/freebase.py
|
bf7535538e05734b6ca08bdb377b4fa6ed9772b0
|
[
"MIT"
] |
permissive
|
cdpierse/pykeen
|
9aa551adc05c9e609353d473db1d3da1b92f4ab0
|
e8225c066b56bcdd3180ba895ce3e153808e7e38
|
refs/heads/master
| 2023-09-02T06:30:25.849873
| 2021-11-09T17:32:15
| 2021-11-09T17:32:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,959
|
py
|
# -*- coding: utf-8 -*-
"""Freebase datasets.
* FB15k
* FB15k-237
"""
import os
import click
from docdata import parse_docdata
from more_click import verbose_option
from .base import PackedZipRemoteDataset, TarFileRemoteDataset
__all__ = [
"FB15k",
"FB15k237",
]
@parse_docdata
class FB15k(TarFileRemoteDataset):
"""The FB15k dataset.
---
name: FB15k
statistics:
entities: 14951
relations: 1345
training: 483142
testing: 59071
validation: 50000
triples: 592213
citation:
author: Bordes
year: 2013
link: http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-relational-data.pdf
"""
def __init__(self, create_inverse_triples: bool = False, **kwargs):
"""Initialize the FreeBase 15K dataset.
:param create_inverse_triples: Should inverse triples be created? Defaults to false.
:param kwargs: keyword arguments passed to :class:`pykeen.datasets.base.TarFileRemoteDataset`.
.. warning:: This dataset contains testing leakage. Use :class:`FB15k237` instead.
"""
super().__init__(
url="https://everest.hds.utc.fr/lib/exe/fetch.php?media=en:fb15k.tgz",
relative_training_path=os.path.join("FB15k", "freebase_mtr100_mte100-train.txt"),
relative_testing_path=os.path.join("FB15k", "freebase_mtr100_mte100-test.txt"),
relative_validation_path=os.path.join("FB15k", "freebase_mtr100_mte100-valid.txt"),
create_inverse_triples=create_inverse_triples,
**kwargs,
)
@parse_docdata
class FB15k237(PackedZipRemoteDataset):
"""The FB15k-237 dataset.
---
name: FB15k-237
statistics:
entities: 14505
relations: 237
training: 272115
testing: 20438
validation: 17526
triples: 310079
citation:
author: Toutanova
year: 2015
link: https://www.aclweb.org/anthology/W15-4007/
"""
def __init__(self, create_inverse_triples: bool = False, **kwargs):
"""Initialize the FreeBase 15K (237) dataset.
:param create_inverse_triples: Should inverse triples be created? Defaults to false.
:param kwargs: keyword arguments passed to :class:`pykeen.datasets.base.ZipFileRemoteDataset`.
"""
super().__init__(
url="https://download.microsoft.com/download/8/7/0/8700516A-AB3D-4850-B4BB-805C515AECE1/FB15K-237.2.zip",
relative_training_path=os.path.join("Release", "train.txt"),
relative_testing_path=os.path.join("Release", "test.txt"),
relative_validation_path=os.path.join("Release", "valid.txt"),
create_inverse_triples=create_inverse_triples,
**kwargs,
)
@click.command()
@verbose_option
def _main():
for cls in [FB15k, FB15k237]:
cls().summarize()
if __name__ == "__main__":
_main()
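# Hedged usage sketch: a dataset can be instantiated directly or referenced
# by name in PyKEEN's pipeline (model and epoch count are illustrative):
#
# from pykeen.pipeline import pipeline
# result = pipeline(dataset="fb15k237", model="TransE",
#                   training_kwargs=dict(num_epochs=5))
#
# ds = FB15k237(create_inverse_triples=True)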
|
[
"noreply@github.com"
] |
noreply@github.com
|
54209078b41164b418129f1145ddb6ba7120dacd
|
bc243dc880dae80e612df65e04f2ff747412c2d2
|
/version_0.1/trainingModel.py
|
6e0b5630f11e196eec8538ad0c2a219482c723bb
|
[] |
no_license
|
francs1/ML-Kaggle-Titanic
|
837f7c36d19b3bfe5c4a596f7f7da54d3e485bae
|
9596af99b2f476a413573266d0f1e034e21da810
|
refs/heads/master
| 2020-06-17T23:01:33.430264
| 2019-07-11T01:13:51
| 2019-07-11T01:13:51
| 196,090,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
# trainingModel.py
import sys
import pandas as pd
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
from sklearn import linear_model
from constantInit import *
import dataLoad as dl
import preTreatment as pt
np.random.seed(42)
def trainLR():
passenger_prepared,passenger_labels,test_set,passenger_test = pt.preProcessData()
lr_clf = linear_model.LogisticRegression(C=1.0, penalty='l2', tol=1e-6, solver='lbfgs')
passenger_labels = passenger_labels.values.reshape((len(passenger_labels.values),))
lr_clf.fit(passenger_prepared, passenger_labels)
print(cross_val_score(lr_clf, passenger_prepared, passenger_labels, cv=3, scoring="accuracy"))
predict_data = lr_clf.predict(passenger_test)
PassengerIds = test_set['PassengerId']
results = pd.Series(predict_data,name="Survived",dtype=np.int32)
submission = pd.concat([PassengerIds,results],axis = 1)
dl.saveData(submission,'LRClassifier.csv')
def trainSGD():
passenger_prepared,passenger_labels,test_set,passenger_test = pt.preProcessData()
sgd_clf = SGDClassifier(max_iter=5, tol=-np.infty, random_state=42)
passenger_labels = passenger_labels.values.reshape((len(passenger_labels.values),))
sgd_clf.fit(passenger_prepared, passenger_labels)
print(cross_val_score(sgd_clf, passenger_prepared, passenger_labels, cv=3, scoring="accuracy"))
predict_data = sgd_clf.predict(passenger_test)
PassengerIds = test_set['PassengerId']
results = pd.Series(predict_data,name="Survived",dtype=np.int32)
submission = pd.concat([PassengerIds,results],axis = 1)
dl.saveData(submission,'SGDClassifier.csv')
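# Hedged usage sketch (not in the original file): both trainers write their
# submission CSVs via dl.saveData, so a minimal driver is simply:
#
# if __name__ == '__main__':
#     trainLR()
#     trainSGD()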
|
[
"noreply@github.com"
] |
noreply@github.com
|
d8600af0b88b95f8cda4ccde3d48eef8e17c2e47
|
436177bf038f9941f67e351796668700ffd1cef2
|
/venv/Lib/site-packages/sklearn/tests/test_calibration.py
|
b8585c22bb36278fb772ff8f40c7129b07a1ad2e
|
[] |
no_license
|
python019/matplotlib_simple
|
4359d35f174cd2946d96da4d086026661c3d1f9c
|
32e9a8e773f9423153d73811f69822f9567e6de4
|
refs/heads/main
| 2023-08-22T18:17:38.883274
| 2021-10-07T15:55:50
| 2021-10-07T15:55:50
| 380,471,961
| 29
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,088
|
py
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.model_selection import LeaveOneOut, train_test_split
from sklearn.utils._testing import (assert_array_almost_equal,
assert_almost_equal,
assert_array_equal,
assert_raises, ignore_warnings)
from sklearn.utils.extmath import softmax
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification, make_blobs
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@pytest.fixture(scope="module")
def data():
X, y = make_classification(
n_samples=200, n_features=6, random_state=42
)
return X, y
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration(data, method, ensemble):
# Test calibration objects with isotonic and sigmoid
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble)
assert_raises(ValueError, cal_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
# Note that this fit overwrites the fit on the entire training
# set
cal_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf,
prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
cal_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_cal_clf,
1 - prob_pos_cal_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss((y_test + 1) % 2,
prob_pos_cal_clf_relabeled))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_bad_method(data, ensemble):
# Check only "isotonic" and "sigmoid" are accepted as methods
X, y = data
clf = LinearSVC()
clf_invalid_method = CalibratedClassifierCV(
clf, method="foo", ensemble=ensemble
)
with pytest.raises(ValueError):
clf_invalid_method.fit(X, y)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_regressor(data, ensemble):
# `base-estimator` should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
X, y = data
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), ensemble=ensemble)
with pytest.raises(RuntimeError):
clf_base_regressor.fit(X, y)
def test_calibration_default_estimator(data):
# Check base_estimator default is LinearSVC
X, y = data
calib_clf = CalibratedClassifierCV(cv=2)
calib_clf.fit(X, y)
base_est = calib_clf.calibrated_classifiers_[0].base_estimator
assert isinstance(base_est, LinearSVC)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_cv_splitter(data, ensemble):
# Check when `cv` is a CV splitter
X, y = data
splits = 5
kfold = KFold(n_splits=splits)
calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=ensemble)
assert isinstance(calib_clf.cv, KFold)
assert calib_clf.cv.n_splits == splits
calib_clf.fit(X, y)
expected_n_clf = splits if ensemble else 1
assert len(calib_clf.calibrated_classifiers_) == expected_n_clf
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_sample_weight(data, method, ensemble):
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(
base_estimator, method=method, ensemble=ensemble
)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert diff > 0.1
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_parallel_execution(data, method, ensemble):
"""Test parallel calibration"""
X, y = data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
base_estimator = LinearSVC(random_state=42)
cal_clf_parallel = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=2, ensemble=ensemble
)
cal_clf_parallel.fit(X_train, y_train)
probs_parallel = cal_clf_parallel.predict_proba(X_test)
cal_clf_sequential = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=1, ensemble=ensemble
)
cal_clf_sequential.fit(X_train, y_train)
probs_sequential = cal_clf_sequential.predict_proba(X_test)
assert_allclose(probs_parallel, probs_sequential)
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
# increase the number of RNG seeds to assess the statistical stability of this
# test:
@pytest.mark.parametrize('seed', range(2))
def test_calibration_multiclass(method, ensemble, seed):
def multiclass_brier(y_true, proba_pred, n_classes):
Y_onehot = np.eye(n_classes)[y_true]
return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0]
# Test calibration for multiclass with classifier that implements
# only decision function.
clf = LinearSVC(random_state=7)
X, y = make_blobs(n_samples=500, n_features=100, random_state=seed,
centers=10, cluster_std=15.0)
# Use an unbalanced dataset by collapsing 8 clusters into one class
# to make the naive calibration based on a softmax more unlikely
# to work.
y[y > 2] = 2
n_classes = np.unique(y).shape[0]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
# Check probabilities sum to 1
assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that the dataset is not too trivial, otherwise it's hard
# to get interesting calibration data during the internal
# cross-validation loop.
assert 0.65 < clf.score(X_test, y_test) < 0.95
# Check that the accuracy of the calibrated model is never degraded
# too much compared to the original classifier.
assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test)
# Check that Brier loss of calibrated classifier is smaller than
# loss obtained by naively turning OvR decision function to
# probabilities via a softmax
uncalibrated_brier = \
multiclass_brier(y_test, softmax(clf.decision_function(X_test)),
n_classes=n_classes)
calibrated_brier = multiclass_brier(y_test, probas,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
clf = RandomForestClassifier(n_estimators=30, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
uncalibrated_brier = multiclass_brier(y_test, clf_probs,
n_classes=n_classes)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
calibrated_brier = multiclass_brier(y_test, cal_clf_probs,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
# Check error if clf not prefit
unfit_clf = CalibratedClassifierCV(clf, cv="prefit")
with pytest.raises(NotFittedError):
unfit_clf.fit(X_calib, y_calib)
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
cal_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = cal_clf.predict_proba(this_X_test)
y_pred = cal_clf.predict(this_X_test)
prob_pos_cal_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
def test_calibration_ensemble_false(data, method):
# Test that `ensemble=False` is the same as using predictions from
# `cross_val_predict` to train calibrator.
X, y = data
clf = LinearSVC(random_state=7)
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3, ensemble=False)
cal_clf.fit(X, y)
cal_probas = cal_clf.predict_proba(X)
# Get probas manually
unbiased_preds = cross_val_predict(
clf, X, y, cv=3, method='decision_function'
)
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
else:
calibrator = _SigmoidCalibration()
calibrator.fit(unbiased_preds, y)
# Use `clf` fit on all data
clf.fit(X, y)
clf_df = clf.decision_function(X)
manual_probas = calibrator.predict(clf_df)
assert_allclose(cal_probas[:, 1], manual_probas)
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert len(prob_true) == len(prob_pred)
assert len(prob_true) == 2
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
# test that quantiles work as expected
y_true2 = np.array([0, 0, 0, 0, 1, 1])
y_pred2 = np.array([0., 0.1, 0.2, 0.5, 0.9, 1.])
prob_true_quantile, prob_pred_quantile = calibration_curve(
y_true2, y_pred2, n_bins=2, strategy='quantile')
assert len(prob_true_quantile) == len(prob_pred_quantile)
assert len(prob_true_quantile) == 2
assert_almost_equal(prob_true_quantile, [0, 2 / 3])
assert_almost_equal(prob_pred_quantile, [0.1, 0.8])
# Check that error is raised when invalid strategy is selected
assert_raises(ValueError, calibration_curve, y_true2, y_pred2,
strategy='percentile')
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_nan_imputer(ensemble):
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', SimpleImputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(
clf, cv=2, method='isotonic', ensemble=ensemble
)
clf_c.fit(X, y)
clf_c.predict(X)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_prob_sum(ensemble):
# Test that sum of probabilities is 1. A non-regression test for
# issue #7796
num_classes = 2
X, y = make_classification(n_samples=10, n_features=5,
n_classes=num_classes)
clf = LinearSVC(C=1.0, random_state=7)
clf_prob = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
clf_prob.fit(X, y)
probs = clf_prob.predict_proba(X)
assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0]))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_less_classes(ensemble):
# Test to check calibration works fine when train set in a test-train
# split does not contain all classes
# Since this test uses LOO, at each iteration train set will not contain a
# class label
X = np.random.randn(10, 5)
y = np.arange(10)
clf = LinearSVC(C=1.0, random_state=7)
cal_clf = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
cal_clf.fit(X, y)
for i, calibrated_classifier in \
enumerate(cal_clf.calibrated_classifiers_):
proba = calibrated_classifier.predict_proba(X)
if ensemble:
# Check that the unobserved class has proba=0
assert_array_equal(proba[:, i], np.zeros(len(y)))
# Check for all other classes proba>0
assert np.all(proba[:, :i] > 0)
assert np.all(proba[:, i + 1:] > 0)
else:
# Check `proba` are all 1/n_classes
assert np.allclose(proba, 1 / proba.shape[0])
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize('X', [np.random.RandomState(42).randn(15, 5, 2),
np.random.RandomState(42).randn(15, 5, 2, 6)])
def test_calibration_accepts_ndarray(X):
"""Test that calibration accepts n-dimensional arrays as input"""
y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]
class MockTensorClassifier(BaseEstimator):
"""A toy estimator that accepts tensor inputs"""
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def decision_function(self, X):
# toy decision function that just needs to have the right shape:
return X.reshape(X.shape[0], -1).sum(axis=1)
calibrated_clf = CalibratedClassifierCV(MockTensorClassifier())
# we should be able to fit this classifier with no error
calibrated_clf.fit(X, y)
@pytest.fixture
def text_data():
text_data = [
{'state': 'NY', 'age': 'adult'},
{'state': 'TX', 'age': 'adult'},
{'state': 'VT', 'age': 'child'},
]
text_labels = [1, 0, 1]
return text_data, text_labels
@pytest.fixture
def text_data_pipeline(text_data):
X, y = text_data
pipeline_prefit = Pipeline([
('vectorizer', DictVectorizer()),
('clf', RandomForestClassifier())
])
return pipeline_prefit.fit(X, y)
def test_calibration_pipeline(text_data, text_data_pipeline):
# Test that calibration works in prefit pipeline with transformer,
# where `X` is not array-like, sparse matrix or dataframe at the start.
# See https://github.com/scikit-learn/scikit-learn/issues/8710
X, y = text_data
clf = text_data_pipeline
calib_clf = CalibratedClassifierCV(clf, cv='prefit')
calib_clf.fit(X, y)
# Check attributes are obtained from fitted estimator
assert_array_equal(calib_clf.classes_, clf.classes_)
msg = "'CalibratedClassifierCV' object has no attribute"
with pytest.raises(AttributeError, match=msg):
calib_clf.n_features_in_
@pytest.mark.parametrize('clf, cv', [
pytest.param(LinearSVC(C=1), 2),
pytest.param(LinearSVC(C=1), 'prefit'),
])
def test_calibration_attributes(clf, cv):
# Check that `n_features_in_` and `classes_` attributes created properly
X, y = make_classification(n_samples=10, n_features=5,
n_classes=2, random_state=7)
if cv == 'prefit':
clf = clf.fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv=cv)
calib_clf.fit(X, y)
if cv == 'prefit':
assert_array_equal(calib_clf.classes_, clf.classes_)
assert calib_clf.n_features_in_ == clf.n_features_in_
else:
classes = LabelEncoder().fit(y).classes_
assert_array_equal(calib_clf.classes_, classes)
assert calib_clf.n_features_in_ == X.shape[1]
# FIXME: remove in 1.1
def test_calibrated_classifier_cv_deprecation(data):
# Check that we raise the proper deprecation warning if accessing
# `calibrators_` from the `_CalibratedClassifier`.
X, y = data
calib_clf = CalibratedClassifierCV(cv=2).fit(X, y)
with pytest.warns(FutureWarning):
calibrators = calib_clf.calibrated_classifiers_[0].calibrators_
for clf1, clf2 in zip(
calibrators, calib_clf.calibrated_classifiers_[0].calibrators
):
assert clf1 is clf2
|
[
"82611064+python019@users.noreply.github.com"
] |
82611064+python019@users.noreply.github.com
|
dcaa58a6041377c2b534b68af4ccb0c26cce69c5
|
c54f0a6db830d8811638391600cfc3431fba5798
|
/task/utils.py
|
ef788b38ba39d57e15d901f08d419a0218e8425d
|
[] |
no_license
|
las1m0n/GeoInsightFetcher
|
ed21172c4777a9b017230d0bc78df1ac1704c8ae
|
5470189a0c46b5034e6afbe3ea783ca96246d2c6
|
refs/heads/master
| 2023-02-28T15:33:30.095906
| 2021-02-09T22:11:32
| 2021-02-09T22:11:32
| 337,206,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
from os import path
import argparse
def parse_arguments():
parser = argparse.ArgumentParser(description='GeoInsightFetcher')
parser.add_argument(help='Cities names', nargs="+", dest="cities")
parser.add_argument('-f', "--file", help='file with cities names', dest="file")
return parser.parse_args()
def file_check(file_name):
if path.isfile(file_name) and file_name.lower().endswith((".txt",)):
return True
return False
def read_file(file_name):
if file_check(file_name):
with open(file_name, "r+") as file:
result = file.read().split('\n')
return result
return [None]
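# Hedged usage sketch: a caller would typically merge the positional city
# names with the optional city file (caller code is illustrative):
#
# args = parse_arguments()
# cities = list(args.cities)
# if args.file:
#     cities += [c for c in read_file(args.file) if c]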
|
[
"bondarenkonikita295@gmail.com"
] |
bondarenkonikita295@gmail.com
|
a0042c9289a2cd05dd4f98373cb1f9d55fe146b2
|
792e4db2857ddbd21c33541d220531b38af08d86
|
/机器学习算法/SVM.py
|
e6c93497c8f06761482cc4eb08bf54422563a6ba
|
[] |
no_license
|
moyuweiqing/A-stock-prediction-algorithm-based-on-machine-learning
|
4c216d02edeaaf074c4c20b050fcbda1fe289bb6
|
f6a1a4f5e305b59950a5b94180067a2bb265e235
|
refs/heads/master
| 2023-04-11T02:24:20.310395
| 2023-03-30T07:54:35
| 2023-03-30T07:54:35
| 246,313,173
| 292
| 61
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,572
|
py
|
import pandas as pd
from sklearn import svm,preprocessing
import tushare as ts
class SVM_Predict:
stock_code = ''
tsData = pd.DataFrame()
def __init__(self, stock_code):
self.stock_code = stock_code
def date_setting(self, start_date, end_date):
self.tsData = ts.get_hist_data(code=self.stock_code, start=start_date, end=end_date)
self.tsData = self.tsData.reset_index()
    def makeSVMPrediction(self, rate):  # rate is the fraction of data used for training (train/test split)
df_CB = self.tsData.sort_index(ascending=True, axis=0)
df_CB = df_CB.set_index('date')
df_CB = df_CB.sort_index()
        # value encodes rise/fall: 1 = rise, 0 = fall
value = pd.Series(df_CB['close'] - df_CB['close'].shift(1), \
index=df_CB.index)
value = value.bfill()
value[value >= 0] = 1
value[value < 0] = 0
df_CB['Value'] = value
        # backfill missing values
df_CB = df_CB.fillna(method='bfill')
df_CB = df_CB.astype('float64')
print(df_CB.head())
L = len(df_CB)
train = int(L * rate)
total_predict_data = L - train
        # normalize the sample features
df_CB_X = df_CB.drop(['Value'], axis=1)
df_CB_X = preprocessing.scale(df_CB_X)
        # rolling prediction loop: predict one step ahead each iteration
correct = 0
train_original = train
while train < L:
Data_train = df_CB_X[train - train_original:train]
value_train = value[train - train_original:train]
Data_predict = df_CB_X[train:train + 1]
value_real = value[train:train + 1]
            # kernel choices: 'poly', 'linear', 'rbf'
# classifier = svm.SVC(C=1.0, kernel='poly')
# classifier = svm.SVC(kernel='linear')
classifier = svm.SVC(C=1.0, kernel='rbf')
classifier.fit(Data_train, value_train)
value_predict = classifier.predict(Data_predict)
            print("value_real=%d value_predict=%d" % (value_real[0], int(value_predict[0])))
            # count correct predictions on the test set
            if value_real[0] == int(value_predict[0]):
correct = correct + 1
train = train + 1
print(correct)
print(total_predict_data)
correct = correct * 100 / total_predict_data
print("Correct=%.2f%%" % correct)
a = SVM_Predict('000001')
a.date_setting(start_date='2019-05-12', end_date='2019-12-19')
a.makeSVMPrediction(0.8)
|
[
"noreply@github.com"
] |
noreply@github.com
|
312338e6323865f7023b9bfdb82286528740a82e
|
0081be1a6e5dbe941d1b4cdefd4294f4f0753e38
|
/ZYLABS_3/ZYLAB_10.11.py
|
4ccf7426aed2a7ed055df7f62255ef14fdbe7396
|
[] |
no_license
|
KatherynBusch/CIS-2348--14911
|
b5dda8d67183626b81d561abd6734e858884a256
|
c0f9b448708660de254ba8ee0baf3c6fa82f3d92
|
refs/heads/master
| 2023-01-28T13:29:30.303081
| 2020-12-14T06:56:35
| 2020-12-14T06:56:35
| 290,910,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
# Katheryn Busch PSID: 1868948
class FoodItem:
def __init__(self, name="None", fat=0.0, carb=0.0, protein=0.0):
self.name = name
self.fat = fat
self.carb = carb
self.protein = protein
def get_calories(self, servings):
        calories = ((self.fat * 9) + (self.carb * 4) + (self.protein * 4)) * servings
return calories
def print_info(self):
print('Nutritional information per serving of {}:'.format(self.name))
print(' Fat: {:.2f} g'.format(self.fat))
print(' Carbohydrates: {:.2f} g'.format(self.carb))
print(' Protein: {:.2f} g'.format(self.protein))
if __name__ == "__main__":
food1 = FoodItem()
item = input()
fatamt = float(input())
carbamt = float(input())
proteinamt = float(input())
item2 = FoodItem(item, fatamt, carbamt, proteinamt)
servings = float(input())
food1.print_info()
print('Number of calories for {:.2f} serving(s): {:.2f}'.format(servings, food1.get_calories(servings)))
print()
item2.print_info()
print('Number of calories for {:.2f} serving(s): {:.2f}'.format(servings, item2.get_calories(servings)))
item2.get_calories(servings)
|
[
"noreply@github.com"
] |
noreply@github.com
|
6c35b1cfd607a05efda8da84959d5075ad4cce77
|
4db21365bd1f78d0c3258efba0af2cb10696fa32
|
/main/settings.py
|
073da02a715681d860283236b176447a2fa5284c
|
[] |
no_license
|
gichimux/beegee_cms
|
e62a2da86cc23395f3ce8a1dc3041dc9742a2315
|
c4395f6d0bc334cb4158208d6d2a124c70da9ed0
|
refs/heads/master
| 2020-08-07T21:11:53.345827
| 2019-10-08T08:53:52
| 2019-10-08T08:53:52
| 213,588,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,088
|
py
|
"""
Django settings for main project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-^@!%3l)0af)45%2l=mdl8zspo$y1ob6ntx^^c*-v&=g&u!vmk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"gichimumwai1@gmail.com"
] |
gichimumwai1@gmail.com
|
96dbfb206fea6616d529302a4dd2d8b79d04dcdb
|
4e8876d7b29cf9fb05849da77553b8a7e3783bdc
|
/src/plugins/processing/algs/gdal/contour.py
|
e25947294d997251bea2fcf065aa480f5e025270
|
[] |
no_license
|
hydrology-tep/hep-qgis-plugin-lite
|
48477f504b6fc1a9a9446c7c7f5666f4b2ccfee7
|
781cbaa1b3e9331de6741dd44a22322048ab176c
|
refs/heads/master
| 2021-03-27T17:01:18.284421
| 2018-06-27T12:09:58
| 2018-06-27T12:09:58
| 70,825,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,908
|
py
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
contour.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputVector
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class contour(GdalAlgorithm):
INPUT_RASTER = 'INPUT_RASTER'
OUTPUT_VECTOR = 'OUTPUT_VECTOR'
INTERVAL = 'INTERVAL'
FIELD_NAME = 'FIELD_NAME'
EXTRA = 'EXTRA'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'contour.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Contour')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Extraction')
self.addParameter(ParameterRaster(self.INPUT_RASTER,
self.tr('Input layer'), False))
self.addParameter(ParameterNumber(self.INTERVAL,
self.tr('Interval between contour lines'), 0.0,
99999999.999999, 10.0))
self.addParameter(ParameterString(self.FIELD_NAME,
self.tr('Attribute name (if not set, no elevation attribute is attached)'),
'ELEV', optional=True))
self.addParameter(ParameterString(self.EXTRA,
self.tr('Additional creation parameters'), '', optional=True))
self.addOutput(OutputVector(self.OUTPUT_VECTOR,
self.tr('Contours')))
def getConsoleCommands(self):
output = self.getOutputValue(self.OUTPUT_VECTOR)
interval = unicode(self.getParameterValue(self.INTERVAL))
fieldName = unicode(self.getParameterValue(self.FIELD_NAME))
extra = self.getParameterValue(self.EXTRA)
if extra is not None:
extra = unicode(extra)
arguments = []
if len(fieldName) > 0:
arguments.append('-a')
arguments.append(fieldName)
arguments.append('-i')
arguments.append(interval)
driver = GdalUtils.getVectorDriverFromFileName(output)
arguments.append('-f')
arguments.append(driver)
if extra and len(extra) > 0:
arguments.append(extra)
arguments.append(self.getParameterValue(self.INPUT_RASTER))
arguments.append(output)
return ['gdal_contour', GdalUtils.escapeAndJoin(arguments)]
|
[
"joaa@localhost.localdomain"
] |
joaa@localhost.localdomain
|
4749bf6ccf6bd5a56d395c5462ac67cbfea6b435
|
7936ebf5b94c3d153fb55248b52db2eff724427c
|
/11/homework11/zhihu_top100.py
|
6fb64a0e8b94e74945b6a87d6f31271cd6307984
|
[
"MIT"
] |
permissive
|
xiaodongzi/pytohon_teach_material
|
f9e95f7b294a9e49d86d1a8e25cbef5efef3aaf7
|
13ed128a993637d0203f1f8c5419d781d7212883
|
refs/heads/master
| 2021-05-30T09:48:16.898483
| 2016-01-24T17:02:34
| 2016-01-24T17:02:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
# coding: utf-8
import requests
from pyquery import PyQuery as pq
question_num = 1
page_num = 1
to_stop = False
scrap_questions_num = 100
while True:
url = "http://www.zhihu.com/topic/19776749/top-answers?page=%d" % (page_num)
res = requests.get(url)
# print res.encoding
for p in pq(res.content).find('.feed-main'):
# print type(p)
        print(question_num, '.', pq(p).find('.question_link').text())
relative_link = pq(p).find('.question_link').attr('href')
absolute_link = 'http://www.zhihu.com' + relative_link
        print(' 链接 ', absolute_link)
        print(' vote: ', pq(p).find('.zm-item-vote-count').text())
        print(' 回答摘要')
        print(' ', pq(p).find('.zh-summary').text()[:-4])
        print('-' * 60)
        print()
if question_num == scrap_questions_num:
to_stop = True
break
question_num += 1
page_num += 1
    if to_stop:
break
|
[
"seerjk@gmail.com"
] |
seerjk@gmail.com
|
602d262d87637b80483ed8cf1d334baa5e133929
|
71916e7013d190fb86ab8c666849d3a00cf63484
|
/Database/naman.py
|
71443a1fe8585e9f6799ddabc132967c88d5e249
|
[] |
no_license
|
nrv3098/Projects
|
e161d892f28970e11403d966c142e3a43e57c2ee
|
60662573dcf1429240d1c38225ea46fd84ce584f
|
refs/heads/master
| 2020-04-09T01:52:47.988525
| 2018-12-01T07:44:00
| 2018-12-01T07:44:00
| 159,920,611
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,349
|
py
|
import tkinter as tk
from tkinter import *
import sqlite3
from tkinter import messagebox
from tkinter import simpledialog
conn = sqlite3.connect('Database.db')
c = conn.cursor()
def create_table(e1,e2,e3,e4,var):
str1=e1.get()
str2=e2.get()
str3=e3.get()
str4=e4.get()
str5=var.get()
#print(str2,str1,str5,str4,str3)
c.execute('CREATE TABLE IF NOT EXISTS Database(str2 TEXT, str1 TEXT,str5 INT, str4 TEXT,str3 TEXT)')
c.execute("INSERT INTO Database( str2, str1,str5,str4,str3) VALUES (?, ?, ?, ?, ?)",
(str2,str1,str5,str4,str3))
conn.commit()
def display():
c.execute('SELECT * FROM Database')
for row in c.fetchall():
print(row)
def enter():
root.destroy()
frame1=Tk()
frame1.title("Enter details")
frame1.geometry("1000x1000")
frame1.wm_iconbitmap('winimgg.ico')
l1=Label(frame1,text="Name:",width=10,height=2,font=('Courier',-20,'bold'),fg='black')
l2=Label(frame1,text="Roll No.:",width=10,height=2,font=('Courier',-20,'bold'),fg='black')
l3=Label(frame1,text="Gender:",width=10,height=2,font=('Courier',-20,'bold'),fg='black')
l4=Label(frame1,text="D.O.B.:",width=10,height=2,font=('Courier',-20,'bold'),fg='black')
l5=Label(frame1,text="Address:",width=10,height=2,font=('Courier',-20,'bold'),fg='black')
l1.place(x=5,y=50)
l2.place(x=5,y=100)
l3.place(x=5,y=150)
l4.place(x=5,y=200)
l5.place(x=5,y=250)
e1=Entry(frame1,width=30,fg='black',bg='white',font=('Arial',14))
e2=Entry(frame1,width=20,fg='black',bg='white',font=('Arial',14))
e3=Entry(frame1,width=20,fg='black',bg='white',font=('Arial',14))
e4=Entry(frame1,width=20,fg='black',bg='white',font=('Arial',14))
e1.place(x=120,y=60)
e2.place(x=120,y=110)
e3.place(x=120,y=210)
e4.place(x=120,y=260)
var=StringVar()
r1=Radiobutton(frame1,text='Female',value='Female',variable=var,font=('Courier',-20),fg='black')
r2=Radiobutton(frame1,text='Male',value='Male',variable=var,font=('Courier',-20),fg='black')
r1.place(x=130,y=160)
r2.place(x=250,y=160)
b1 = tk.Button(frame1,text="Submit",command=lambda : create_table(e1,e2,e3,e4,var))
b1.place(x=300,y=550)
def view():
root.destroy()
frame2=Tk()
frame2.title("Details")
frame2.geometry("1000x1000")
frame2.wm_iconbitmap('winimgg.ico')
c.execute('SELECT * FROM Database')
x = c.fetchall()
j=100
i=1
for row in x:
lt=Label(frame2,text="Sno. Name Rno. Gender Add DOB",font=('Courier',-20,'bold'),fg='black')
le=Label(frame2,text=i,font=('Courier',-20),fg='black')
l = Label(frame2,text=row,font=('Courier',-20),fg='black')
l.place(x=25,y=j)
le.place(x=5,y=j)
lt.place(x=2,y=25)
j=j+50
i=i+1
print(row)
def search():
rno = tk.simpledialog.askstring("ROLL NO","ENTER A ROLL NUMBER")
roll_no = (rno,)
c.execute('SELECT * FROM Database WHERE str2=?',roll_no)
for row in c.fetchall():
root.destroy()
frame3=Tk()
frame3.title("Searching")
frame3.geometry("500x500")
l1=Label(frame3,text=row,width=35,height=5,font=('Courier',-20,'bold'),fg='black')
l1.place(x=5,y=50)
def delete():
name=tk.simpledialog.askstring("NAME","ENTER NAME")
nm=(name,)
    c.execute("DELETE FROM Database WHERE str1=?", nm)
    # a DELETE statement returns no rows, so re-select the table before
    # displaying the updated records
    c.execute('SELECT * FROM Database')
    g=50
    for row in c.fetchall():
frame4=Tk()
frame4.title("Updated records")
frame4.geometry("500x500")
l8=Label(frame4,text=row,width=35,height=5,font=('Courier',-20,'bold'),fg='black')
l8.place(x=5,y=g)
g=g+50
conn.commit()
root=Tk()
root.title("Student Database Management System")
root.geometry("1000x1000")
root.wm_iconbitmap('winimgg.ico')
myImg = PhotoImage(file= "windowwwwww.png")
btn= Button(root, image=myImg)
btn.place(x=440,y=50)
root.wm_iconbitmap('winimgg.ico')
b2 = tk.Button(root, text="Enter details", command=enter)
b2.place(x=150,y=290)
b3= tk.Button(root,text="View details",command=view)
b3.place(x=350,y=290)
b4 = tk.Button(root,text="Search",command=search)
b4.place(x=550,y=290)
b5=tk.Button(root,text="Delete Record",command=delete)
b5.place(x=750,y=290)
root.mainloop()
|
[
"nrv3098@gmail.com"
] |
nrv3098@gmail.com
|
65dce79f30aec774c93591b408df883154300b38
|
fee003cd665885f52f6f268855bd40039030e8b5
|
/bolsa/backend.py
|
ed75ba7400b64fc065447aac0e10e3538b045aa2
|
[] |
no_license
|
gicornachini/bolsa
|
044cc1f0f3441d184eb87cdcca30bc2dea4881f5
|
2133c96c58f371e01a11f37bbe32152d3d77d48a
|
refs/heads/master
| 2023-06-30T07:59:53.155718
| 2021-03-27T21:37:24
| 2021-03-27T21:37:24
| 299,123,383
| 60
| 22
| null | 2021-09-16T11:24:20
| 2020-09-27T21:34:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,401
|
py
|
import asyncio
import logging
from functools import cached_property
import aiohttp
from bolsa.connector import B3HttpClientConnector
from bolsa.http_client import B3HttpClient
from bolsa.responses import (
GetBrokerAccountAssetExtractResponse,
GetBrokerAccountResponse,
GetBrokersResponse
)
logger = logging.getLogger(__name__)
POOL_CONNECTOR = B3HttpClientConnector()
class B3AsyncBackend():
def __init__(self, username, password, captcha_service):
self._connector = POOL_CONNECTOR.get_connector()
self.username = username
self.password = password
self.captcha_service = captcha_service
@cached_property
def _session(self):
logger.info(f'Creating session for username: {self.username}')
return aiohttp.ClientSession(
connector=self._connector,
connector_owner=False
)
@cached_property
def _http_client(self):
return B3HttpClient(
username=self.username,
password=self.password,
session=self._session,
captcha_service=self.captcha_service
)
async def session_close(self):
await self._session.close()
async def connection_close(self):
await self._connector.close()
async def get_brokers(self):
response = await self._http_client.get_brokers()
response_class = GetBrokersResponse(response)
return await response_class.data()
async def get_broker_accounts(self, broker):
response = await self._http_client.get_broker_accounts(broker)
response_class = GetBrokerAccountResponse(
response=response,
broker=broker
)
return await response_class.data()
async def get_brokers_with_accounts(self):
brokers = await self.get_brokers()
brokers_account_routine = [
asyncio.create_task(
self.get_broker_accounts(broker)
)
for broker in brokers
]
return await asyncio.gather(*brokers_account_routine)
async def get_broker_account_portfolio_assets_extract(
self,
account_id,
broker_value,
broker_parse_extra_data,
account_parse_extra_data
):
response = await self._http_client.get_broker_account_portfolio_assets_extract( # NOQA
account_id,
broker_value,
broker_parse_extra_data,
account_parse_extra_data
)
response_class = GetBrokerAccountAssetExtractResponse(
response=response,
broker_value=broker_value
)
return await response_class.data()
async def get_brokers_account_portfolio_assets_extract(self, brokers):
brokers_account_assets_extract_routine = [
asyncio.create_task(
self.get_broker_account_portfolio_assets_extract(
account_id=broker.accounts[0].id,
broker_value=broker.value,
broker_parse_extra_data=broker.parse_extra_data,
account_parse_extra_data=(
broker.accounts[0].parse_extra_data
)
)
)
for broker in brokers
if len(broker.accounts) > 0
]
return await asyncio.gather(*brokers_account_assets_extract_routine)
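# Hedged usage sketch (credentials and the captcha service are placeholders):
#
# import asyncio
#
# async def main():
#     backend = B3AsyncBackend(username="...", password="...",
#                              captcha_service=my_captcha_service)
#     try:
#         brokers = await backend.get_brokers_with_accounts()
#         extracts = await backend.get_brokers_account_portfolio_assets_extract(brokers)
#     finally:
#         await backend.session_close()
#         await backend.connection_close()
#
# asyncio.run(main())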
|
[
"giovannicornachini@gmail.com"
] |
giovannicornachini@gmail.com
|