Dataset schema (one row per source file):

| column | dtype | notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 368k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
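Each record below follows this schema, with the filtered `content` stored next to the unmodified `original_content`. As a minimal sketch of how the two columns can be compared (assuming the data is published as a Hugging Face dataset; the repository id `user/filtered-python-code` is a placeholder, not the real identifier):

```python
# Minimal sketch, not the official loader: stream a few rows and measure how much
# text the docstring filters removed. The dataset id below is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/filtered-python-code", split="train", streaming=True)
for row in ds.take(3):
    removed = len(row["original_content"]) - len(row["content"])
    print(row["hexsha"][:8], row["max_stars_repo_path"], "chars removed:", removed)
```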
hexsha: 66d48aee09df4482ae72248fe5cf698ffaffa928 | size: 106 | ext: py | lang: Python
max_stars_repo: path=tests/test_example.py | name=evrom/python-package | head_hexsha=d7d0daec13da4ade9b7d2c96097c19dff6ba187a | licenses=["BSD-2-Clause"] | stars_count=null | stars_event_min=null | stars_event_max=null
max_issues_repo: path=tests/test_example.py | name=evrom/python-package | head_hexsha=d7d0daec13da4ade9b7d2c96097c19dff6ba187a | licenses=["BSD-2-Clause"] | issues_count=null | issues_event_min=null | issues_event_max=null
max_forks_repo: path=tests/test_example.py | name=evrom/python-package | head_hexsha=d7d0daec13da4ade9b7d2c96097c19dff6ba187a | licenses=["BSD-2-Clause"] | forks_count=null | forks_event_min=null | forks_event_max=null
content:
from unittest import TestCase
avg_line_length: 15.142857 | max_line_length: 32 | alphanum_fraction: 0.726415
original_content:
from unittest import TestCase
class ExampleTestCase(TestCase):
def test_example(self):
pass
filtered:remove_function_no_docstring: 15 | filtered:remove_class_no_docstring: 11 | filtered:remove_delete_markers: 49
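In this first record the filters kept only the import line because `ExampleTestCase` and `test_example` carry no docstrings. A rough, self-contained sketch of that kind of filter, written here with `ast` purely for illustration (it is not the pipeline that produced the `filtered:*` columns):

```python
# Illustration only: drop top-level functions/classes that have no docstring,
# in the spirit of the remove_function_no_docstring / remove_class_no_docstring filters.
import ast

def strip_undocumented(source: str) -> str:
    tree = ast.parse(source)
    kept = [
        node for node in tree.body
        if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef))
        or ast.get_docstring(node) is not None
    ]
    return "\n".join(ast.get_source_segment(source, node) or "" for node in kept)

original = (
    "from unittest import TestCase\n"
    "class ExampleTestCase(TestCase):\n"
    "    def test_example(self):\n"
    "        pass\n"
)
print(strip_undocumented(original))  # -> from unittest import TestCase
```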
hexsha: 33522d5a0e5a1a2934f46eb372aaa91d5264c310 | size: 193 | ext: py | lang: Python
max_stars_repo: path=5. Strings/multi_line_strings.py | name=VasuGoel/python-personal-notes | head_hexsha=752c84533677e30e2abdaaf288ed7cf43220bd42 | licenses=["MIT"] | stars_count=1 | stars_event_min=2019-09-04T12:08:29.000Z | stars_event_max=2019-09-04T12:08:29.000Z
max_issues_repo: path=5. Strings/multi_line_strings.py | name=VasuGoel/python-personal-notes | head_hexsha=752c84533677e30e2abdaaf288ed7cf43220bd42 | licenses=["MIT"] | issues_count=null | issues_event_min=null | issues_event_max=null
max_forks_repo: path=5. Strings/multi_line_strings.py | name=VasuGoel/python-personal-notes | head_hexsha=752c84533677e30e2abdaaf288ed7cf43220bd42 | licenses=["MIT"] | forks_count=2 | forks_event_min=2019-09-04T12:08:30.000Z | forks_event_max=2020-10-13T16:18:58.000Z
content:
greeting_message = '''
Hi John,
We have received your purchase request successfully. We'll email you when after the package is dispatched.
Thanks,
Support Team
'''
print(greeting_message)
avg_line_length: 14.846154 | max_line_length: 106 | alphanum_fraction: 0.766839
original_content:
greeting_message = '''
Hi John,
We have received your purchase request successfully. We'll email you when after the package is dispatched.
Thanks,
Support Team
'''
print(greeting_message)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: bc42ceed708f7435a7fe173be8580c9dbe9c5fd0 | size: 8,274 | ext: py | lang: Python
max_stars_repo: path=daisy/utils/splitter.py | name=roger-zhe-li/daisyRec | head_hexsha=421d16adbb98a5b75134931b1cde5ac42857ef52 | licenses=["Apache-2.0"] | stars_count=1 | stars_event_min=2021-04-04T11:52:08.000Z | stars_event_max=2021-04-04T11:52:08.000Z
max_issues_repo: path=daisy/utils/splitter.py | name=roger-zhe-li/daisyRec | head_hexsha=421d16adbb98a5b75134931b1cde5ac42857ef52 | licenses=["Apache-2.0"] | issues_count=null | issues_event_min=null | issues_event_max=null
max_forks_repo: path=daisy/utils/splitter.py | name=roger-zhe-li/daisyRec | head_hexsha=421d16adbb98a5b75134931b1cde5ac42857ef52 | licenses=["Apache-2.0"] | forks_count=null | forks_event_min=null | forks_event_max=null
content:
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, train_test_split, GroupShuffleSplit
def split_test(df, test_method='fo', test_size=.2):
"""
method of splitting data into training data and test data
Parameters
----------
df : pd.DataFrame raw data waiting for test set splitting
test_method : str, way to split test set
'fo': split by ratio
'tfo': split by ratio with timestamp
'tloo': leave one out with timestamp
'loo': leave one out
'ufo': split by ratio in user level
'utfo': time-aware split by ratio in user level
test_size : float, size of test set
Returns
-------
train_set : pd.DataFrame training dataset
test_set : pd.DataFrame test dataset
"""
train_set, test_set = pd.DataFrame(), pd.DataFrame()
if test_method == 'ufo':
# driver_ids = df['user']
# _, driver_indices = np.unique(np.array(driver_ids), return_inverse=True)
# gss = GroupShuffleSplit(n_splits=1, test_size=test_size, random_state=2020)
# for train_idx, test_idx in gss.split(df, groups=driver_indices):
# train_set, test_set = df.loc[train_idx, :].copy(), df.loc[test_idx, :].copy()
test_idx = df.groupby('user').apply(
lambda x: x.sample(frac=test_size).index
).explode().values
train_set = df[~df.index.isin(test_idx)]
test_set = df.iloc[test_idx]
elif test_method == 'utfo':
df = df.sort_values(['user', 'timestamp']).reset_index(drop=True)
test_index = df.groupby('user').apply(time_split).explode().values
test_set = df.loc[test_index, :]
train_set = df[~df.index.isin(test_index)]
elif test_method == 'tfo':
# df = df.sample(frac=1)
df = df.sort_values(['timestamp']).reset_index(drop=True)
split_idx = int(np.ceil(len(df) * (1 - test_size)))
train_set, test_set = df.iloc[:split_idx, :].copy(), df.iloc[split_idx:, :].copy()
elif test_method == 'fo':
train_set, test_set = train_test_split(df, test_size=test_size, random_state=2019)
elif test_method == 'tloo':
# df = df.sample(frac=1)
df = df.sort_values(['timestamp']).reset_index(drop=True)
df['rank_latest'] = df.groupby(['user'])['timestamp'].rank(method='first', ascending=False)
train_set, test_set = df[df['rank_latest'] > 1].copy(), df[df['rank_latest'] == 1].copy()
del train_set['rank_latest'], test_set['rank_latest']
elif test_method == 'loo':
# # slow method
# test_set = df.groupby(['user']).apply(pd.DataFrame.sample, n=1).reset_index(drop=True)
# test_key = test_set[['user', 'item']].copy()
# train_set = df.set_index(['user', 'item']).drop(pd.MultiIndex.from_frame(test_key)).reset_index().copy()
# # quick method
test_index = df.groupby(['user']).apply(lambda grp: np.random.choice(grp.index))
test_set = df.loc[test_index, :].copy()
train_set = df[~df.index.isin(test_index)].copy()
else:
raise ValueError('Invalid data_split value, expect: loo, fo, tloo, tfo')
train_set, test_set = train_set.reset_index(drop=True), test_set.reset_index(drop=True)
return train_set, test_set
def split_validation(train_set, val_method='fo', fold_num=1, val_size=.1):
"""
method of split data into training data and validation data.
(Currently, this method returns list of train & validation set, but I'll change
it to index list or generator in future so as to save memory space) TODO
Parameters
----------
train_set : pd.DataFrame train set waiting for split validation
val_method : str, way to split validation
'cv': combine with fold_num => fold_num-CV
'fo': combine with fold_num & val_size => fold_num-Split by ratio(9:1)
'tfo': Split by ratio with timestamp, combine with val_size => 1-Split by ratio(9:1)
'tloo': Leave one out with timestamp => 1-Leave one out
'loo': combine with fold_num => fold_num-Leave one out
'ufo': split by ratio in user level with K-fold
'utfo': time-aware split by ratio in user level
fold_num : int, the number of folder need to be validated, only work when val_method is 'cv', 'loo', or 'fo'
val_size: float, the size of validation dataset
Returns
-------
train_set_list : List, list of generated training datasets
val_set_list : List, list of generated validation datasets
cnt : cnt: int, the number of train-validation pair
"""
if val_method in ['tloo', 'tfo', 'utfo']:
cnt = 1
elif val_method in ['cv', 'loo', 'fo', 'ufo']:
cnt = fold_num
else:
raise ValueError('Invalid val_method value, expect: cv, loo, tloo, tfo')
train_set_list, val_set_list = [], []
if val_method == 'ufo':
driver_ids = train_set['user']
_, driver_indices = np.unique(np.array(driver_ids), return_inverse=True)
gss = GroupShuffleSplit(n_splits=fold_num, test_size=val_size, random_state=2020)
for train_idx, val_idx in gss.split(train_set, groups=driver_indices):
train_set_list.append(train_set.loc[train_idx, :])
val_set_list.append(train_set.loc[val_idx, :])
if val_method == 'utfo':
train_set = train_set.sort_values(['user', 'timestamp']).reset_index(drop=True)
val_index = train_set.groupby('user').apply(time_split).explode().values
val_set = train_set.loc[val_index, :]
train_set = train_set[~train_set.index.isin(val_index)]
train_set_list.append(train_set)
val_set_list.append(val_set)
if val_method == 'cv':
kf = KFold(n_splits=fold_num, shuffle=False, random_state=2019)
for train_index, val_index in kf.split(train_set):
train_set_list.append(train_set.loc[train_index, :])
val_set_list.append(train_set.loc[val_index, :])
if val_method == 'fo':
for _ in range(fold_num):
train, validation = train_test_split(train_set, test_size=val_size)
train_set_list.append(train)
val_set_list.append(validation)
elif val_method == 'tfo':
# train_set = train_set.sample(frac=1)
train_set = train_set.sort_values(['timestamp']).reset_index(drop=True)
split_idx = int(np.ceil(len(train_set) * (1 - val_size)))
train_set_list.append(train_set.iloc[:split_idx, :])
val_set_list.append(train_set.iloc[split_idx:, :])
elif val_method == 'loo':
for _ in range(fold_num):
val_index = train_set.groupby(['user']).apply(lambda grp: np.random.choice(grp.index))
val_set = train_set.loc[val_index, :].reset_index(drop=True).copy()
sub_train_set = train_set[~train_set.index.isin(val_index)].reset_index(drop=True).copy()
train_set_list.append(sub_train_set)
val_set_list.append(val_set)
elif val_method == 'tloo':
# train_set = train_set.sample(frac=1)
train_set = train_set.sort_values(['timestamp']).reset_index(drop=True)
train_set['rank_latest'] = train_set.groupby(['user'])['timestamp'].rank(method='first', ascending=False)
new_train_set = train_set[train_set['rank_latest'] > 1].copy()
val_set = train_set[train_set['rank_latest'] == 1].copy()
del new_train_set['rank_latest'], val_set['rank_latest']
train_set_list.append(new_train_set)
val_set_list.append(val_set)
return train_set_list, val_set_list, cnt
avg_line_length: 44.967391 | max_line_length: 114 | alphanum_fraction: 0.629321
original_content:
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, train_test_split, GroupShuffleSplit
def split_test(df, test_method='fo', test_size=.2):
"""
method of splitting data into training data and test data
Parameters
----------
df : pd.DataFrame raw data waiting for test set splitting
test_method : str, way to split test set
'fo': split by ratio
'tfo': split by ratio with timestamp
'tloo': leave one out with timestamp
'loo': leave one out
'ufo': split by ratio in user level
'utfo': time-aware split by ratio in user level
test_size : float, size of test set
Returns
-------
train_set : pd.DataFrame training dataset
test_set : pd.DataFrame test dataset
"""
train_set, test_set = pd.DataFrame(), pd.DataFrame()
if test_method == 'ufo':
# driver_ids = df['user']
# _, driver_indices = np.unique(np.array(driver_ids), return_inverse=True)
# gss = GroupShuffleSplit(n_splits=1, test_size=test_size, random_state=2020)
# for train_idx, test_idx in gss.split(df, groups=driver_indices):
# train_set, test_set = df.loc[train_idx, :].copy(), df.loc[test_idx, :].copy()
test_idx = df.groupby('user').apply(
lambda x: x.sample(frac=test_size).index
).explode().values
train_set = df[~df.index.isin(test_idx)]
test_set = df.iloc[test_idx]
elif test_method == 'utfo':
df = df.sort_values(['user', 'timestamp']).reset_index(drop=True)
def time_split(grp):
start_idx = grp.index[0]
split_len = int(np.ceil(len(grp) * (1 - test_size)))
split_idx = start_idx + split_len
end_idx = grp.index[-1]
return list(range(split_idx, end_idx + 1))
test_index = df.groupby('user').apply(time_split).explode().values
test_set = df.loc[test_index, :]
train_set = df[~df.index.isin(test_index)]
elif test_method == 'tfo':
# df = df.sample(frac=1)
df = df.sort_values(['timestamp']).reset_index(drop=True)
split_idx = int(np.ceil(len(df) * (1 - test_size)))
train_set, test_set = df.iloc[:split_idx, :].copy(), df.iloc[split_idx:, :].copy()
elif test_method == 'fo':
train_set, test_set = train_test_split(df, test_size=test_size, random_state=2019)
elif test_method == 'tloo':
# df = df.sample(frac=1)
df = df.sort_values(['timestamp']).reset_index(drop=True)
df['rank_latest'] = df.groupby(['user'])['timestamp'].rank(method='first', ascending=False)
train_set, test_set = df[df['rank_latest'] > 1].copy(), df[df['rank_latest'] == 1].copy()
del train_set['rank_latest'], test_set['rank_latest']
elif test_method == 'loo':
# # slow method
# test_set = df.groupby(['user']).apply(pd.DataFrame.sample, n=1).reset_index(drop=True)
# test_key = test_set[['user', 'item']].copy()
# train_set = df.set_index(['user', 'item']).drop(pd.MultiIndex.from_frame(test_key)).reset_index().copy()
# # quick method
test_index = df.groupby(['user']).apply(lambda grp: np.random.choice(grp.index))
test_set = df.loc[test_index, :].copy()
train_set = df[~df.index.isin(test_index)].copy()
else:
raise ValueError('Invalid data_split value, expect: loo, fo, tloo, tfo')
train_set, test_set = train_set.reset_index(drop=True), test_set.reset_index(drop=True)
return train_set, test_set
def split_validation(train_set, val_method='fo', fold_num=1, val_size=.1):
"""
method of split data into training data and validation data.
(Currently, this method returns list of train & validation set, but I'll change
it to index list or generator in future so as to save memory space) TODO
Parameters
----------
train_set : pd.DataFrame train set waiting for split validation
val_method : str, way to split validation
'cv': combine with fold_num => fold_num-CV
'fo': combine with fold_num & val_size => fold_num-Split by ratio(9:1)
'tfo': Split by ratio with timestamp, combine with val_size => 1-Split by ratio(9:1)
'tloo': Leave one out with timestamp => 1-Leave one out
'loo': combine with fold_num => fold_num-Leave one out
'ufo': split by ratio in user level with K-fold
'utfo': time-aware split by ratio in user level
fold_num : int, the number of folder need to be validated, only work when val_method is 'cv', 'loo', or 'fo'
val_size: float, the size of validation dataset
Returns
-------
train_set_list : List, list of generated training datasets
val_set_list : List, list of generated validation datasets
cnt : cnt: int, the number of train-validation pair
"""
if val_method in ['tloo', 'tfo', 'utfo']:
cnt = 1
elif val_method in ['cv', 'loo', 'fo', 'ufo']:
cnt = fold_num
else:
raise ValueError('Invalid val_method value, expect: cv, loo, tloo, tfo')
train_set_list, val_set_list = [], []
if val_method == 'ufo':
driver_ids = train_set['user']
_, driver_indices = np.unique(np.array(driver_ids), return_inverse=True)
gss = GroupShuffleSplit(n_splits=fold_num, test_size=val_size, random_state=2020)
for train_idx, val_idx in gss.split(train_set, groups=driver_indices):
train_set_list.append(train_set.loc[train_idx, :])
val_set_list.append(train_set.loc[val_idx, :])
if val_method == 'utfo':
train_set = train_set.sort_values(['user', 'timestamp']).reset_index(drop=True)
def time_split(grp):
start_idx = grp.index[0]
split_len = int(np.ceil(len(grp) * (1 - val_size)))
split_idx = start_idx + split_len
end_idx = grp.index[-1]
return list(range(split_idx, end_idx + 1))
val_index = train_set.groupby('user').apply(time_split).explode().values
val_set = train_set.loc[val_index, :]
train_set = train_set[~train_set.index.isin(val_index)]
train_set_list.append(train_set)
val_set_list.append(val_set)
if val_method == 'cv':
kf = KFold(n_splits=fold_num, shuffle=False, random_state=2019)
for train_index, val_index in kf.split(train_set):
train_set_list.append(train_set.loc[train_index, :])
val_set_list.append(train_set.loc[val_index, :])
if val_method == 'fo':
for _ in range(fold_num):
train, validation = train_test_split(train_set, test_size=val_size)
train_set_list.append(train)
val_set_list.append(validation)
elif val_method == 'tfo':
# train_set = train_set.sample(frac=1)
train_set = train_set.sort_values(['timestamp']).reset_index(drop=True)
split_idx = int(np.ceil(len(train_set) * (1 - val_size)))
train_set_list.append(train_set.iloc[:split_idx, :])
val_set_list.append(train_set.iloc[split_idx:, :])
elif val_method == 'loo':
for _ in range(fold_num):
val_index = train_set.groupby(['user']).apply(lambda grp: np.random.choice(grp.index))
val_set = train_set.loc[val_index, :].reset_index(drop=True).copy()
sub_train_set = train_set[~train_set.index.isin(val_index)].reset_index(drop=True).copy()
train_set_list.append(sub_train_set)
val_set_list.append(val_set)
elif val_method == 'tloo':
# train_set = train_set.sample(frac=1)
train_set = train_set.sort_values(['timestamp']).reset_index(drop=True)
train_set['rank_latest'] = train_set.groupby(['user'])['timestamp'].rank(method='first', ascending=False)
new_train_set = train_set[train_set['rank_latest'] > 1].copy()
val_set = train_set[train_set['rank_latest'] == 1].copy()
del new_train_set['rank_latest'], val_set['rank_latest']
train_set_list.append(new_train_set)
val_set_list.append(val_set)
return train_set_list, val_set_list, cnt
filtered:remove_function_no_docstring: 477 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 60
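The splitter above only needs pandas, NumPy and scikit-learn, so it can be exercised on a toy interaction frame with `user`, `item` and `timestamp` columns. A minimal sketch, assuming the module is importable as `daisy.utils.splitter` as in the repository:

```python
# Toy run of the splitting utilities shown above (illustrative; requires the daisyRec code).
import pandas as pd
from daisy.utils.splitter import split_test, split_validation

df = pd.DataFrame({
    "user":      [1, 1, 1, 2, 2, 3, 3, 3, 3, 4],
    "item":      [10, 11, 12, 10, 13, 11, 14, 15, 16, 10],
    "timestamp": range(10),
})
# 'tloo' holds out each user's most recent interaction as the test set
train, test = split_test(df, test_method="tloo")
# two random train/validation splits of the remaining data
train_list, val_list, cnt = split_validation(train, val_method="fo", fold_num=2)
print(len(train), len(test), cnt)  # 6 4 2 for this toy frame
```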
hexsha: 2a552d302442f5a78ca02c89a6511397c0c088c0 | size: 3,007 | ext: py | lang: Python
max_stars_repo: path=ethereum_stats.py | name=cleardev0829/ethereum-nft-activity | head_hexsha=263f0eb93e6f812c2fd3941ab85c1e56b44e2663 | licenses=["MIT"] | stars_count=84 | stars_event_min=2021-05-16T00:57:07.000Z | stars_event_max=2022-03-30T21:36:17.000Z
max_issues_repo: path=ethereum_stats.py | name=cleardev0829/ethereum-nft-activity | head_hexsha=263f0eb93e6f812c2fd3941ab85c1e56b44e2663 | licenses=["MIT"] | issues_count=11 | issues_event_min=2021-05-17T05:31:57.000Z | issues_event_max=2022-03-15T08:34:43.000Z
max_forks_repo: path=ethereum_stats.py | name=cleardev0829/ethereum-nft-activity | head_hexsha=263f0eb93e6f812c2fd3941ab85c1e56b44e2663 | licenses=["MIT"] | forks_count=6 | forks_event_min=2021-05-16T10:17:04.000Z | forks_event_max=2022-03-23T23:55:01.000Z
content:
import requests
import os
from collections import defaultdict
import pandas as pd
from io import StringIO
from nearest_dict import NearestDict
from utils import load_stats_endpoint
if __name__ == '__main__':
EthereumStats(verbose=True, update=True)
avg_line_length: 38.063291 | max_line_length: 71 | alphanum_fraction: 0.62155
original_content:
import requests
import os
from collections import defaultdict
import pandas as pd
from io import StringIO
from nearest_dict import NearestDict
from utils import load_stats_endpoint
class EthereumStats:
def __init__(self, update=False, verbose=False):
self.cache_fn = 'data/ethereum_stats.csv'
if update or not os.path.exists(self.cache_fn):
self.update(verbose)
df = pd.read_csv(self.cache_fn)
df = df.fillna(0) # important for dailyethburnt
dates = [e.date() for e in pd.to_datetime(df['Date'])]
self.dates = dates
def build_lookup(name, kind, scaling=1):
values = [kind(e) * scaling for e in df[name]]
return NearestDict(zip(dates, values))
self.tx_count = build_lookup('tx', int)
self.miner_fees = build_lookup('transactionfee', float, 1/1e18)
self.block_count = build_lookup('blocks', int)
self.block_rewards = build_lookup('blockreward', float)
self.gas_used = build_lookup('gasused', int)
self.price = build_lookup('etherprice', float)
self.hashrate = build_lookup('hashrate', float)
self.burnt = build_lookup('dailyethburnt', float)
tx_fees = [self.miner_fees[e] + self.burnt[e] for e in dates]
self.tx_fees = NearestDict(zip(dates, tx_fees))
self.tx_count_total = sum(self.tx_count.values)
self.miner_fees_total = sum(self.tx_fees.values)
self.block_count_total = sum(self.block_count.values)
self.block_rewards_total = sum(self.block_rewards.values)
self.gas_used_total = sum(self.gas_used.values)
self.price_total = sum(self.price.values)
self.burnt_total = sum(self.burnt.values)
self.tx_fees_total = sum(self.tx_fees.values)
def update(self, verbose=False):
collected = defaultdict(dict)
def add_source(endpoint):
headers = {'User-Agent': 'Chrome'}
if verbose:
print('Updating', endpoint)
url = load_stats_endpoint(endpoint)
res = requests.get(url, headers=headers)
if verbose:
print('\t', len(res.content), 'bytes')
content = StringIO(res.content.decode('utf8'))
rows = pd.read_csv(content)
cols = ['Date(UTC)','Value']
if len(rows.columns) == 2:
cols[1] = 'BurntFees'
for date, value in rows[cols].values:
date = pd.to_datetime(date).date()
collected[date][endpoint] = value
add_source('tx')
add_source('transactionfee')
add_source('blocks')
add_source('blockreward')
add_source('gasused')
add_source('etherprice')
add_source('hashrate')
add_source('dailyethburnt')
df = pd.DataFrame(collected).transpose()
df.index.name = 'Date'
df.to_csv(self.cache_fn)
if __name__ == '__main__':
EthereumStats(verbose=True, update=True)
filtered:remove_function_no_docstring: 2,679 | filtered:remove_class_no_docstring: -1 | filtered:remove_delete_markers: 76
hexsha: 60299346a8dbfa82fe69fd77f6f240edb4338f85 | size: 575 | ext: py | lang: Python
max_stars_repo: path=bitmovin_api_sdk/models/filter_type.py | name=hofmannben/bitmovin-api-sdk-python | head_hexsha=71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | licenses=["MIT"] | stars_count=null | stars_event_min=null | stars_event_max=null
max_issues_repo: path=bitmovin_api_sdk/models/filter_type.py | name=hofmannben/bitmovin-api-sdk-python | head_hexsha=71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | licenses=["MIT"] | issues_count=1 | issues_event_min=2020-07-06T07:13:43.000Z | issues_event_max=2020-07-06T07:13:43.000Z
max_forks_repo: path=bitmovin_api_sdk/models/filter_type.py | name=hofmannben/bitmovin-api-sdk-python | head_hexsha=71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | licenses=["MIT"] | forks_count=1 | forks_event_min=2020-07-06T07:07:26.000Z | forks_event_max=2020-07-06T07:07:26.000Z
content:
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
avg_line_length: 25 | max_line_length: 59 | alphanum_fraction: 0.704348
original_content:
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class FilterType(Enum):
CROP = "CROP"
CONFORM = "CONFORM"
WATERMARK = "WATERMARK"
ENHANCED_WATERMARK = "ENHANCED_WATERMARK"
ROTATE = "ROTATE"
DEINTERLACE = "DEINTERLACE"
AUDIO_MIX = "AUDIO_MIX"
DENOISE_HQDN3D = "DENOISE_HQDN3D"
TEXT = "TEXT"
UNSHARP = "UNSHARP"
SCALE = "SCALE"
INTERLACE = "INTERLACE"
AUDIO_VOLUME = "AUDIO_VOLUME"
EBU_R128_SINGLE_PASS = "EBU_R128_SINGLE_PASS"
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 412 | filtered:remove_delete_markers: 23
hexsha: 7bfe5a2bda195f594b3a206467758abb045a880d | size: 4,171 | ext: py | lang: Python
max_stars_repo: path=smartcontract/venv/lib/python3.6/site-packages/bytecode/bytecode.py | name=simplitech/neoinvoice | head_hexsha=bc9a0217858938b49f99fef13b3439f4a537a5f5 | licenses=["MIT"] | stars_count=null | stars_event_min=null | stars_event_max=null
max_issues_repo: path=smartcontract/venv/lib/python3.6/site-packages/bytecode/bytecode.py | name=simplitech/neoinvoice | head_hexsha=bc9a0217858938b49f99fef13b3439f4a537a5f5 | licenses=["MIT"] | issues_count=null | issues_event_min=null | issues_event_max=null
max_forks_repo: path=smartcontract/venv/lib/python3.6/site-packages/bytecode/bytecode.py | name=simplitech/neoinvoice | head_hexsha=bc9a0217858938b49f99fef13b3439f4a537a5f5 | licenses=["MIT"] | forks_count=null | forks_event_min=null | forks_event_max=null
content:
# alias to keep the 'bytecode' variable free
import bytecode as _bytecode
from bytecode.instr import UNSET, Label, SetLineno, Instr
avg_line_length: 32.333333 | max_line_length: 79 | alphanum_fraction: 0.580676
original_content:
# alias to keep the 'bytecode' variable free
import bytecode as _bytecode
from bytecode.instr import UNSET, Label, SetLineno, Instr
class BaseBytecode:
def __init__(self):
self.argcount = 0
self.kwonlyargcount = 0
# FIXME: insane and safe value until _ConvertBytecodeToConcrete is able
# to compute the value itself
self._stacksize = 256
# FIXME: use something higher level? make it private?
self.flags = 0
self.first_lineno = 1
self.name = '<module>'
self.filename = '<string>'
self.docstring = UNSET
self.cellvars = []
# we cannot recreate freevars from instructions because of super()
# special-case
self.freevars = []
def _copy_attr_from(self, bytecode):
self.argcount = bytecode.argcount
self.kwonlyargcount = bytecode.kwonlyargcount
self._stacksize = bytecode._stacksize
self.flags = bytecode.flags
self.first_lineno = bytecode.first_lineno
self.name = bytecode.name
self.filename = bytecode.filename
self.docstring = bytecode.docstring
self.cellvars = list(bytecode.cellvars)
self.freevars = list(bytecode.freevars)
def __eq__(self, other):
if type(self) != type(other):
return False
if self.argcount != other.argcount:
return False
if self.kwonlyargcount != other.kwonlyargcount:
return False
if self._stacksize != other._stacksize:
return False
if self.flags != other.flags:
return False
if self.first_lineno != other.first_lineno:
return False
if self.filename != other.filename:
return False
if self.name != other.name:
return False
if self.docstring != other.docstring:
return False
if self.cellvars != other.cellvars:
return False
if self.freevars != other.freevars:
return False
return True
class _InstrList(list):
def _flat(self):
instructions = []
labels = {}
jumps = []
offset = 0
for index, instr in enumerate(self):
if isinstance(instr, Label):
instructions.append('label_instr%s' % index)
labels[instr] = offset
else:
if isinstance(instr, Instr) and isinstance(instr.arg, Label):
target_label = instr.arg
instr = _bytecode.ConcreteInstr(instr.name, 0,
lineno=instr.lineno)
jumps.append((target_label, instr))
instructions.append(instr)
offset += 1
for target_label, instr in jumps:
instr.arg = labels[target_label]
return instructions
def __eq__(self, other):
if not isinstance(other, _InstrList):
other = _InstrList(other)
return (self._flat() == other._flat())
class Bytecode(_InstrList, BaseBytecode):
def __init__(self, instructions=None):
BaseBytecode.__init__(self)
if instructions is not None:
self.extend(instructions)
self.argnames = []
def __iter__(self):
instructions = super().__iter__()
for instr in instructions:
if not isinstance(instr, (Label, SetLineno, Instr,
_bytecode.ConcreteInstr)):
raise ValueError("Bytecode must only contain Label, "
"SetLineno, Instr and ConcreteInstr objects, "
"but %s was found"
% instr.__class__.__name__)
yield instr
@staticmethod
def from_code(code):
concrete = _bytecode.ConcreteBytecode.from_code(code)
return concrete.to_bytecode()
def to_code(self):
return self.to_concrete_bytecode().to_code()
def to_concrete_bytecode(self):
converter = _bytecode._ConvertBytecodeToConcrete(self)
return converter.to_concrete_bytecode()
filtered:remove_function_no_docstring: 3,659 | filtered:remove_class_no_docstring: 173 | filtered:remove_delete_markers: 204
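The `Bytecode` class above is the abstract container from the PyPI `bytecode` package; the `from_code`/`to_code` round trip it exposes can be sketched as follows (API details vary between package and Python versions, so treat this as illustrative):

```python
# Round-trip sketch with the bytecode package: decode, inspect, re-assemble.
from bytecode import Bytecode, Instr

def f():
    return 1

bc = Bytecode.from_code(f.__code__)                   # concrete -> abstract instructions
print([i.name for i in bc if isinstance(i, Instr)])   # e.g. ['LOAD_CONST', 'RETURN_VALUE']
f.__code__ = bc.to_code()                             # re-assemble and swap back in
print(f())                                            # still 1
```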
hexsha: 8a50b79f3cf1186e9c93e7f970e028198d7fafec | size: 3,276 | ext: py | lang: Python
max_stars_repo: path=Totoro/db.py | name=sdss/Totoro | head_hexsha=74befd99bda47ebb8c03a276b57371b5788e154a | licenses=["Apache-2.0"] | stars_count=1 | stars_event_min=2018-08-22T00:34:30.000Z | stars_event_max=2018-08-22T00:34:30.000Z
max_issues_repo: path=Totoro/db.py | name=sdss/Totoro | head_hexsha=74befd99bda47ebb8c03a276b57371b5788e154a | licenses=["Apache-2.0"] | issues_count=4 | issues_event_min=2018-06-06T22:10:14.000Z | issues_event_max=2018-06-14T04:47:23.000Z
max_forks_repo: path=Totoro/db.py | name=sdss/Totoro | head_hexsha=74befd99bda47ebb8c03a276b57371b5788e154a | licenses=["Apache-2.0"] | forks_count=null | forks_event_min=null | forks_event_max=null
content:
#!/usr/bin/env python
# encoding: utf-8
"""
db.py
Created by José Sánchez-Gallego on 25 Oct 2015.
Licensed under a 3-clause BSD license.
Revision history:
25 Oct 2015 J. Sánchez-Gallego
Initial version
"""
from __future__ import division, print_function
from SDSSconnect import DatabaseConnection
from Totoro import config
def getConfigurationProfiles():
"""Returns a dictionary with all currently configured DB profiles."""
profiles = {}
for kk in config:
if 'dbConnection' in kk and kk != 'dbConnection':
profileName = config[kk]['name'].lower()
profiles[profileName] = config[kk]
if 'password' not in profiles[profileName]:
profiles[profileName]['password'] = ''
return profiles
def getConnection(profile=None):
"""Returns a connection.
If `profile=None`, the default connection is returned."""
# To avoid circular import errors
from Totoro.utils.utils import checkOpenSession
configProfiles = getConfigurationProfiles()
if len(DatabaseConnection.listConnections()) > 0 and profile is None:
return DatabaseConnection.getDefaultConnection()
elif len(DatabaseConnection.listConnections()) == 0 and profile is None:
# Creates the default DB connection
databaseConnectionString = (
'postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}'
.format(**config['dbConnection']))
dbConn = DatabaseConnection(
databaseConnectionString=databaseConnectionString,
new=True,
name=config['dbConnection']['name'],
default=True)
checkOpenSession()
return dbConn
else:
if profile.lower() in DatabaseConnection.listConnections():
return DatabaseConnection.getConnection(profile.lower())
else:
if profile.lower() in configProfiles:
databaseConnectionString = ('postgresql+psycopg2://{user}:{password}@'
'{host}:{port}/{database}'
.format(**configProfiles[profile.lower()]))
dbConn = DatabaseConnection(
databaseConnectionString=databaseConnectionString,
new=True,
name=profile.lower())
checkOpenSession()
return dbConn
else:
raise ValueError('profile {0} does not exist'.format(profile))
def getConnectionFull(profile=None):
"""Returns a connection, its session, plateDB and mangaDB."""
dbConn = getConnection(profile=profile)
return dbConn, dbConn.Session, dbConn.plateDB, dbConn.mangaDB
def setDefaulProfile(profile):
"""Sets a profile as default."""
if len(DatabaseConnection.listConnections()) > 0:
if DatabaseConnection.getDefaultConnectionName() == 'profile':
return
if profile not in getConfigurationProfiles():
raise ValueError('profile {0} does not exist'.format(profile))
if profile in DatabaseConnection.listConnections():
DatabaseConnection.setDefaultConnection(profile)
else:
db = getConnection(profile=profile)
db.setDefaultConnection(profile)
avg_line_length: 32.76 | max_line_length: 87 | alphanum_fraction: 0.639194
original_content:
#!/usr/bin/env python
# encoding: utf-8
"""
db.py
Created by José Sánchez-Gallego on 25 Oct 2015.
Licensed under a 3-clause BSD license.
Revision history:
25 Oct 2015 J. Sánchez-Gallego
Initial version
"""
from __future__ import division, print_function
from SDSSconnect import DatabaseConnection
from Totoro import config
def getConfigurationProfiles():
"""Returns a dictionary with all currently configured DB profiles."""
profiles = {}
for kk in config:
if 'dbConnection' in kk and kk != 'dbConnection':
profileName = config[kk]['name'].lower()
profiles[profileName] = config[kk]
if 'password' not in profiles[profileName]:
profiles[profileName]['password'] = ''
return profiles
def getConnection(profile=None):
"""Returns a connection.
If `profile=None`, the default connection is returned."""
# To avoid circular import errors
from Totoro.utils.utils import checkOpenSession
configProfiles = getConfigurationProfiles()
if len(DatabaseConnection.listConnections()) > 0 and profile is None:
return DatabaseConnection.getDefaultConnection()
elif len(DatabaseConnection.listConnections()) == 0 and profile is None:
# Creates the default DB connection
databaseConnectionString = (
'postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}'
.format(**config['dbConnection']))
dbConn = DatabaseConnection(
databaseConnectionString=databaseConnectionString,
new=True,
name=config['dbConnection']['name'],
default=True)
checkOpenSession()
return dbConn
else:
if profile.lower() in DatabaseConnection.listConnections():
return DatabaseConnection.getConnection(profile.lower())
else:
if profile.lower() in configProfiles:
databaseConnectionString = ('postgresql+psycopg2://{user}:{password}@'
'{host}:{port}/{database}'
.format(**configProfiles[profile.lower()]))
dbConn = DatabaseConnection(
databaseConnectionString=databaseConnectionString,
new=True,
name=profile.lower())
checkOpenSession()
return dbConn
else:
raise ValueError('profile {0} does not exist'.format(profile))
def getConnectionFull(profile=None):
"""Returns a connection, its session, plateDB and mangaDB."""
dbConn = getConnection(profile=profile)
return dbConn, dbConn.Session, dbConn.plateDB, dbConn.mangaDB
def setDefaulProfile(profile):
"""Sets a profile as default."""
if len(DatabaseConnection.listConnections()) > 0:
if DatabaseConnection.getDefaultConnectionName() == 'profile':
return
if profile not in getConfigurationProfiles():
raise ValueError('profile {0} does not exist'.format(profile))
if profile in DatabaseConnection.listConnections():
DatabaseConnection.setDefaultConnection(profile)
else:
db = getConnection(profile=profile)
db.setDefaultConnection(profile)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 86555084f95f7e761045b55262c48702cf4fedb8 | size: 700 | ext: py | lang: Python
max_stars_repo: path=Python-desenvolvimento/ex109.py | name=MarcosMaciel-MMRS/Desenvolvimento-python | head_hexsha=2b2fc54788da3ca110d495b9e80a494f2b31fb09 | licenses=["MIT"] | stars_count=null | stars_event_min=null | stars_event_max=null
max_issues_repo: path=Python-desenvolvimento/ex109.py | name=MarcosMaciel-MMRS/Desenvolvimento-python | head_hexsha=2b2fc54788da3ca110d495b9e80a494f2b31fb09 | licenses=["MIT"] | issues_count=null | issues_event_min=null | issues_event_max=null
max_forks_repo: path=Python-desenvolvimento/ex109.py | name=MarcosMaciel-MMRS/Desenvolvimento-python | head_hexsha=2b2fc54788da3ca110d495b9e80a494f2b31fb09 | licenses=["MIT"] | forks_count=null | forks_event_min=null | forks_event_max=null
content:
# Modifique as funções que form criadas no desafio 107 para que elas aceitem um parâmetro a mais,
# informando se o valor retornado por elas vai ser ou não formatado pela função moeda(), desenvolvida no desafio 108.
import moeda
#Programa Principal
valor = float(input('Digite o preço:R$ '))
print(f' A aumentando 10% de {moeda.moeda(valor)} é igual à: {moeda.aumentar(valor, formatado=True)}')# isso são parametros do pacote de moedas.
print(f' Diminuindo 10% de {moeda.moeda(valor)} é igual à: {moeda.diminuir(valor, True)}')
print(f' O dobro de {moeda.moeda(valor)} é igual à: R${moeda.dobro(valor, True)}')
print(f' A metade de {moeda.moeda(valor)}: é {moeda.metade(valor, True)}')
avg_line_length: 63.636364 | max_line_length: 146 | alphanum_fraction: 0.721429
original_content:
# Modifique as funções que form criadas no desafio 107 para que elas aceitem um parâmetro a mais,
# informando se o valor retornado por elas vai ser ou não formatado pela função moeda(), desenvolvida no desafio 108.
import moeda
#Programa Principal
valor = float(input('Digite o preço:R$ '))
print(f' A aumentando 10% de {moeda.moeda(valor)} é igual à: {moeda.aumentar(valor, formatado=True)}')# isso são parametros do pacote de moedas.
print(f' Diminuindo 10% de {moeda.moeda(valor)} é igual à: {moeda.diminuir(valor, True)}')
print(f' O dobro de {moeda.moeda(valor)} é igual à: R${moeda.dobro(valor, True)}')
print(f' A metade de {moeda.moeda(valor)}: é {moeda.metade(valor, True)}')
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 9cbaea8964bd6669ced4f828d2fdca4191294703 | size: 4,231 | ext: py | lang: Python
max_stars_repo: path=mk2/plugins/discord.py | name=mark2devel/mark2 | head_hexsha=24db9461060ece13fc9e940721a561a3da69dec8 | licenses=["MIT-feh"] | stars_count=12 | stars_event_min=2021-10-17T18:24:48.000Z | stars_event_max=2022-02-02T17:21:16.000Z
max_issues_repo: path=mk2/plugins/discord.py | name=mark2devel/mark2 | head_hexsha=24db9461060ece13fc9e940721a561a3da69dec8 | licenses=["MIT-feh"] | issues_count=19 | issues_event_min=2021-09-30T15:32:37.000Z | issues_event_max=2022-03-11T12:45:44.000Z
max_forks_repo: path=mk2/plugins/discord.py | name=mark2devel/mark2 | head_hexsha=24db9461060ece13fc9e940721a561a3da69dec8 | licenses=["MIT-feh"] | forks_count=5 | forks_event_min=2021-11-21T17:43:37.000Z | forks_event_max=2021-12-18T15:10:28.000Z
content:
import treq
from mk2.events import EventPriority, ServerEvent, ServerStarted, ServerStopped, ServerStopping, ServerStarting
from mk2.plugins import Plugin
from mk2.shared import decode_if_bytes
class WebhookObject(dict):
""" Custom dict object that represents a discord webhook object """
def add_embed(self, title, fields=[]):
""" Creates an embed object with the specified title and optional list of fields"""
self.embeds.append({"title": title, "fields": fields})
def add_embed_field(self, title, name, value, inline=False):
""" Adds a field to the embed matching the title given """
for embed in self.embeds:
if embed["title"] == title:
embed["fields"].append({"name": name, "value": value, "inline": inline})
break
avg_line_length: 36.474138 | max_line_length: 111 | alphanum_fraction: 0.622075
original_content:
import treq
from mk2.events import EventPriority, ServerEvent, ServerStarted, ServerStopped, ServerStopping, ServerStarting
from mk2.plugins import Plugin
from mk2.shared import decode_if_bytes
class WebhookObject(dict):
""" Custom dict object that represents a discord webhook object """
def __init__(self, username):
self.username = username
self.content = ""
self.embeds = []
def set_content(self, content):
self.content = content
def add_embed(self, title, fields=[]):
""" Creates an embed object with the specified title and optional list of fields"""
self.embeds.append({"title": title, "fields": fields})
def add_embed_field(self, title, name, value, inline=False):
""" Adds a field to the embed matching the title given """
for embed in self.embeds:
if embed["title"] == title:
embed["fields"].append({"name": name, "value": value, "inline": inline})
break
class Discord(Plugin):
webhook_url = Plugin.Property(required=True)
webhook_name = Plugin.Property(default="mark2")
server_name = Plugin.Property(required=True)
# Server event toggles
server_started_enabled = Plugin.Property(default=True)
server_stopped_enabled = Plugin.Property(default=True)
server_starting_enabled = Plugin.Property(default=False)
server_stopping_enabled = Plugin.Property(default=False)
stop_types = {
0: "Terminate",
1: "Restart",
2: "Hold"
}
def setup(self):
# Register event handlers
self.register(self.handle_server_event, ServerEvent, priority=EventPriority.MONITOR)
self.register(self.handle_server_starting, ServerStarting)
self.register(self.handle_server_started, ServerStarted)
self.register(self.handle_server_stopping, ServerStopping)
self.register(self.handle_server_stopped, ServerStopped)
def handle_server_event(self, event):
webhook = WebhookObject(self.webhook_name)
title = "Server event from: {}".format(self.server_name)
fields = [
{"name": "Cause", "value": event.cause},
{"name": "Data", "value": event.data}
]
webhook.add_embed(title, fields)
self.send_webhook(webhook)
def handle_server_starting(self, event):
if self.server_starting_enabled:
webhook = WebhookObject(self.webhook_name)
title = "Server Starting Event"
fields = []
fields = [
{"name": decode_if_bytes(self.server_name), "value": "Server is starting"},
{"name": "PID", "value": event.pid},
]
webhook.add_embed(title, fields)
self.send_webhook(webhook)
def handle_server_started(self, event):
if self.server_started_enabled:
webhook = WebhookObject(self.webhook_name)
title = "Server Started Event"
fields = [
{"name": decode_if_bytes(self.server_name), "value": "Server Started"}
]
webhook.add_embed(title, fields)
self.send_webhook(webhook)
def handle_server_stopping(self, event):
if self.server_stopping_enabled:
webhook = WebhookObject(self.webhook_name)
title = "Server Stopping Event"
fields = [
{"name": decode_if_bytes(self.server_name), "value": "Server is stopping"},
{"name": "Reason", "value": event.reason},
{"name": "Stop Type", "value": self.stop_types.get(event.respawn)}
]
webhook.add_embed(title, fields)
self.send_webhook(webhook)
def handle_server_stopped(self, event):
if self.server_stopped_enabled:
webhook = WebhookObject(self.webhook_name)
title = "Server Stopped Event"
fields = [
{"name": decode_if_bytes(self.server_name), "value": "Server Stopped"}
]
webhook.add_embed(title, fields)
self.send_webhook(webhook)
def send_webhook(self, data):
d = treq.post(self.webhook_url, json=data.__dict__)
filtered:remove_function_no_docstring: 2,623 | filtered:remove_class_no_docstring: 706 | filtered:remove_delete_markers: 76
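`WebhookObject` keeps its payload in instance attributes, which is exactly what `send_webhook` serialises via `treq.post(..., json=data.__dict__)`. A trimmed, self-contained copy of that payload-building logic (no mark2 or Discord needed) to show what gets posted:

```python
# Self-contained copy of the payload-building logic from the plugin above.
class WebhookObject(dict):
    def __init__(self, username):
        self.username = username
        self.content = ""
        self.embeds = []

    def add_embed(self, title, fields=None):
        self.embeds.append({"title": title, "fields": fields or []})

    def add_embed_field(self, title, name, value, inline=False):
        for embed in self.embeds:
            if embed["title"] == title:
                embed["fields"].append({"name": name, "value": value, "inline": inline})
                break

payload = WebhookObject("mark2")
payload.add_embed("Server Started Event")
payload.add_embed_field("Server Started Event", "my-server", "Server Started")
print(payload.__dict__)  # this dict is what would be POSTed to the webhook URL
```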
hexsha: db216b646a4720cac5c7bf61c0221fc07bbc28d6 | size: 3,396 | ext: py | lang: Python
max_stars_repo: path=src/sciplot/analysis.py | name=simonUU/sciplot | head_hexsha=896a7a4efa7911a2bdf303311be674de12171fc4 | licenses=["MIT"] | stars_count=null | stars_event_min=null | stars_event_max=null
max_issues_repo: path=src/sciplot/analysis.py | name=simonUU/sciplot | head_hexsha=896a7a4efa7911a2bdf303311be674de12171fc4 | licenses=["MIT"] | issues_count=null | issues_event_min=null | issues_event_max=null
max_forks_repo: path=src/sciplot/analysis.py | name=simonUU/sciplot | head_hexsha=896a7a4efa7911a2bdf303311be674de12171fc4 | licenses=["MIT"] | forks_count=null | forks_event_min=null | forks_event_max=null
content:
# -*- coding: utf-8 -*-
""" Analysis tools
"""
import sciplot
import numpy as np
import matplotlib.pyplot as plt
from .functions import _hist_init
def plot_flatness(sig, tag, bins=None, ax=None, xrange=None, percent_step=5):
""" Plotting differences of sig distribution in percentiles of tag distribution
Args:
sig:
tag:
bins:
ax:
xrange:
percent_step:
Returns:
"""
if ax is None:
fix, ax = plt.subplots()
xaxis = _hist_init(sig, bins=bins, xrange=xrange)
colormap = plt.get_cmap('magma')
orig, x = np.histogram(sig, bins=xaxis, range=xrange, normed=True, )
bin_center = ((x + np.roll(x, 1)) / 2)[1:]
tmp = orig/orig
ax.plot(bin_center, tmp, color='black', lw=1)
for quantil in np.arange(5, 100, percent_step):
cut = np.percentile(tag, quantil)
sel = tag >= cut
y, x = np.histogram(sig[sel], bins=x, range=xrange, normed=True, )
y /= orig
ax.fill_between(bin_center, tmp, y, color=colormap(quantil/100.0))
tmp = y
def ratio(y1, y2, y1_err=None, y2_err= None):
""" calculate the ratio between two histograms y1/y2
Args:
y1: y values of first histogram
y2: y values of second histogram
y1_err: (optional) error of first
y2_err: (optional) error of second
Returns:
ratio, ratio_error
"""
assert len(y1) == len(y2), "y1 and y2 length does not match"
y1e = np.sqrt(y1) if y1_err is None else y1_err
y2e = np.sqrt(y2) if y2_err is None else y2_err
r = y1/y2
re = np.sqrt((y1/(1.0*y2*y2))*(y1/(1.0*y2*y2))*y2e*y2e+(1/(1.0*y2))*(1/(1.0*y2))*y1e*y1e)
return r, re
def data_mc_ratio(data, mc, label_data='Data', label_mc="MC",
y_label=None, figsize=None, ratio_range=(0, 2),
*args, **kwarg):
""" Plot a comparison between two sets of data
Args:
data:
mc:
label_data:
label_mc:
y_label:
figsize:
ratio_range:
*args:
**kwarg:
Returns:
"""
f, axes = plt.subplots(2, 1, gridspec_kw={"height_ratios": [3, 1]}, sharex=True, figsize=figsize)
ax0 = axes[0]
hm = sciplot.hist(mc, lw=2, ax=ax0, label=label_mc, *args, **kwarg)
hd = sciplot.errorhist(data, ax=ax0, label=label_data, color='black')
ax0.legend()
ax1 = axes[1]
ry, rye = ratio(hd[0], hm[0])
sciplot.errorbar(hd[1], ry, rye, ax=ax1, color='grey')
ax1.axhline(1, color='grey', lw=0.5, ls='--')
f.subplots_adjust(hspace=0.1)
ax1.set_ylim(*ratio_range)
sciplot.xlim()
if y_label is not None:
ax0.set_ylabel(y_label)
ax1.set_ylabel("Ratio")
ax1.yaxis.set_label_coords(-0.08, 0.5)
ax0.yaxis.set_label_coords(-0.08, 0.5)
avg_line_length: 28.066116 | max_line_length: 101 | alphanum_fraction: 0.595701
original_content:
# -*- coding: utf-8 -*-
""" Analysis tools
"""
import sciplot
import numpy as np
import matplotlib.pyplot as plt
from .functions import _hist_init
def plot_flatness(sig, tag, bins=None, ax=None, xrange=None, percent_step=5):
""" Plotting differences of sig distribution in percentiles of tag distribution
Args:
sig:
tag:
bins:
ax:
xrange:
percent_step:
Returns:
"""
if ax is None:
fix, ax = plt.subplots()
xaxis = _hist_init(sig, bins=bins, xrange=xrange)
colormap = plt.get_cmap('magma')
orig, x = np.histogram(sig, bins=xaxis, range=xrange, normed=True, )
bin_center = ((x + np.roll(x, 1)) / 2)[1:]
tmp = orig/orig
ax.plot(bin_center, tmp, color='black', lw=1)
for quantil in np.arange(5, 100, percent_step):
cut = np.percentile(tag, quantil)
sel = tag >= cut
y, x = np.histogram(sig[sel], bins=x, range=xrange, normed=True, )
y /= orig
ax.fill_between(bin_center, tmp, y, color=colormap(quantil/100.0))
tmp = y
def profile(x, y, bins=None, range=None, fmt='.', *args, **kwargs):
try:
import scipy
except ImportError:
print("Scipy is needed for this feature")
else:
xaxis = _hist_init(x, bins, xrange=range)
means = scipy.stats.binned_statistic(x, y, bins=xaxis, statistic='mean').statistic
std = scipy.stats.binned_statistic(x, y, bins=xaxis, statistic=scipy.stats.sem).statistic
bin_centers = (xaxis[:-1] + xaxis[1:]) / 2.
plt.errorbar(x=bin_centers, y=means, yerr=std, linestyle='none', fmt=fmt, *args, **kwargs)
def ratio(y1, y2, y1_err=None, y2_err= None):
""" calculate the ratio between two histograms y1/y2
Args:
y1: y values of first histogram
y2: y values of second histogram
y1_err: (optional) error of first
y2_err: (optional) error of second
Returns:
ratio, ratio_error
"""
assert len(y1) == len(y2), "y1 and y2 length does not match"
y1e = np.sqrt(y1) if y1_err is None else y1_err
y2e = np.sqrt(y2) if y2_err is None else y2_err
r = y1/y2
re = np.sqrt((y1/(1.0*y2*y2))*(y1/(1.0*y2*y2))*y2e*y2e+(1/(1.0*y2))*(1/(1.0*y2))*y1e*y1e)
return r, re
def data_mc_ratio(data, mc, label_data='Data', label_mc="MC",
y_label=None, figsize=None, ratio_range=(0, 2),
*args, **kwarg):
""" Plot a comparison between two sets of data
Args:
data:
mc:
label_data:
label_mc:
y_label:
figsize:
ratio_range:
*args:
**kwarg:
Returns:
"""
f, axes = plt.subplots(2, 1, gridspec_kw={"height_ratios": [3, 1]}, sharex=True, figsize=figsize)
ax0 = axes[0]
hm = sciplot.hist(mc, lw=2, ax=ax0, label=label_mc, *args, **kwarg)
hd = sciplot.errorhist(data, ax=ax0, label=label_data, color='black')
ax0.legend()
ax1 = axes[1]
ry, rye = ratio(hd[0], hm[0])
sciplot.errorbar(hd[1], ry, rye, ax=ax1, color='grey')
ax1.axhline(1, color='grey', lw=0.5, ls='--')
f.subplots_adjust(hspace=0.1)
ax1.set_ylim(*ratio_range)
sciplot.xlim()
if y_label is not None:
ax0.set_ylabel(y_label)
ax1.set_ylabel("Ratio")
ax1.yaxis.set_label_coords(-0.08, 0.5)
ax0.yaxis.set_label_coords(-0.08, 0.5)
filtered:remove_function_no_docstring: 553 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 23
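The `ratio` helper in analysis.py is standard uncorrelated error propagation for r = y1/y2; the expression coded there corresponds to

$$
\sigma_r = \sqrt{\left(\frac{y_1}{y_2^{2}}\right)^{2}\sigma_{y_2}^{2} + \left(\frac{1}{y_2}\right)^{2}\sigma_{y_1}^{2}},
\qquad \sigma_{y_i} = \sqrt{y_i}\ \text{when no error is supplied.}
$$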
hexsha: 8de36e68ec6edfa63df90a9c396fa7136e08195b | size: 1,696 | ext: py | lang: Python
max_stars_repo: path=set2/CBCBitflipping.py | name=arunKumarNOP/Cryptopals-Solutions | head_hexsha=529474679972e17566a233de110f0b47078ef3b9 | licenses=["MIT"] | stars_count=null | stars_event_min=null | stars_event_max=null
max_issues_repo: path=set2/CBCBitflipping.py | name=arunKumarNOP/Cryptopals-Solutions | head_hexsha=529474679972e17566a233de110f0b47078ef3b9 | licenses=["MIT"] | issues_count=null | issues_event_min=null | issues_event_max=null
max_forks_repo: path=set2/CBCBitflipping.py | name=arunKumarNOP/Cryptopals-Solutions | head_hexsha=529474679972e17566a233de110f0b47078ef3b9 | licenses=["MIT"] | forks_count=null | forks_event_min=null | forks_event_max=null
content:
import string
import random
from set1.Aes_cipher import *
from set2.PkcsPadding import pkcs7_pad_check
from set1.Xor import xorPlain
KEY = ''.join([chr(random.randint(0,255)) for i in range(16)])
IV = ''.join([chr(random.randint(0,255)) for i in range(16)])
if __name__ == '__main__':
# replace d with ; and second d with =
enc = format_string('dadmindtrue')
print 'Before:', is_admin(str(enc))
# https://upload.wikimedia.org/wikipedia/commons/thumb/2/2a/CBC_decryption.svg/601px-CBC_decryption.svg.png
# we know that 'comment1=cooking%20MCs;userdata='+user_input+';comment2=%20like%20a%20pound%20of%20bacon'
# is being encrypted so split it into block of 16 and determine in which block our input falls into.
# Take the previous encrypted block xor with the plain input to get the output of the AES cipher of the current block
# then xor it with the desired output and make the previous block equal to that
# in my case it was 2nd block
enc2 = enc[0:16] + getBitFlippedBlock(enc[16:32], 'dadmindtrue;comm', ';admin=true;comm') + enc[32:]
print 'After:', is_admin(str(enc2))
avg_line_length: 39.44186 | max_line_length: 121 | alphanum_fraction: 0.719929
original_content:
import string
import random
from set1.Aes_cipher import *
from set2.PkcsPadding import pkcs7_pad_check
from set1.Xor import xorPlain
KEY = ''.join([chr(random.randint(0,255)) for i in range(16)])
IV = ''.join([chr(random.randint(0,255)) for i in range(16)])
def format_string(user_input):
user_input = user_input.replace(';', '').replace('=', '')
plainTxt = 'comment1=cooking%20MCs;userdata='+user_input+';comment2=%20like%20a%20pound%20of%20bacon'
return encrypt_cbc(plainTxt, KEY, IV)
def is_admin(cipher_txt):
decrypted = pkcs7_pad_check(decrypt_cbc(cipher_txt, KEY, IV))
pairs = decrypted.split(';')
if 'admin=true' in pairs:
return True
return False
def getBitFlippedBlock(enc_block, input_plain, desired_output):
return xorPlain(xorPlain(input_plain,desired_output), enc_block)
if __name__ == '__main__':
# replace d with ; and second d with =
enc = format_string('dadmindtrue')
print 'Before:', is_admin(str(enc))
# https://upload.wikimedia.org/wikipedia/commons/thumb/2/2a/CBC_decryption.svg/601px-CBC_decryption.svg.png
# we know that 'comment1=cooking%20MCs;userdata='+user_input+';comment2=%20like%20a%20pound%20of%20bacon'
# is being encrypted so split it into block of 16 and determine in which block our input falls into.
# Take the previous encrypted block xor with the plain input to get the output of the AES cipher of the current block
# then xor it with the desired output and make the previous block equal to that
# in my case it was 2nd block
enc2 = enc[0:16] + getBitFlippedBlock(enc[16:32], 'dadmindtrue;comm', ';admin=true;comm') + enc[32:]
print 'After:', is_admin(str(enc2))
filtered:remove_function_no_docstring: 500 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 69
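The attack in this record works because CBC decryption XORs each decrypted block with the previous ciphertext block, so replacing that previous block C_{i-1} with C_{i-1} XOR P_i XOR P'_i makes block i decrypt to the chosen P'_i (while garbling block i-1). A Python 3, bytes-based version of the XOR step that `getBitFlippedBlock` performs:

```python
# Stand-alone XOR step of the CBC bit-flipping attack (bytes instead of Python 2 str).
def bitflip_block(prev_cipher_block: bytes, known_plain: bytes, desired_plain: bytes) -> bytes:
    # C'_{i-1} = C_{i-1} XOR P_i XOR P'_i
    return bytes(c ^ a ^ b for c, a, b in zip(prev_cipher_block, known_plain, desired_plain))

flipped = bitflip_block(b"\x00" * 16, b"dadmindtrue;comm", b";admin=true;comm")
print(flipped.hex())
```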
hexsha: 164a70d52abb92a71df0d32864f77473b5e2decc | size: 12,608 | ext: py | lang: Python
max_stars_repo: path=networks/networks.py | name=nexuslrf/Tetris-RL | head_hexsha=94e4066ed08e87493141af37f11b1b2387b189a3 | licenses=["MIT"] | stars_count=3 | stars_event_min=2020-12-15T18:08:22.000Z | stars_event_max=2020-12-18T06:09:49.000Z
max_issues_repo: path=networks/networks.py | name=nexuslrf/Tetris-RL | head_hexsha=94e4066ed08e87493141af37f11b1b2387b189a3 | licenses=["MIT"] | issues_count=null | issues_event_min=null | issues_event_max=null
max_forks_repo: path=networks/networks.py | name=nexuslrf/Tetris-RL | head_hexsha=94e4066ed08e87493141af37f11b1b2387b189a3 | licenses=["MIT"] | forks_count=null | forks_event_min=null | forks_event_max=null
content:
import torch
import torch.nn as nn
import torch.nn.functional as F
from networks.layers import NoisyLinear
from networks.network_bodies import SimpleBody, AtariBody
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
########Recurrent Architectures#########
########Actor Critic Architectures#########
avg_line_length: 36.651163 | max_line_length: 152 | alphanum_fraction: 0.633883
original_content:
import torch
import torch.nn as nn
import torch.nn.functional as F
from networks.layers import NoisyLinear
from networks.network_bodies import SimpleBody, AtariBody
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DQN(nn.Module):
def __init__(self, input_shape, num_actions, noisy=False, sigma_init=0.5, body=SimpleBody):
super(DQN, self).__init__()
self.input_shape = input_shape
self.num_actions = num_actions
self.noisy=noisy
self.body = body(input_shape, num_actions, noisy, sigma_init)
self.fc1 = nn.Linear(self.body.feature_size(), 512) if not self.noisy else NoisyLinear(self.body.feature_size(), 512, sigma_init)
self.fc2 = nn.Linear(512, self.num_actions) if not self.noisy else NoisyLinear(512, self.num_actions, sigma_init)
def forward(self, x):
x = self.body(x)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def sample_noise(self):
if self.noisy:
self.body.sample_noise()
self.fc1.sample_noise()
self.fc2.sample_noise()
class DuelingDQN(nn.Module):
def __init__(self, input_shape, num_outputs, noisy=False, sigma_init=0.5, body=SimpleBody):
super(DuelingDQN, self).__init__()
self.input_shape = input_shape
self.num_actions = num_outputs
self.noisy=noisy
self.body = body(input_shape, num_outputs, noisy, sigma_init)
self.adv1 = nn.Linear(self.body.feature_size(), 512) if not self.noisy else NoisyLinear(self.body.feature_size(), 512, sigma_init)
self.adv2 = nn.Linear(512, self.num_actions) if not self.noisy else NoisyLinear(512, self.num_actions, sigma_init)
self.val1 = nn.Linear(self.body.feature_size(), 512) if not self.noisy else NoisyLinear(self.body.feature_size(), 512, sigma_init)
self.val2 = nn.Linear(512, 1) if not self.noisy else NoisyLinear(512, 1, sigma_init)
def forward(self, x):
x = self.body(x)
adv = F.relu(self.adv1(x))
adv = self.adv2(adv)
val = F.relu(self.val1(x))
val = self.val2(val)
return val + adv - adv.mean()
def sample_noise(self):
if self.noisy:
self.body.sample_noise()
self.adv1.sample_noise()
self.adv2.sample_noise()
self.val1.sample_noise()
self.val2.sample_noise()
class CategoricalDQN(nn.Module):
def __init__(self, input_shape, num_outputs, noisy=False, sigma_init=0.5, body=SimpleBody, atoms=51):
super(CategoricalDQN, self).__init__()
self.input_shape = input_shape
self.num_actions = num_outputs
self.noisy=noisy
self.atoms=atoms
self.body = body(input_shape, num_outputs, noisy, sigma_init)
self.fc1 = nn.Linear(self.body.feature_size(), 512) if not self.noisy else NoisyLinear(self.body.feature_size(), 512, sigma_init)
self.fc2 = nn.Linear(512, self.num_actions*self.atoms) if not self.noisy else NoisyLinear(512, self.num_actions*self.atoms, sigma_init)
def forward(self, x):
x = self.body(x)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x.view(-1, self.num_actions, self.atoms), dim=2)
def sample_noise(self):
if self.noisy:
self.body.sample_noise()
self.fc1.sample_noise()
self.fc2.sample_noise()
class CategoricalDuelingDQN(nn.Module):
def __init__(self, input_shape, num_outputs, noisy=False, sigma_init=0.5, body=SimpleBody, atoms=51):
super(CategoricalDuelingDQN, self).__init__()
self.input_shape = input_shape
self.num_actions = num_outputs
self.noisy=noisy
self.atoms=atoms
self.body = body(input_shape, num_outputs, noisy, sigma_init)
self.adv1 = nn.Linear(self.body.feature_size(), 512) if not self.noisy else NoisyLinear(self.body.feature_size(), 512, sigma_init)
self.adv2 = nn.Linear(512, self.num_actions*self.atoms) if not self.noisy else NoisyLinear(512, self.num_actions*self.atoms, sigma_init)
self.val1 = nn.Linear(self.body.feature_size(), 512) if not self.noisy else NoisyLinear(self.body.feature_size(), 512, sigma_init)
self.val2 = nn.Linear(512, 1*self.atoms) if not self.noisy else NoisyLinear(512, 1*self.atoms, sigma_init)
def forward(self, x):
x = self.body(x)
adv = F.relu(self.adv1(x))
adv = self.adv2(adv).view(-1, self.num_actions, self.atoms)
val = F.relu(self.val1(x))
val = self.val2(val).view(-1, 1, self.atoms)
final = val + adv - adv.mean(dim=1).view(-1, 1, self.atoms)
return F.softmax(final, dim=2)
def sample_noise(self):
if self.noisy:
self.body.sample_noise()
self.adv1.sample_noise()
self.adv2.sample_noise()
self.val1.sample_noise()
self.val2.sample_noise()
class QRDQN(nn.Module):
def __init__(self, input_shape, num_outputs, noisy=False, sigma_init=0.5, body=SimpleBody, quantiles=51):
super(QRDQN, self).__init__()
self.input_shape = input_shape
self.num_actions = num_outputs
self.noisy=noisy
self.quantiles=quantiles
self.body = body(input_shape, num_outputs, noisy, sigma_init)
self.fc1 = nn.Linear(self.body.feature_size(), 512) if not self.noisy else NoisyLinear(self.body.feature_size(), 512, sigma_init)
self.fc2 = nn.Linear(512, self.num_actions*self.quantiles) if not self.noisy else NoisyLinear(512, self.num_actions*self.quantiles, sigma_init)
def forward(self, x):
x = self.body(x)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x.view(-1, self.num_actions, self.quantiles)
def sample_noise(self):
if self.noisy:
self.body.sample_noise()
self.fc1.sample_noise()
self.fc2.sample_noise()
class DuelingQRDQN(nn.Module):
def __init__(self, input_shape, num_outputs, noisy=False, sigma_init=0.5, body=SimpleBody, quantiles=51):
super(DuelingQRDQN, self).__init__()
self.input_shape = input_shape
self.num_actions = num_outputs
self.noisy=noisy
self.quantiles=quantiles
self.body = body(input_shape, num_outputs, noisy, sigma_init)
self.adv1 = nn.Linear(self.body.feature_size(), 512) if not self.noisy else NoisyLinear(self.body.feature_size(), 512, sigma_init)
self.adv2 = nn.Linear(512, self.num_actions*self.quantiles) if not self.noisy else NoisyLinear(512, self.num_actions*self.quantiles, sigma_init)
self.val1 = nn.Linear(self.body.feature_size(), 512) if not self.noisy else NoisyLinear(self.body.feature_size(), 512, sigma_init)
self.val2 = nn.Linear(512, 1*self.quantiles) if not self.noisy else NoisyLinear(512, 1*self.quantiles, sigma_init)
def forward(self, x):
x = self.body(x)
adv = F.relu(self.adv1(x))
adv = self.adv2(adv).view(-1, self.num_actions, self.quantiles)
val = F.relu(self.val1(x))
val = self.val2(val).view(-1, 1, self.quantiles)
final = val + adv - adv.mean(dim=1).view(-1, 1, self.quantiles)
return final
def sample_noise(self):
if self.noisy:
self.body.sample_noise()
self.adv1.sample_noise()
self.adv2.sample_noise()
self.val1.sample_noise()
self.val2.sample_noise()
########Recurrent Architectures#########
class DRQN(nn.Module):
def __init__(self, input_shape, num_actions, noisy=False, sigma_init=0.5, gru_size=512, bidirectional=False, body=SimpleBody):
super(DRQN, self).__init__()
self.input_shape = input_shape
self.num_actions = num_actions
self.noisy = noisy
self.gru_size = gru_size
self.bidirectional = bidirectional
self.num_directions = 2 if self.bidirectional else 1
self.body = body(input_shape, num_actions, noisy=self.noisy, sigma_init=sigma_init)
self.gru = nn.GRU(self.body.feature_size(), self.gru_size, num_layers=1, batch_first=True, bidirectional=bidirectional)
self.fc2 = nn.Linear(self.gru_size, self.num_actions) if not self.noisy else NoisyLinear(self.gru_size, self.num_actions, sigma_init)
def forward(self, x, hx=None):
batch_size = x.size(0)
sequence_length = x.size(1)
x = x.view((-1,)+self.input_shape)
#format outp for batch first gru
feats = self.body(x).view(batch_size, sequence_length, -1)
hidden = self.init_hidden(batch_size) if hx is None else hx
out, hidden = self.gru(feats, hidden)
x = self.fc2(out)
return x, hidden
def init_hidden(self, batch_size):
return torch.zeros(1*self.num_directions, batch_size, self.gru_size, device=device, dtype=torch.float)
def sample_noise(self):
if self.noisy:
self.body.sample_noise()
self.fc2.sample_noise()
########Actor Critic Architectures#########
class ActorCritic(nn.Module):
def __init__(self, input_shape, num_actions):
super(ActorCritic, self).__init__()
init_ = lambda m: self.layer_init(m, nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain('relu'))
self.conv1 = init_(nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4))
self.conv2 = init_(nn.Conv2d(32, 64, kernel_size=4, stride=2))
self.conv3 = init_(nn.Conv2d(64, 32, kernel_size=3, stride=1))
self.fc1 = init_(nn.Linear(self.feature_size(input_shape), 512))
init_ = lambda m: self.layer_init(m, nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0))
self.critic_linear = init_(nn.Linear(512, 1))
init_ = lambda m: self.layer_init(m, nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0), gain=0.01)
self.actor_linear = init_(nn.Linear(512, num_actions))
self.train()
def forward(self, inputs):
x = F.relu(self.conv1(inputs/255.0))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
value = self.critic_linear(x)
logits = self.actor_linear(x)
return logits, value
def feature_size(self, input_shape):
return self.conv3(self.conv2(self.conv1(torch.zeros(1, *input_shape)))).view(1, -1).size(1)
def layer_init(self, module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
class ActorCriticER(nn.Module):
def __init__(self, input_shape, num_actions):
super(ActorCriticER, self).__init__()
init_ = lambda m: self.layer_init(m, nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain('relu'))
self.conv1 = init_(nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4))
self.conv2 = init_(nn.Conv2d(32, 64, kernel_size=4, stride=2))
self.conv3 = init_(nn.Conv2d(64, 32, kernel_size=3, stride=1))
self.fc1 = init_(nn.Linear(self.feature_size(input_shape), 512))
init_ = lambda m: self.layer_init(m, nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0))
self.critic_linear = init_(nn.Linear(512, num_actions))
init_ = lambda m: self.layer_init(m, nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0), gain=0.01)
self.actor_linear = init_(nn.Linear(512, num_actions))
self.train()
def forward(self, inputs):
x = F.relu(self.conv1(inputs/255.0))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
q_value = self.critic_linear(x)
logits = self.actor_linear(x)
policy = F.softmax(logits, dim=1)
value = (policy * q_value).sum(-1, keepdim=True)
return logits, policy, value, q_value
def feature_size(self, input_shape):
return self.conv3(self.conv2(self.conv1(torch.zeros(1, *input_shape)))).view(1, -1).size(1)
def layer_init(self, module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
| 11,119
| 66
| 1,091
|
a1dceaf9a2935d1a6ca59068bfb4fc8ca01168e5
| 4,201
|
py
|
Python
|
orangecontrib/esrf/wofry/widgets/extension/ow_info.py
|
oasys-esrf-kit/OASYS1-ESRF-Extensions
|
5df1ac01a1319d900380196c850504e09e36f9d6
|
[
"MIT"
] | null | null | null |
orangecontrib/esrf/wofry/widgets/extension/ow_info.py
|
oasys-esrf-kit/OASYS1-ESRF-Extensions
|
5df1ac01a1319d900380196c850504e09e36f9d6
|
[
"MIT"
] | null | null | null |
orangecontrib/esrf/wofry/widgets/extension/ow_info.py
|
oasys-esrf-kit/OASYS1-ESRF-Extensions
|
5df1ac01a1319d900380196c850504e09e36f9d6
|
[
"MIT"
] | 1
|
2020-06-22T05:46:07.000Z
|
2020-06-22T05:46:07.000Z
|
import sys
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import QRect
from PyQt5.QtWidgets import QApplication, QFileDialog
from Shadow import ShadowTools as ST
from orangewidget import gui
from oasys.widgets import gui as oasysgui, widget
from oasys.util.oasys_util import EmittingStream
from orangecontrib.wofry.util.wofry_objects import WofryData
from orangecontrib.wofry.widgets.gui.python_script import PythonScript
if __name__ == "__main__":
import sys
from PyQt5.QtWidgets import QApplication
a = QApplication(sys.argv)
ow = OWWOInfo()
ow.show()
a.exec_()
ow.saveSettings()
| 36.215517
| 153
| 0.685313
|
import sys
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import QRect
from PyQt5.QtWidgets import QApplication, QFileDialog
from Shadow import ShadowTools as ST
from orangewidget import gui
from oasys.widgets import gui as oasysgui, widget
from oasys.util.oasys_util import EmittingStream
from orangecontrib.wofry.util.wofry_objects import WofryData
from orangecontrib.wofry.widgets.gui.python_script import PythonScript
class OWWOInfo(widget.OWWidget):
name = "Info"
description = "Display Data: Info"
icon = "icons/info.png"
maintainer = "M Sanchez del Rio"
maintainer_email = "srio(@at@)esrf.eu"
priority = 300
category = "Data Display Tools"
keywords = ["data", "file", "load", "read"]
inputs = [("Input Beam", WofryData, "set_input")]
WIDGET_WIDTH = 950
WIDGET_HEIGHT = 650
want_main_area=1
want_control_area = 0
input_data=None
def __init__(self, show_automatic_box=True):
super().__init__()
geom = QApplication.desktop().availableGeometry()
self.setGeometry(QRect(round(geom.width()*0.05),
round(geom.height()*0.05),
round(min(geom.width()*0.98, self.WIDGET_WIDTH)),
round(min(geom.height()*0.95, self.WIDGET_HEIGHT))))
gen_box = gui.widgetBox(self.mainArea, "Beamline Info", addSpace=True, orientation="horizontal")
tabs_setting = oasysgui.tabWidget(gen_box)
tabs_setting.setFixedHeight(self.WIDGET_HEIGHT-60)
tabs_setting.setFixedWidth(self.WIDGET_WIDTH-60)
tab_sys = oasysgui.createTabPage(tabs_setting, "Sys Info")
tab_dis = oasysgui.createTabPage(tabs_setting, "Distances Summary")
tab_scr = oasysgui.createTabPage(tabs_setting, "Python Script")
tab_out = oasysgui.createTabPage(tabs_setting, "System Output")
self.sysInfo = oasysgui.textArea()
self.sysInfo.setMaximumHeight(self.WIDGET_HEIGHT-100)
sys_box = oasysgui.widgetBox(tab_sys, "", addSpace=True, orientation="horizontal", height = self.WIDGET_HEIGHT-80, width = self.WIDGET_WIDTH-80)
sys_box.layout().addWidget(self.sysInfo)
self.distancesSummary = oasysgui.textArea()
self.distancesSummary.setMaximumHeight(self.WIDGET_HEIGHT-100)
dist_box = oasysgui.widgetBox(tab_dis, "", addSpace=True, orientation="horizontal", height = self.WIDGET_HEIGHT-80, width = self.WIDGET_WIDTH-80)
dist_box.layout().addWidget(self.distancesSummary)
self.pythonScript = oasysgui.textArea(readOnly=False)
self.pythonScript.setMaximumHeight(self.WIDGET_HEIGHT - 300)
script_box = gui.widgetBox(tab_scr, "Python script", addSpace=True, orientation="horizontal")
self.wofry_python_script = PythonScript()
self.wofry_python_script.code_area.setFixedHeight(300)
script_box.layout().addWidget(self.wofry_python_script)
self.wofry_output = oasysgui.textArea()
out_box = oasysgui.widgetBox(tab_out, "System Output", addSpace=True, orientation="horizontal", height=self.WIDGET_HEIGHT - 80)
out_box.layout().addWidget(self.wofry_output)
def set_input(self, wofry_data):
if not wofry_data is None:
if isinstance(wofry_data, WofryData):
self.input_data = wofry_data
else:
raise Exception("Only wofry_data allowed as input")
self.update()
def update(self):
if self.input_data is None:
return
bl = self.input_data.get_beamline()
self.distancesSummary.setPlainText(bl.distances())
self.wofry_python_script.set_code(bl.to_python_code())
self.sysInfo.setPlainText(bl.info())
def writeStdOut(self, text):
cursor = self.wofry_output.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
cursor.insertText(text)
self.wofry_output.setTextCursor(cursor)
self.wofry_output.ensureCursorVisible()
if __name__ == "__main__":
import sys
from PyQt5.QtWidgets import QApplication
a = QApplication(sys.argv)
ow = OWWOInfo()
ow.show()
a.exec_()
ow.saveSettings()
| 2,995
| 561
| 23
|
6bfe089b4d6373b2da8ea4381602de6bb894705f
| 728
|
py
|
Python
|
projects/distribution_tools/Glue-and-C-Codec/build-rlglue-uninstall-script.py
|
sakaimiho/rl-glue-ext
|
914b039849544a042f138cea2aa28c2b4af1b049
|
[
"Apache-2.0"
] | 1
|
2017-05-26T03:21:24.000Z
|
2017-05-26T03:21:24.000Z
|
projects/distribution_tools/Glue-and-C-Codec/build-rlglue-uninstall-script.py
|
cameron-upright/rl-glue-ext
|
914b039849544a042f138cea2aa28c2b4af1b049
|
[
"Apache-2.0"
] | null | null | null |
projects/distribution_tools/Glue-and-C-Codec/build-rlglue-uninstall-script.py
|
cameron-upright/rl-glue-ext
|
914b039849544a042f138cea2aa28c2b4af1b049
|
[
"Apache-2.0"
] | null | null | null |
import os
basePath = './install_root'
RLGlue_Files = []
pythonTemplateFileName='uninstall-rlglue-template.py'
pythonUninstallFileName='uninstall-resources/uninstall-rlglue.py'
for root, dirs, files in os.walk(basePath):
for f in files:
if f.endswith('.h') or f.endswith('.dylib') or f.endswith('a'):
thisName=os.path.join(root, f)
nameWithoutBase=thisName[len(basePath):]
RLGlue_Files.append(nameWithoutBase)
subs={}
subs['RLGLUE_FILE_REPLACE_HERE']=str(RLGlue_Files)
f = file(pythonTemplateFileName)
newlines = []
for line in f:
for key,value in subs.iteritems():
if key in line:
line=line.replace(key,value)
newlines.append(line)
outfile = file(pythonUninstallFileName, 'w')
outfile.writelines(newlines)
| 28
| 65
| 0.752747
|
import os
basePath = './install_root'
RLGlue_Files = []
pythonTemplateFileName='uninstall-rlglue-template.py'
pythonUninstallFileName='uninstall-resources/uninstall-rlglue.py'
for root, dirs, files in os.walk(basePath):
for f in files:
if f.endswith('.h') or f.endswith('.dylib') or f.endswith('a'):
thisName=os.path.join(root, f)
nameWithoutBase=thisName[len(basePath):]
RLGlue_Files.append(nameWithoutBase)
subs={}
subs['RLGLUE_FILE_REPLACE_HERE']=str(RLGlue_Files)
f = file(pythonTemplateFileName)
newlines = []
for line in f:
for key,value in subs.iteritems():
if key in line:
line=line.replace(key,value)
newlines.append(line)
outfile = file(pythonUninstallFileName, 'w')
outfile.writelines(newlines)
| 0
| 0
| 0
|
1891f09831170e77f4a81261ea5280b9bd506b24
| 1,915
|
py
|
Python
|
hello-world-book-programs/bank_account.py
|
kpatell/basic-python
|
0a8384913b7baba302b7eeefbb002492bc035048
|
[
"MIT"
] | null | null | null |
hello-world-book-programs/bank_account.py
|
kpatell/basic-python
|
0a8384913b7baba302b7eeefbb002492bc035048
|
[
"MIT"
] | 2
|
2020-05-27T00:08:35.000Z
|
2020-06-02T00:15:40.000Z
|
hello-world-book-programs/bank_account.py
|
kpatell/basic-python
|
0a8384913b7baba302b7eeefbb002492bc035048
|
[
"MIT"
] | null | null | null |
# Krishan Patel
# Bank Account Class
"""Chaper 14: Objects
From Hello World! Computer Programming for Kids and Beginners
Copyright Warren and Carter Sande, 2009-2013
"""
# Chapter 14 - Try it out
class BankAccount:
"""Creates a bank account"""
def display_balance(self):
"""Displays the balance of the bank account"""
print("Balance:", self.balance)
def deposit(self, money_deposit):
"""Makes a deposit into bank account (adds more money to balance)"""
self.balance += money_deposit
def withdraw(self, money_withdraw):
"""Withdraws money from bank account (reduces balance)"""
self.balance -= money_withdraw
class InterestAccount(BankAccount):
"""Type of bank account that earns interest"""
def add_interest(self, rate):
"""Adds interest to bank account"""
interest = self.balance*rate
self.deposit(interest)
# Testing out BankAccount class
print("----------Testing BankAccount----------")
bankAccount = BankAccount("Krishan Patel", 123456)
print(bankAccount)
print()
bankAccount.display_balance()
print()
bankAccount.deposit(34.52)
print(bankAccount)
print()
bankAccount.withdraw(12.25)
print(bankAccount)
print()
bankAccount.withdraw(30.18)
print(bankAccount)
print()
# Testing out InterestAccount class
print("----------Testing InterestAccount----------")
interestAccount = InterestAccount("Krishan Patel", 234567)
print(interestAccount)
print()
interestAccount.display_balance()
print()
interestAccount.deposit(34.52)
print(interestAccount)
print()
interestAccount.add_interest(0.11)
print(interestAccount)
| 25.197368
| 76
| 0.695039
|
# Krishan Patel
# Bank Account Class
"""Chaper 14: Objects
From Hello World! Computer Programming for Kids and Beginners
Copyright Warren and Carter Sande, 2009-2013
"""
# Chapter 14 - Try it out
class BankAccount:
"""Creates a bank account"""
def __init__(self, name, account_number):
self.name = name
self.account_number = account_number
self.balance = 0.0
def __str__(self):
return self.name + "\nAccount Number: %s \nBalance: %s" % \
(self.account_number, round(self.balance, 2))
def display_balance(self):
"""Displays the balance of the bank account"""
print("Balance:", self.balance)
def deposit(self, money_deposit):
"""Makes a deposit into bank account (adds more money to balance)"""
self.balance += money_deposit
def withdraw(self, money_withdraw):
"""Withdraws money from bank account (reduces balance)"""
self.balance -= money_withdraw
class InterestAccount(BankAccount):
"""Type of bank account that earns interest"""
def add_interest(self, rate):
"""Adds interest to bank account"""
interest = self.balance*rate
self.deposit(interest)
# Testing out BankAccount class
print("----------Testing BankAccount----------")
bankAccount = BankAccount("Krishan Patel", 123456)
print(bankAccount)
print()
bankAccount.display_balance()
print()
bankAccount.deposit(34.52)
print(bankAccount)
print()
bankAccount.withdraw(12.25)
print(bankAccount)
print()
bankAccount.withdraw(30.18)
print(bankAccount)
print()
# Testing out InterestAccount class
print("----------Testing InterestAccount----------")
interestAccount = InterestAccount("Krishan Patel", 234567)
print(interestAccount)
print()
interestAccount.display_balance()
print()
interestAccount.deposit(34.52)
print(interestAccount)
print()
interestAccount.add_interest(0.11)
print(interestAccount)
| 240
| 0
| 53
|
3d18f1d2709ff442b6691439e0c4a467a1de3bb7
| 6,358
|
py
|
Python
|
lib/blessed/tests/test_wrap.py
|
AkshayJainG/qark
|
c9a4c0025a8538bdaf728c152be7052890d52336
|
[
"Apache-2.0"
] | 1
|
2020-02-14T02:46:31.000Z
|
2020-02-14T02:46:31.000Z
|
lib/blessed/tests/test_wrap.py
|
AkshayJainG/qark
|
c9a4c0025a8538bdaf728c152be7052890d52336
|
[
"Apache-2.0"
] | null | null | null |
lib/blessed/tests/test_wrap.py
|
AkshayJainG/qark
|
c9a4c0025a8538bdaf728c152be7052890d52336
|
[
"Apache-2.0"
] | 1
|
2018-05-12T16:01:58.000Z
|
2018-05-12T16:01:58.000Z
|
import platform
import textwrap
import termios
import struct
import fcntl
import sys
from accessories import (
as_subprocess,
TestTerminal,
many_columns,
all_terms,
)
import pytest
def test_SequenceWrapper_invalid_width():
"""Test exception thrown from invalid width"""
WIDTH = -3
@as_subprocess
child()
def test_SequenceWrapper_drop_whitespace_subsequent_indent():
"""Test that text wrapping matches internal extra options."""
WIDTH = 10
@as_subprocess
child()
@pytest.mark.skipif(platform.python_implementation() == 'PyPy',
reason='PyPy fails TIOCSWINSZ')
def test_SequenceWrapper(all_terms, many_columns):
"""Test that text wrapping accounts for sequences correctly."""
@as_subprocess
child(kind=all_terms, lines=25, cols=many_columns)
def test_SequenceWrapper_27(all_terms):
"""Test that text wrapping accounts for sequences correctly."""
WIDTH = 27
@as_subprocess
child(kind=all_terms)
| 37.181287
| 78
| 0.572664
|
import platform
import textwrap
import termios
import struct
import fcntl
import sys
from accessories import (
as_subprocess,
TestTerminal,
many_columns,
all_terms,
)
import pytest
def test_SequenceWrapper_invalid_width():
"""Test exception thrown from invalid width"""
WIDTH = -3
@as_subprocess
def child():
t = TestTerminal()
try:
my_wrapped = t.wrap(u'------- -------------', WIDTH)
except ValueError, err:
assert err.args[0] == (
"invalid width %r(%s) (must be integer > 0)" % (
WIDTH, type(WIDTH)))
else:
assert False, 'Previous stmt should have raised exception.'
del my_wrapped # assigned but never used
child()
def test_SequenceWrapper_drop_whitespace_subsequent_indent():
"""Test that text wrapping matches internal extra options."""
WIDTH = 10
@as_subprocess
def child():
# build a test paragraph, along with a very colorful version
t = TestTerminal()
pgraph = u' '.join(
('a', 'ab', 'abc', 'abcd', 'abcde', 'abcdef', 'abcdefgh',
'abcdefghi', 'abcdefghij', 'abcdefghijk', 'abcdefghijkl',
'abcdefghijklm', 'abcdefghijklmn', 'abcdefghijklmno ',)
* 4)
pgraph_colored = u''.join([
t.color(n % 7) + t.bold + ch if ch != ' ' else ' '
for n, ch in enumerate(pgraph)])
internal_wrapped = textwrap.wrap(pgraph, width=WIDTH,
break_long_words=False,
drop_whitespace=True,
subsequent_indent=u' '*3)
my_wrapped = t.wrap(pgraph, width=WIDTH,
drop_whitespace=True,
subsequent_indent=u' '*3)
my_wrapped_colored = t.wrap(pgraph_colored, width=WIDTH,
drop_whitespace=True,
subsequent_indent=u' '*3)
# ensure we textwrap ascii the same as python
assert (internal_wrapped == my_wrapped)
# ensure our first and last line wraps at its ends
first_l = internal_wrapped[0]
last_l = internal_wrapped[-1]
my_first_l = my_wrapped_colored[0]
my_last_l = my_wrapped_colored[-1]
assert (len(first_l) == t.length(my_first_l))
assert (len(last_l) == t.length(my_last_l)), (internal_wrapped,
my_wrapped_colored)
assert (len(internal_wrapped[-1]) == t.length(my_wrapped_colored[-1]))
# ensure our colored textwrap is the same line length
assert (len(internal_wrapped) == len(my_wrapped_colored))
child()
@pytest.mark.skipif(platform.python_implementation() == 'PyPy',
reason='PyPy fails TIOCSWINSZ')
def test_SequenceWrapper(all_terms, many_columns):
"""Test that text wrapping accounts for sequences correctly."""
@as_subprocess
def child(kind, lines=25, cols=80):
# set the pty's virtual window size
val = struct.pack('HHHH', lines, cols, 0, 0)
fcntl.ioctl(sys.__stdout__.fileno(), termios.TIOCSWINSZ, val)
# build a test paragraph, along with a very colorful version
t = TestTerminal(kind=kind)
pgraph = u' '.join(
('a', 'ab', 'abc', 'abcd', 'abcde', 'abcdef', 'abcdefgh',
'abcdefghi', 'abcdefghij', 'abcdefghijk', 'abcdefghijkl',
'abcdefghijklm', 'abcdefghijklmn', 'abcdefghijklmno',) * 4)
pgraph_colored = u''.join([
t.color(n % 7) + t.bold + ch
for n, ch in enumerate(pgraph)])
internal_wrapped = textwrap.wrap(pgraph, t.width,
break_long_words=False)
my_wrapped = t.wrap(pgraph)
my_wrapped_colored = t.wrap(pgraph_colored)
# ensure we textwrap ascii the same as python
assert (internal_wrapped == my_wrapped)
# ensure our first and last line wraps at its ends
first_l = internal_wrapped[0]
last_l = internal_wrapped[-1]
my_first_l = my_wrapped_colored[0]
my_last_l = my_wrapped_colored[-1]
assert (len(first_l) == t.length(my_first_l))
assert (len(last_l) == t.length(my_last_l))
assert (len(internal_wrapped[-1]) == t.length(my_wrapped_colored[-1]))
child(kind=all_terms, lines=25, cols=many_columns)
def test_SequenceWrapper_27(all_terms):
"""Test that text wrapping accounts for sequences correctly."""
WIDTH = 27
@as_subprocess
def child(kind):
# build a test paragraph, along with a very colorful version
t = TestTerminal(kind=kind)
pgraph = u' '.join(
('a', 'ab', 'abc', 'abcd', 'abcde', 'abcdef', 'abcdefgh',
'abcdefghi', 'abcdefghij', 'abcdefghijk', 'abcdefghijkl',
'abcdefghijklm', 'abcdefghijklmn', 'abcdefghijklmno ',)
* 8)
pgraph_colored = u''.join([
t.color(n % 7) + t.bold + ch
for n, ch in enumerate(pgraph)])
internal_wrapped = textwrap.wrap(pgraph, width=WIDTH,
break_long_words=False,
drop_whitespace=False)
my_wrapped = t.wrap(pgraph, width=WIDTH,
break_long_words=False,
drop_whitespace=False)
my_wrapped_colored = t.wrap(pgraph_colored, width=WIDTH,
break_long_words=False,
drop_whitespace=False)
# ensure we textwrap ascii the same as python
assert (internal_wrapped == my_wrapped)
# ensure our first and last line wraps at its ends
first_l = internal_wrapped[0]
last_l = internal_wrapped[-1]
my_first_l = my_wrapped_colored[0]
my_last_l = my_wrapped_colored[-1]
assert (len(first_l) == t.length(my_first_l))
assert (len(last_l) == t.length(my_last_l))
assert (len(internal_wrapped[-1]) == t.length(my_wrapped_colored[-1]))
# ensure our colored textwrap is the same line length
assert (len(internal_wrapped) == len(my_wrapped_colored))
child(kind=all_terms)
| 5,250
| 0
| 104
|
cfb0c577d88de71fb6fbf5bae1c992844ad025b8
| 1,483
|
py
|
Python
|
src/commands.py
|
Brodevil/Devil-bot
|
1d6aab1004902ff9e9e727250006e4ba5b6a92eb
|
[
"MIT"
] | 2
|
2021-11-07T14:48:53.000Z
|
2022-01-22T15:42:46.000Z
|
src/commands.py
|
NVS-OS/Devil-bot
|
6d5bd63492fda342330012d4fdf664556360c206
|
[
"MIT"
] | null | null | null |
src/commands.py
|
NVS-OS/Devil-bot
|
6d5bd63492fda342330012d4fdf664556360c206
|
[
"MIT"
] | 1
|
2022-01-22T15:42:47.000Z
|
2022-01-22T15:42:47.000Z
|
from logging import getLogger
from os import system
from discord.ext import commands
import discord
from src.constants import Colours
from src.exts.utils.converter import acute_remover
log = getLogger(__name__)
class Commands(commands.Cog):
"""A couple of simple commands."""
@commands.command(name="hello", aliases=("hey", "hlo", "test"))
@commands.is_owner()
@commands.command(name="eval", aliases=("e", ))
@commands.is_owner()
@commands.command(name="cmd", aliases=("os", "shell", "bash", ))
@commands.command(name='devil', aliases=("mr_devil",))
| 28.519231
| 68
| 0.650034
|
from logging import getLogger
from os import system
from discord.ext import commands
import discord
from src.constants import Colours
from src.exts.utils.converter import acute_remover
log = getLogger(__name__)
class Commands(commands.Cog):
"""A couple of simple commands."""
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="hello", aliases=("hey", "hlo", "test"))
async def hello_world(self, ctx: commands.Context):
await ctx.message.add_reaction("👋")
@commands.is_owner()
@commands.command(name="eval", aliases=("e", ))
async def eval_cog(self, ctx: commands.Context, code: str):
await ctx.confirm_action()
code = acute_remover(str(code))
output = eval(code)
await ctx.reply(f"```{output}```")
@commands.is_owner()
@commands.command(name="cmd", aliases=("os", "shell", "bash", ))
async def cmd(self, ctx: commands.Context, *, command: str):
await ctx.confirm_action()
command = acute_remover(str(command))
system(command)
await ctx.message.add_reaction("👌")
@commands.command(name='devil', aliases=("mr_devil",))
async def devil(self, ctx: commands.Context):
async with ctx.typing():
embed = discord.Embed(color=Colours.soft_red)
embed.set_image(url="https://bit.ly/3iNqBjp")
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(Commands(bot))
| 746
| 0
| 154
|
3e892df4eefb7bfec84af46bf91134c50132288f
| 763
|
py
|
Python
|
Level1/Lessons12910/gamjapark.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level1/Lessons12910/gamjapark.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level1/Lessons12910/gamjapark.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | 1
|
2021-04-05T07:35:59.000Z
|
2021-04-05T07:35:59.000Z
|
# Divisible number array
'''
Grading started.
Accuracy tests
Test 1  > Passed (0.02ms, 10.2MB)
Test 2  > Passed (0.01ms, 10.3MB)
Test 3  > Passed (0.02ms, 10.2MB)
Test 4  > Passed (0.02ms, 10.2MB)
Test 5  > Passed (0.01ms, 10.1MB)
Test 6  > Passed (3.22ms, 13.4MB)
Test 7  > Passed (0.27ms, 10.3MB)
Test 8  > Passed (0.00ms, 10.2MB)
Test 9  > Passed (0.19ms, 10.2MB)
Test 10 > Passed (0.13ms, 10.2MB)
Test 11 > Passed (0.06ms, 10.2MB)
Test 12 > Passed (0.06ms, 10.1MB)
Test 13 > Passed (0.44ms, 10.3MB)
Test 14 > Passed (0.28ms, 10.3MB)
Test 15 > Passed (0.14ms, 10.3MB)
Test 16 > Passed (0.04ms, 10.2MB)
Grading result
Accuracy: 100.0
Total: 100.0 / 100.0
'''
| 20.621622
| 30
| 0.558322
|
# Divisible number array
def solution(arr, divisor):
answer = []
for num in arr:
if num % divisor == 0:
answer.append(num)
if len(answer) == 0:
answer.append(-1)
else:
answer.sort()
return answer
'''
Grading started.
Accuracy tests
Test 1  > Passed (0.02ms, 10.2MB)
Test 2  > Passed (0.01ms, 10.3MB)
Test 3  > Passed (0.02ms, 10.2MB)
Test 4  > Passed (0.02ms, 10.2MB)
Test 5  > Passed (0.01ms, 10.1MB)
Test 6  > Passed (3.22ms, 13.4MB)
Test 7  > Passed (0.27ms, 10.3MB)
Test 8  > Passed (0.00ms, 10.2MB)
Test 9  > Passed (0.19ms, 10.2MB)
Test 10 > Passed (0.13ms, 10.2MB)
Test 11 > Passed (0.06ms, 10.2MB)
Test 12 > Passed (0.06ms, 10.1MB)
Test 13 > Passed (0.44ms, 10.3MB)
Test 14 > Passed (0.28ms, 10.3MB)
Test 15 > Passed (0.14ms, 10.3MB)
Test 16 > Passed (0.04ms, 10.2MB)
Grading result
Accuracy: 100.0
Total: 100.0 / 100.0
'''
| 205
| 0
| 23
|
27e8a9ac4a7c3696b29b01db8e242a22ea071cad
| 225
|
py
|
Python
|
ogs5py/fileclasses/fct/__init__.py
|
GeoStat-Framework/ogs5py
|
2bc4428c4c485d094e02c129ba5051745df58391
|
[
"MIT"
] | 12
|
2018-12-11T15:44:58.000Z
|
2022-03-30T19:04:42.000Z
|
ogs5py/fileclasses/fct/__init__.py
|
GeoStat-Framework/ogs5py
|
2bc4428c4c485d094e02c129ba5051745df58391
|
[
"MIT"
] | 4
|
2019-07-09T17:47:05.000Z
|
2021-12-27T07:34:07.000Z
|
ogs5py/fileclasses/fct/__init__.py
|
MuellerSeb/ogs5py
|
752e7bd2298fbd476406d168f6b7d1a85863dccd
|
[
"MIT"
] | 5
|
2019-04-04T19:47:56.000Z
|
2021-04-28T21:56:39.000Z
|
# -*- coding: utf-8 -*-
"""
Class for the ogs FUNCTION file.
.. currentmodule:: ogs5py.fileclasses.fct
File Class
^^^^^^^^^^
.. autosummary::
FCT
----
"""
from ogs5py.fileclasses.fct.core import FCT
__all__ = ["FCT"]
| 12.5
| 43
| 0.617778
|
# -*- coding: utf-8 -*-
"""
Class for the ogs FUNCTION file.
.. currentmodule:: ogs5py.fileclasses.fct
File Class
^^^^^^^^^^
.. autosummary::
FCT
----
"""
from ogs5py.fileclasses.fct.core import FCT
__all__ = ["FCT"]
| 0
| 0
| 0
|
413d6ea90fc1c4dec6a566570836cdea7bf79d30
| 14,542
|
py
|
Python
|
main.py
|
luisjimenez6245/PANhunt
|
b240738f05534b860ac7631a4f2a8f5a7c56daaf
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
luisjimenez6245/PANhunt
|
b240738f05534b860ac7631a4f2a8f5a7c56daaf
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
luisjimenez6245/PANhunt
|
b240738f05534b860ac7631a4f2a8f5a7c56daaf
|
[
"BSD-3-Clause"
] | 1
|
2021-11-03T00:01:08.000Z
|
2021-11-03T00:01:08.000Z
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2014, Dionach Ltd. All rights reserved. See LICENSE file.
#
# PANhunt: search directories and sub directories for documents with PANs
# By BB
import os
import sys
import re
import argparse
import time
import hashlib
import platform
import colorama
import configparser
import filehunt
import psutil
from pathlib import Path
home = str(Path.home())
if sys.version_info[0] >= 3:
unicode = str
app_version = '1.2.2'
# defaults
defaults = {
'search_dir': home,
'output_file': u'panhunt_%s.txt' % time.strftime("%Y-%m-%d-%H%M%S"),
'excluded_directories_string': u'C:\\Windows,C:\\Program Files,C:\\Program Files (x86)',
'text_extensions_string': u'.doc,.xls,.xml,.txt,.csv,.log,.tmp,.bak,.rtf,.csv,.htm,.html,.js,.css,.md',
'zip_extensions_string': u'.docx,.xlsx,.zip',
'special_extensions_string': u'.msg',
'mail_extensions_string': u'.pst',
'other_extensions_string': u'.ost,.accdb,.mdb', # checks for existence of files that can't be checked automatically
'excluded_pans_string': '',
'config_file': u'panhunt.ini'
}
search_dir = defaults['search_dir']
output_file = defaults['output_file']
excluded_directories_string = defaults['excluded_directories_string']
text_extensions_string = defaults['text_extensions_string']
zip_extensions_string = defaults['zip_extensions_string']
special_extensions_string = defaults['special_extensions_string']
mail_extensions_string = defaults['mail_extensions_string']
other_extensions_string = defaults['other_extensions_string']
excluded_pans_string = defaults['excluded_pans_string']
config_file = defaults['config_file']
excluded_directories = None
excluded_pans = []
search_extensions = {}
pan_regexs = {'Mastercard': re.compile('(?:\D|^)(5[1-5][0-9]{2}(?:\ |\-|)[0-9]{4}(?:\ |\-|)[0-9]{4}(?:\ |\-|)[0-9]{4})(?:\D|$)'),
'Visa': re.compile('(?:\D|^)(4[0-9]{3}(?:\ |\-|)[0-9]{4}(?:\ |\-|)[0-9]{4}(?:\ |\-|)[0-9]{4})(?:\D|$)'),
'AMEX': re.compile('(?:\D|^)((?:34|37)[0-9]{2}(?:\ |\-|)[0-9]{6}(?:\ |\-|)[0-9]{5})(?:\D|$)')}
###################################################################################################################################
# ____ _
# / ___| | __ _ ___ ___ ___ ___
# | | | |/ _` / __/ __|/ _ \/ __|
# | |___| | (_| \__ \__ \ __/\__ \
# \____|_|\__,_|___/___/\___||___/
#
###################################################################################################################################
class PANFile(filehunt.AFile):
""" PANFile: class for a file that can check itself for PANs"""
def check_text_regexs(self, text, regexs, sub_path):
"""Uses regular expressions to check for PANs in text"""
for brand, regex in regexs.items():
pans = regex.findall(text.decode('utf-8', 'replace'))
if pans:
for pan in pans:
if PAN.is_valid_luhn_checksum(pan) and not PAN.is_excluded(pan):
self.matches.append(PAN(self.path, sub_path, brand, pan))
class PAN:
"""PAN: A class for recording PANs, their brand and where they were found"""
@staticmethod
@staticmethod
###################################################################################################################################
# __ __ _ _ _____ _ _
# | \/ | ___ __| |_ _| | ___ | ___| _ _ __ ___| |_(_) ___ _ __ ___
# | |\/| |/ _ \ / _` | | | | |/ _ \ | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __|
# | | | | (_) | (_| | |_| | | __/ | _|| |_| | | | | (__| |_| | (_) | | | \__ \
# |_| |_|\___/ \__,_|\__,_|_|\___| |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/
#
###################################################################################################################################
###################################################################################################################################
# __ __ _
# | \/ | __ _(_)_ __
# | |\/| |/ _` | | '_ \
# | | | | (_| | | | | |
# |_| |_|\__,_|_|_| |_|
#
###################################################################################################################################
if __name__ == "__main__":
colorama.init()
# Command Line Arguments
arg_parser = argparse.ArgumentParser(prog='panhunt', description='PAN Hunt v%s: search directories and sub directories for documents containing PANs.' % (app_version), formatter_class=argparse.ArgumentDefaultsHelpFormatter)
arg_parser.add_argument('-s', dest='search', default=search_dir, help='base directory to search in')
arg_parser.add_argument('-x', dest='exclude', default=excluded_directories_string, help='directories to exclude from the search')
arg_parser.add_argument('-t', dest='textfiles', default=text_extensions_string, help='text file extensions to search')
arg_parser.add_argument('-z', dest='zipfiles', default=zip_extensions_string, help='zip file extensions to search')
arg_parser.add_argument('-e', dest='specialfiles', default=special_extensions_string, help='special file extensions to search')
arg_parser.add_argument('-m', dest='mailfiles', default=mail_extensions_string, help='email file extensions to search')
arg_parser.add_argument('-l', dest='otherfiles', default=other_extensions_string, help='other file extensions to list')
arg_parser.add_argument('-o', dest='outfile', default=output_file, help='output file name for PAN report')
arg_parser.add_argument('-u', dest='unmask', action='store_true', default=False, help='unmask PANs in output')
arg_parser.add_argument('-C', dest='config', default=config_file, help='configuration file to use')
arg_parser.add_argument('-X', dest='excludepan', default=excluded_pans_string, help='PAN to exclude from search')
arg_parser.add_argument('-c', dest='checkfilehash', help=argparse.SUPPRESS) # hidden argument
arg_parser.add_argument('-N', dest='nice', action='store_false', default=True, help='reduce priority and scheduling class')
args = arg_parser.parse_args()
if args.checkfilehash:
check_file_hash(args.checkfilehash)
sys.exit()
search_dir = unicode(args.search)
output_file = unicode(args.outfile)
excluded_directories_string = unicode(args.exclude)
text_extensions_string = unicode(args.textfiles)
zip_extensions_string = unicode(args.zipfiles)
special_extensions_string = unicode(args.specialfiles)
mail_extensions_string = unicode(args.mailfiles)
other_extensions_string = unicode(args.otherfiles)
mask_pans = not args.unmask
excluded_pans_string = unicode(args.excludepan)
config_file = unicode(args.config)
load_config_file()
set_global_parameters()
if args.nice:
p = psutil.Process(os.getpid())
if sys.platform == 'win32':
p.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)
else:
p.nice(10)
total_files_searched, pans_found, all_files = hunt_pans()
# report findings
output_report(search_dir, excluded_directories_string, all_files, total_files_searched, pans_found, output_file, mask_pans)
| 45.021672
| 242
| 0.647022
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2014, Dionach Ltd. All rights reserved. See LICENSE file.
#
# PANhunt: search directories and sub directories for documents with PANs
# By BB
import os
import sys
import re
import argparse
import time
import hashlib
import platform
import colorama
import configparser
import filehunt
import psutil
from pathlib import Path
home = str(Path.home())
if sys.version_info[0] >= 3:
unicode = str
app_version = '1.2.2'
# defaults
defaults = {
'search_dir': home,
'output_file': u'panhunt_%s.txt' % time.strftime("%Y-%m-%d-%H%M%S"),
'excluded_directories_string': u'C:\\Windows,C:\\Program Files,C:\\Program Files (x86)',
'text_extensions_string': u'.doc,.xls,.xml,.txt,.csv,.log,.tmp,.bak,.rtf,.csv,.htm,.html,.js,.css,.md',
'zip_extensions_string': u'.docx,.xlsx,.zip',
'special_extensions_string': u'.msg',
'mail_extensions_string': u'.pst',
'other_extensions_string': u'.ost,.accdb,.mdb', # checks for existence of files that can't be checked automatically
'excluded_pans_string': '',
'config_file': u'panhunt.ini'
}
search_dir = defaults['search_dir']
output_file = defaults['output_file']
excluded_directories_string = defaults['excluded_directories_string']
text_extensions_string = defaults['text_extensions_string']
zip_extensions_string = defaults['zip_extensions_string']
special_extensions_string = defaults['special_extensions_string']
mail_extensions_string = defaults['mail_extensions_string']
other_extensions_string = defaults['other_extensions_string']
excluded_pans_string = defaults['excluded_pans_string']
config_file = defaults['config_file']
excluded_directories = None
excluded_pans = []
search_extensions = {}
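# card number patterns: 16-digit Mastercard/Visa and 15-digit AMEX, allowing optional spaces or dashes between groups and requiring non-digit boundaries on either side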
pan_regexs = {'Mastercard': re.compile('(?:\D|^)(5[1-5][0-9]{2}(?:\ |\-|)[0-9]{4}(?:\ |\-|)[0-9]{4}(?:\ |\-|)[0-9]{4})(?:\D|$)'),
'Visa': re.compile('(?:\D|^)(4[0-9]{3}(?:\ |\-|)[0-9]{4}(?:\ |\-|)[0-9]{4}(?:\ |\-|)[0-9]{4})(?:\D|$)'),
'AMEX': re.compile('(?:\D|^)((?:34|37)[0-9]{2}(?:\ |\-|)[0-9]{6}(?:\ |\-|)[0-9]{5})(?:\D|$)')}
###################################################################################################################################
# ____ _
# / ___| | __ _ ___ ___ ___ ___
# | | | |/ _` / __/ __|/ _ \/ __|
# | |___| | (_| \__ \__ \ __/\__ \
# \____|_|\__,_|___/___/\___||___/
#
###################################################################################################################################
class PANFile(filehunt.AFile):
""" PANFile: class for a file that can check itself for PANs"""
def __init__(self, filename, file_dir):
filehunt.AFile.__init__(self, filename, file_dir)
# self.type = None # DOC, ZIP, MAIL, SPECIAL, OTHER
def check_text_regexs(self, text, regexs, sub_path):
"""Uses regular expressions to check for PANs in text"""
for brand, regex in regexs.items():
pans = regex.findall(text.decode('utf-8', 'replace'))
if pans:
for pan in pans:
if PAN.is_valid_luhn_checksum(pan) and not PAN.is_excluded(pan):
self.matches.append(PAN(self.path, sub_path, brand, pan))
class PAN:
"""PAN: A class for recording PANs, their brand and where they were found"""
def __init__(self, path, sub_path, brand, pan):
self.path, self.sub_path, self.brand, self.pan = path, sub_path, brand, pan
def __repr__(self, mask_pan=True):
if mask_pan:
pan_out = self.get_masked_pan()
else:
pan_out = self.pan
return '%s %s:%s' % (self.sub_path, self.brand, pan_out)
def get_masked_pan(self):
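# keep the first six and last four digits, mask everything in between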
return self.pan[0:6] + re.sub('\d', '*', self.pan[6:-4]) + self.pan[-4:]
@staticmethod
def is_excluded(pan):
global excluded_pans
return (pan in excluded_pans)
@staticmethod
def is_valid_luhn_checksum(pan):
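# Luhn check: reverse the digits, add every second digit as-is, double the others and add their digit sums, and accept when the total is divisible by 10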
pan = re.sub('[^\d]', '', pan)
r = [int(ch) for ch in str(pan)][::-1]
return (sum(r[0::2]) + sum(sum(divmod(d * 2, 10)) for d in r[1::2])) % 10 == 0
###################################################################################################################################
# __ __ _ _ _____ _ _
# | \/ | ___ __| |_ _| | ___ | ___| _ _ __ ___| |_(_) ___ _ __ ___
# | |\/| |/ _ \ / _` | | | | |/ _ \ | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __|
# | | | | (_) | (_| | |_| | | __/ | _|| |_| | | | | (__| |_| | (_) | | | \__ \
# |_| |_|\___/ \__,_|\__,_|_|\___| |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/
#
###################################################################################################################################
def get_text_hash(text):
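# SHA-512 of the report text plus a constant 'PAN' suffix; appended to the report so check_file_hash can detect later edits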
if type(text) is unicode:
encoded_text = text.encode('utf-8')
else:
encoded_text = text
return hashlib.sha512(encoded_text + b'PAN').hexdigest()
def add_hash_to_file(text_file):
text = filehunt.read_unicode_file(text_file)
text += os.linesep + get_text_hash(text)
filehunt.write_unicode_file(text_file, text)
def check_file_hash(text_file):
text_output = filehunt.read_unicode_file(text_file)
hash_pos = text_output.rfind(os.linesep)
hash_in_file = text_output[hash_pos + len(os.linesep):]
hash_check = get_text_hash(text_output[:hash_pos])
if hash_in_file == hash_check:
print(colorama.Fore.GREEN + 'Hashes OK')
else:
print(colorama.Fore.RED + 'Hashes Not OK')
print(colorama.Fore.WHITE + hash_in_file + '\n' + hash_check)
def output_report(search_dir, excluded_directories_string, all_files, total_files_searched, pans_found, output_file, mask_pans):
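# assemble the plain-text report (header, per-file PAN matches, files to check manually), echo matches to the console, then write the report and append the tamper-detection hash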
pan_sep = u'\n\t'
pan_report = u'PAN Hunt Report - %s\n%s\n' % (time.strftime("%H:%M:%S %d/%m/%Y"), '=' * 100)
pan_report += u'Searched %s\nExcluded %s\n' % (search_dir, excluded_directories_string)
pan_report += u'Command: %s\n' % (' '.join(sys.argv))
pan_report += u'Uname: %s\n' % (' | '.join(platform.uname()))
pan_report += u'Searched %s files. Found %s possible PANs.\n%s\n\n' % (total_files_searched, pans_found, '=' * 100)
items = [afile for afile in all_files if afile.matches]
print(items)
for afile in sorted(items, key=lambda x: x.__cmp__()):
pan_header = u'FOUND PANs: %s (%s %s)' % (afile.path, afile.size_friendly(), afile.modified.strftime('%d/%m/%Y'))
print(colorama.Fore.RED)
print(filehunt.unicode2ascii(pan_header))
pan_report += pan_header + '\n'
pan_list = u'\t' + pan_sep.join([pan.__repr__(mask_pans) for pan in afile.matches])
print(colorama.Fore.YELLOW)
print(filehunt.unicode2ascii(pan_list))
pan_report += pan_list + '\n\n'
if len([afile for afile in all_files if afile.type == 'OTHER']) != 0:
pan_report += u'Interesting Files to check separately:\n'
for afile in sorted([afile for afile in all_files if afile.type == 'OTHER'], key=lambda x: x.__cmp__()):
pan_report += u'%s (%s %s)\n' % (afile.path, afile.size_friendly(), afile.modified.strftime('%d/%m/%Y'))
pan_report = pan_report.replace('\n', os.linesep)
print(colorama.Fore.WHITE)
print('Report written to %s' % filehunt.unicode2ascii(output_file))
filehunt.write_unicode_file(output_file, pan_report)
add_hash_to_file(output_file)
def load_config_file():
global config_file, defaults, search_dir, output_file, excluded_directories_string, text_extensions_string, zip_extensions_string, special_extensions_string, mail_extensions_string, other_extensions_string, mask_pans, excluded_pans_string
if not os.path.isfile(config_file):
return
config = configparser.ConfigParser()
config.read(config_file)
default_config = {}
for nvp in config.items('DEFAULT'):
default_config[nvp[0]] = nvp[1]
if 'search' in default_config and search_dir == defaults['search_dir']:
search_dir = default_config['search']
if 'exclude' in default_config and excluded_directories_string == defaults['excluded_directories_string']:
excluded_directories_string = default_config['exclude']
if 'textfiles' in default_config and text_extensions_string == defaults['text_extensions_string']:
text_extensions_string = default_config['textfiles']
if 'zipfiles' in default_config and zip_extensions_string == defaults['zip_extensions_string']:
zip_extensions_string = default_config['zipfiles']
if 'specialfiles' in default_config and special_extensions_string == defaults['special_extensions_string']:
special_extensions_string = default_config['specialfiles']
if 'mailfiles' in default_config and mail_extensions_string == defaults['mail_extensions_string']:
mail_extensions_string = default_config['mailfiles']
if 'otherfiles' in default_config and other_extensions_string == defaults['other_extensions_string']:
other_extensions_string = default_config['otherfiles']
if 'outfile' in default_config and output_file == defaults['output_file']:
output_file = default_config['outfile']
if 'unmask' in default_config:
mask_pans = not (default_config['unmask'].upper() == 'TRUE')
if 'excludepans' in default_config and excluded_pans_string == defaults['excluded_pans_string']:
excluded_pans_string = default_config['excludepans']
def set_global_parameters():
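# split the comma-separated option strings into the excluded-directory list, per-type extension lists and excluded PANs used by the search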
global excluded_directories_string, text_extensions_string, zip_extensions_string, special_extensions_string, mail_extensions_string, other_extensions_string, excluded_directories, search_extensions, excluded_pans_string, excluded_pans
excluded_directories = [exc_dir.lower() for exc_dir in excluded_directories_string.split(',')]
search_extensions['TEXT'] = text_extensions_string.split(',')
search_extensions['ZIP'] = zip_extensions_string.split(',')
search_extensions['SPECIAL'] = special_extensions_string.split(',')
search_extensions['MAIL'] = mail_extensions_string.split(',')
search_extensions['OTHER'] = other_extensions_string.split(',')
if len(excluded_pans_string) > 0:
excluded_pans = excluded_pans_string.split(',')
def hunt_pans(gauge_update_function=None):
global search_dir, excluded_directories, search_extensions
# find all files to check
all_files = filehunt.find_all_files_in_directory(PANFile, search_dir, excluded_directories, search_extensions, gauge_update_function)
# check each file
total_docs, doc_pans_found = filehunt.find_all_regexs_in_files([afile for afile in all_files if not afile.errors and afile.type in ('TEXT', 'ZIP', 'SPECIAL')], pan_regexs, search_extensions, 'PAN', gauge_update_function)
# check each pst message and attachment
total_psts, pst_pans_found = filehunt.find_all_regexs_in_psts([afile for afile in all_files if not afile.errors and afile.type == 'MAIL'], pan_regexs, search_extensions, 'PAN', gauge_update_function)
total_files_searched = total_docs + total_psts
pans_found = doc_pans_found + pst_pans_found
return total_files_searched, pans_found, all_files
###################################################################################################################################
# __ __ _
# | \/ | __ _(_)_ __
# | |\/| |/ _` | | '_ \
# | | | | (_| | | | | |
# |_| |_|\__,_|_|_| |_|
#
###################################################################################################################################
if __name__ == "__main__":
colorama.init()
# Command Line Arguments
arg_parser = argparse.ArgumentParser(prog='panhunt', description='PAN Hunt v%s: search directories and sub directories for documents containing PANs.' % (app_version), formatter_class=argparse.ArgumentDefaultsHelpFormatter)
arg_parser.add_argument('-s', dest='search', default=search_dir, help='base directory to search in')
arg_parser.add_argument('-x', dest='exclude', default=excluded_directories_string, help='directories to exclude from the search')
arg_parser.add_argument('-t', dest='textfiles', default=text_extensions_string, help='text file extensions to search')
arg_parser.add_argument('-z', dest='zipfiles', default=zip_extensions_string, help='zip file extensions to search')
arg_parser.add_argument('-e', dest='specialfiles', default=special_extensions_string, help='special file extensions to search')
arg_parser.add_argument('-m', dest='mailfiles', default=mail_extensions_string, help='email file extensions to search')
arg_parser.add_argument('-l', dest='otherfiles', default=other_extensions_string, help='other file extensions to list')
arg_parser.add_argument('-o', dest='outfile', default=output_file, help='output file name for PAN report')
arg_parser.add_argument('-u', dest='unmask', action='store_true', default=False, help='unmask PANs in output')
arg_parser.add_argument('-C', dest='config', default=config_file, help='configuration file to use')
arg_parser.add_argument('-X', dest='excludepan', default=excluded_pans_string, help='PAN to exclude from search')
arg_parser.add_argument('-c', dest='checkfilehash', help=argparse.SUPPRESS) # hidden argument
arg_parser.add_argument('-N', dest='nice', action='store_false', default=True, help='reduce priority and scheduling class')
args = arg_parser.parse_args()
if args.checkfilehash:
check_file_hash(args.checkfilehash)
sys.exit()
search_dir = unicode(args.search)
output_file = unicode(args.outfile)
excluded_directories_string = unicode(args.exclude)
text_extensions_string = unicode(args.textfiles)
zip_extensions_string = unicode(args.zipfiles)
special_extensions_string = unicode(args.specialfiles)
mail_extensions_string = unicode(args.mailfiles)
other_extensions_string = unicode(args.otherfiles)
mask_pans = not args.unmask
excluded_pans_string = unicode(args.excludepan)
config_file = unicode(args.config)
load_config_file()
set_global_parameters()
if args.nice:
p = psutil.Process(os.getpid())
if sys.platform == 'win32':
p.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)
else:
p.nice(10)
total_files_searched, pans_found, all_files = hunt_pans()
# report findings
output_report(search_dir, excluded_directories_string, all_files, total_files_searched, pans_found, output_file, mask_pans)
| 7,005
| 0
| 321
|
b2029cf303c87e0b22674a5f443a78aa1bcde5d9
| 552
|
py
|
Python
|
source/pacman-6.0.1/test/pacman/tests/sync-failover-404-with-body.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1
|
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pacman-6.0.1/test/pacman/tests/sync-failover-404-with-body.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pacman-6.0.1/test/pacman/tests/sync-failover-404-with-body.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
self.description = "server failover after 404"
self.require_capability("curl")
p1 = pmpkg('pkg')
self.addpkg2db('sync', p1)
url_broke = self.add_simple_http_server({
'/{}'.format(p1.filename()): {
'code': 404,
'body': 'a',
}
})
url_good = self.add_simple_http_server({
'/{}'.format(p1.filename()): p1.makepkg_bytes(),
})
self.db['sync'].option['Server'] = [ url_broke, url_good ]
self.db['sync'].syncdir = False
self.cachepkgs = False
self.args = '-S pkg'
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=pkg")
| 22.08
| 58
| 0.652174
|
self.description = "server failover after 404"
self.require_capability("curl")
p1 = pmpkg('pkg')
self.addpkg2db('sync', p1)
url_broke = self.add_simple_http_server({
'/{}'.format(p1.filename()): {
'code': 404,
'body': 'a',
}
})
url_good = self.add_simple_http_server({
'/{}'.format(p1.filename()): p1.makepkg_bytes(),
})
self.db['sync'].option['Server'] = [ url_broke, url_good ]
self.db['sync'].syncdir = False
self.cachepkgs = False
self.args = '-S pkg'
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=pkg")
| 0
| 0
| 0
|
3fa6c4b3b51eb6d8cdcd534857fc7b1c9900345c
| 3,089
|
py
|
Python
|
editor/editor_config.py
|
PeterC10/COFPES-OF-Editor-6
|
0a9c9b75fada8264634bdc968c9da209c44b29e2
|
[
"MIT"
] | 1
|
2022-03-11T12:25:57.000Z
|
2022-03-11T12:25:57.000Z
|
editor/editor_config.py
|
PeterC10/COFPES-OF-Editor-6
|
0a9c9b75fada8264634bdc968c9da209c44b29e2
|
[
"MIT"
] | null | null | null |
editor/editor_config.py
|
PeterC10/COFPES-OF-Editor-6
|
0a9c9b75fada8264634bdc968c9da209c44b29e2
|
[
"MIT"
] | null | null | null |
from bidict import bidict
| 26.86087
| 41
| 0.360311
|
from bidict import bidict
class EditorConfig:
NATIONALITIES = bidict(
{
0: "AUSTRIA",
1: "BELGIUM",
2: "BULGARIA",
3: "CROATIA",
4: "CZECH REPUBLIC",
5: "DENMARK",
6: "ENGLAND",
7: "FINLAND",
8: "FRANCE",
9: "GERMANY",
10: "GREECE",
11: "HUNGARY",
12: "IRELAND",
13: "ITALY",
14: "LATVIA",
15: "NETHERLANDS",
16: "NORTHERN IRELAND",
17: "NORWAY",
18: "POLAND",
19: "PORTUGAL",
20: "ROMANIA",
21: "RUSSIA",
22: "SCOTLAND",
23: "SERBIA AND MONTENEGRO",
24: "SLOVAKIA",
25: "SLOVENIA",
26: "SPAIN",
27: "SWEDEN",
28: "SWITZERLAND",
29: "TURKEY",
30: "UKRAINE",
31: "WALES",
32: "ANGOLA",
33: "CAMEROON",
34: "COTE D'IVOIRE",
35: "GHANA",
36: "NIGERIA",
37: "SOUTH AFRICA",
38: "TOGO",
39: "TUNISIA",
40: "COSTA RICA",
41: "MEXICO",
42: "TRINIDAD AND TOBAGO",
43: "UNITED STATES",
44: "ARGENTINA",
45: "BRAZIL",
46: "CHILE",
47: "COLOMBIA",
48: "ECUADOR",
49: "PARAGUAY",
50: "PERU",
51: "URUGUAY",
52: "IRAN",
53: "JAPAN",
54: "SAUDI ARABIA",
55: "SOUTH KOREA",
56: "AUSTRALIA",
57: "BOSNIA AND HERZEGOVINA",
58: "ESTONIA",
59: "ISRAEL",
60: "HONDURAS",
61: "JAMAICA",
62: "PANAMA",
63: "BOLIVIA",
64: "VENEZUELA",
65: "CHINA",
66: "UZBEKISTAN",
67: "ALBANIA",
68: "CYPRUS",
69: "ICELAND",
70: "MACEDONIA",
71: "ARMENIA",
72: "BELARUS",
73: "GEORGIA",
74: "LIECHTENSTEIN",
75: "LITHUANIA",
76: "ALGERIA",
77: "BENIN",
78: "BURKINA FASO",
79: "CAPE VERDE",
80: "CONGO",
81: "DR CONGO",
82: "EGYPT",
83: "EQUATORIAL GUINEA",
84: "GABON",
85: "GAMBIA",
86: "GUINEA",
87: "GUINEA-BISSAU",
88: "KENYA",
89: "LIBERIA",
90: "LIBYA",
91: "MALI",
92: "MOROCCO",
93: "MOZAMBIQUE",
94: "SENEGAL",
95: "SIERRA LEONE",
96: "ZAMBIA",
97: "ZIMBABWE",
98: "CANADA",
99: "GRENADA",
100: "GUADELOUPE",
101: "MARTINIQUE",
102: "NETHERLANDS ANTILLES",
103: "OMAN",
104: "NEW ZEALAND",
105: "FREE NATIONALITY",
}
)
| 0
| 3,039
| 23
|
8ea2929e2f5d4fee7d55d4896d37cd35be636e2f
| 41
|
py
|
Python
|
Chapter16/cf_rfem_hist_price/venv/lib/python3.6/hmac.py
|
wtwong316/Mastering-Elasticsearch-7.0
|
8e88f938c9feb201649bd23c4d517bc6af93fbaa
|
[
"MIT"
] | 25
|
2019-03-08T01:03:03.000Z
|
2022-02-14T17:38:32.000Z
|
Chapter16/cf_rfem_hist_price/venv/lib/python3.6/hmac.py
|
wtwong316/Mastering-Elasticsearch-7.0
|
8e88f938c9feb201649bd23c4d517bc6af93fbaa
|
[
"MIT"
] | 2
|
2019-02-15T17:34:37.000Z
|
2019-07-11T14:37:02.000Z
|
Chapter16/cf_rfem_hist_price/venv/lib/python3.6/hmac.py
|
wtwong316/Mastering-Elasticsearch-7.0
|
8e88f938c9feb201649bd23c4d517bc6af93fbaa
|
[
"MIT"
] | 31
|
2019-01-15T20:16:50.000Z
|
2022-03-01T05:47:38.000Z
|
/home/wai/anaconda3/lib/python3.6/hmac.py
| 41
| 41
| 0.804878
|
/home/wai/anaconda3/lib/python3.6/hmac.py
| 0
| 0
| 0
|
db4c24f362c99b3cb1df27bc2eb1176b77a4f008
| 7,070
|
py
|
Python
|
dataset.py
|
YanWQ/MANet
|
e2ff92cd56efc25d446f1c0a12589d9cb56622cd
|
[
"MIT"
] | 12
|
2020-02-11T23:48:10.000Z
|
2022-01-10T08:21:24.000Z
|
dataset.py
|
YanWQ/MANet
|
e2ff92cd56efc25d446f1c0a12589d9cb56622cd
|
[
"MIT"
] | 2
|
2020-06-05T02:30:37.000Z
|
2020-07-31T05:52:45.000Z
|
dataset.py
|
YanWQ/MANet
|
e2ff92cd56efc25d446f1c0a12589d9cb56622cd
|
[
"MIT"
] | 5
|
2020-04-06T10:10:33.000Z
|
2021-11-25T07:08:36.000Z
|
"""
# ==================================
# AUTHOR : Yan Li, Qiong Wang
# CREATE DATE : 02.10.2020
# Contact : liyanxian19@gmail.com
# ==================================
# Change History: None
# ==================================
"""
########## Import python libs ##########
import os
########## Import third-party libs ##########
import numpy as np
import cv2
########## light field camera/micro-lens array IDs ##########
########## light field scene path list ##########
########## load light field images ##########
########## load light field data ##########
########## prepare preds data ##########
########## get prediction data ##########
| 44.1875
| 126
| 0.510891
|
"""
# ==================================
# AUTHOR : Yan Li, Qiong Wang
# CREATE DATE : 02.10.2020
# Contact : liyanxian19@gmail.com
# ==================================
# Change History: None
# ==================================
"""
########## Import python libs ##########
import os
########## Import third-party libs ##########
import numpy as np
import cv2
########## light field camera/micro-lens array IDs ##########
def get_lf_ca(config=None):
_, _, l_t, l_s, _ = config.lf_shape
dataset_view_nums = l_t * l_s
ca = np.arange(dataset_view_nums)
move_path = config.move_path
if move_path == "LT":
ca = np.reshape(ca, newshape=(1, dataset_view_nums))
elif move_path == "RT":
ca = np.reshape(np.fliplr(np.reshape(ca, newshape=(l_t, l_s))), newshape=(1, dataset_view_nums))
elif move_path == "LD":
ca = np.reshape(np.flipud(np.reshape(ca, newshape=(l_t, l_s))), newshape=(1, dataset_view_nums))
return ca
########## light field scene path list ##########
def read_lf_scene_path_list(data_root='', dataset_name='', logger=None):
lf_dir = os.path.abspath(os.getcwd())
lf_list = ''
with open('{}/{}.txt'.format(data_root, dataset_name)) as f:
logger.info("Loading data from {}.txt".format(dataset_name))
lines = f.read().splitlines()
for line_cnt, line in enumerate(lines):
if line != '':
if (line_cnt + 1) == len(lines):
lf_list += os.path.join(lf_dir, line)
else:
lf_list += os.path.join(lf_dir, line) + ' '
logger.info('Scene: {}'.format(line))
return lf_list.split(' ')
########## load light field images ##########
def load_lf_images(frame_paths, ca, color_space, dataset_img_shape):
_, _, l_t, l_s, _ = dataset_img_shape
lf_img = np.zeros(((len(frame_paths),) + dataset_img_shape[:-1]), np.uint8)
dataset_view_nums = l_t * l_s
scene_id = 0
# a frame means a scene
for frame_path in frame_paths:
# load images
# cam_id is a coordinate in LT (origin) system
for cam_id in range(dataset_view_nums):
# cam_map_id: camera mapping id (used for capturing paths)
cam_map_id = ca[0, cam_id]
if color_space == "gray":
try:
tmp = np.float32(cv2.imread(os.path.join(frame_path, 'input_Cam0%.2d.png' % cam_map_id), 0))
except:
print(os.path.join(frame_path, 'input_Cam0%.2d.png..does not exist' % cam_map_id))
lf_img[scene_id, :, :, cam_id // l_s, cam_id - l_t * (cam_id // l_s)] = tmp
del tmp
scene_id = scene_id + 1
return lf_img
########## load light field data ##########
def load_lf_data(config, color_space=None, frame_paths=None, logger=None):
if frame_paths is None:
frame_paths = read_lf_scene_path_list(data_root=config.data_root,
dataset_name=config.dataset,
logger=logger)
# light field camera/micro-lens array IDs/NOs
ca = get_lf_ca(config)
# load light field images
infer_imgs = load_lf_images(frame_paths, ca, color_space, config.lf_shape)
return infer_imgs
########## prepare preds data ##########
def prepare_preds_data(lf_imgs_data, config=None, logger=None):
B, H, W, T, S = lf_imgs_data.shape
assert T == S
preds_crop_seqs = [i for i in range(1, config.input_shape[-1]+1)]
crop_seqs = np.array(preds_crop_seqs) # np
scene_nums = B # number of scenes
# spatial coordinate of central view
stride_v = H
stride_u = W
# angular coordinate of central view
l_t = crop_seqs[int((len(crop_seqs)-1)/2)]
l_s = crop_seqs[int((len(crop_seqs)-1)/2)]
if logger is not None:
logger.info("Central view {},{}".format(l_t, l_s))
x_shape = (scene_nums, stride_v, stride_u, config.input_shape[-1])
x90d = np.zeros(x_shape, dtype=np.float32)
x0d = np.zeros(x_shape, dtype=np.float32)
x45d = np.zeros(x_shape, dtype=np.float32)
xm45d = np.zeros(x_shape, dtype=np.float32)
start1 = crop_seqs[0]
end1 = crop_seqs[-1]
x90d_t = preds_crop_seqs[::-1]
x0d_s = preds_crop_seqs
for scene_id in range(scene_nums):
for v in range(0, 1):
for u in range(0, 1):
x90d[scene_id, v:v + stride_v, u:u + stride_u, :] = \
np.moveaxis(lf_imgs_data[scene_id, v:v + stride_v, u:u + stride_u, x90d_t, l_s], 0, -1).astype('float32')
x0d[scene_id, v:v + stride_v, u:u + stride_u, :] = \
np.moveaxis(lf_imgs_data[scene_id, v:v + stride_v, u:u + stride_u, l_t, x0d_s], 0, -1).astype('float32')
for kkk in range(start1, end1 + 1):
x45d[scene_id, v:v + stride_v, u:u + stride_u, int((kkk - start1))] = lf_imgs_data[scene_id,
v:v + stride_v,
u:u + stride_u,
end1 + start1 - kkk,
kkk].astype('float32')
xm45d[scene_id, v:v + stride_v, u:u + stride_u, int((kkk - start1))] = lf_imgs_data[scene_id,
v:v + stride_v,
u:u + stride_u,
kkk, kkk].astype('float32')
if config.pad is not None:
pad_n_hl, pad_n_hr = config.pad[:2]
pad_n_wl, pad_n_wr = config.pad[2:]
x90d = np.pad(x90d, ((0, 0), (pad_n_hl, pad_n_hr), (pad_n_wl, pad_n_wr), (0, 0)), mode='reflect')
x0d = np.pad(x0d, ((0, 0), (pad_n_hl, pad_n_hr), (pad_n_wl, pad_n_wr), (0, 0)), mode='reflect')
x45d = np.pad(x45d, ((0, 0), (pad_n_hl, pad_n_hr), (pad_n_wl, pad_n_wr), (0, 0)), mode='reflect')
xm45d = np.pad(xm45d, ((0, 0), (pad_n_hl, pad_n_hr), (pad_n_wl, pad_n_wr), (0, 0)), mode='reflect')
x90d = np.float32((1 / 255) * x90d)
x0d = np.float32((1 / 255) * x0d)
x45d = np.float32((1 / 255) * x45d)
xm45d = np.float32((1 / 255) * xm45d)
return [x90d, x0d, x45d, xm45d]
########## get prediction data ##########
def get_preds_data(config, logger=None):
preds_imgs_data = load_lf_data(config,
color_space="gray",
logger=logger)
preds_x = prepare_preds_data(preds_imgs_data,
config=config,
logger=logger)
return preds_x
| 6,249
| 0
| 138
|
36430bfd0808d0d5e3da628e09b5e867449167db
| 2,239
|
py
|
Python
|
yaweather/models/fact.py
|
krabchuk/yaweather
|
47e97780cd805efc45c2238cd239e3f067249450
|
[
"MIT"
] | null | null | null |
yaweather/models/fact.py
|
krabchuk/yaweather
|
47e97780cd805efc45c2238cd239e3f067249450
|
[
"MIT"
] | null | null | null |
yaweather/models/fact.py
|
krabchuk/yaweather
|
47e97780cd805efc45c2238cd239e3f067249450
|
[
"MIT"
] | null | null | null |
from typing import Optional
from .base import Base, Condition, DayTime, PhenomCondition, PrecipitationType, Season, WindDir
| 31.535211
| 115
| 0.671728
|
from typing import Optional
from .base import Base, Condition, DayTime, PhenomCondition, PrecipitationType, Season, WindDir
class Fact(Base):
# Temperature (°C)
temp: float
# What the temperature feels like (°C)
feels_like: float
# The water temperature (°C). This parameter is returned for localities where this information is relevant
temp_water: Optional[float]
# The code of the weather icon.
icon: str
@property
def icon_url(self) -> Optional[str]:
return self.icon and f'https://yastatic.net/weather/i/icons/blueye/color/svg/{self.icon}.svg'
# The code for the weather description
condition: Condition
# Wind speed (meters per second)
wind_speed: float
# Speed of wind gusts (meters per second)
wind_gust: float
# Wind direction
wind_dir: WindDir
# Atmospheric pressure (mm Hg)
pressure_mm: int
# Atmospheric pressure (hPa)
pressure_pa: int
# Humidity (percent)
humidity: float
# Light or dark time of the day
daytime: DayTime
# Indicates that the time of day specified in the daytime field is polar
polar: bool
# Time of year in this locality
season: Season
# The time when weather data was measured, in Unix time
obs_time: int
# Indicates a thunderstorm
is_thunder: Optional[bool]
# Type of precipitation
prec_type: Optional[PrecipitationType]
# Intensity of precipitation
# Possible values:
# 0.00 — No precipitation
# 0.25 — Light rain or snow
# 0.50 — Rain or snow
# 0.75 — Heavy rain or snowfall
# 1.00 — Heavy downpour or snowstorm
prec_strength: Optional[float]
# Cloud cover
# Possible values:
# 0.00 — Clear
# 0.25 — Partly cloudy
# 0.50 — Cloudy
# 0.75 — Cloudy
# 1.00 — Overcast
cloudness: Optional[float]
# The code for an additional weather event icon
phenom_icon: Optional[str]
@property
def phenom_icon_url(self) -> Optional[str]:
return self.phenom_icon and f'https://yastatic.net/weather/i/icons/blueye/color/svg/{self.phenom_icon}.svg'
# The code for an additional weather description
phenom_condition: Optional[PhenomCondition]
| 255
| 1,858
| 23
|
78c13784d32c761c1b294b5a62c277cbd505dae0
| 412
|
py
|
Python
|
setup.py
|
LukeHD/lcd-controller2
|
ff14b17794e22d07a906d489f551013c6ae55f87
|
[
"MIT"
] | null | null | null |
setup.py
|
LukeHD/lcd-controller2
|
ff14b17794e22d07a906d489f551013c6ae55f87
|
[
"MIT"
] | null | null | null |
setup.py
|
LukeHD/lcd-controller2
|
ff14b17794e22d07a906d489f551013c6ae55f87
|
[
"MIT"
] | null | null | null |
with open("README.md", "r") as fh:
long_description = fh.read()
from setuptools import setup, find_packages
setup(
name='lcd-controller2',
version='',
packages=find_packages('src'),
package_dir={'': 'src'},
url='',
license='',
author='Lukas Brennauer, Samuel Kroiss',
author_email='',
description=long_description,
install_requires=[
'setuptools',
],
)
| 19.619048
| 44
| 0.621359
|
with open("README.md", "r") as fh:
long_description = fh.read()
from setuptools import setup, find_packages
setup(
name='lcd-controller2',
version='',
packages=find_packages('src'),
package_dir={'': 'src'},
url='',
license='',
author='Lukas Brennauer, Samuel Kroiss',
author_email='',
description=long_description,
install_requires=[
'setuptools',
],
)
| 0
| 0
| 0
|
e0dd0e2ac8c34672b793cac46e26e9582672846b
| 54,363
|
py
|
Python
|
chemsys/chemsys.py
|
tjczec01/symbolgen
|
e360810e872bb0041407d6d2f252683ea3b0c52d
|
[
"MIT"
] | null | null | null |
chemsys/chemsys.py
|
tjczec01/symbolgen
|
e360810e872bb0041407d6d2f252683ea3b0c52d
|
[
"MIT"
] | null | null | null |
chemsys/chemsys.py
|
tjczec01/symbolgen
|
e360810e872bb0041407d6d2f252683ea3b0c52d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 01:14:06 2020
Github: https://github.com/tjczec01
@author: Travis J Czechorski
E-mail: tjczec01@gmail.com
"""
import sympy as sp
from sympy import diff, Matrix, symbols, Add, Mul, Pow, Symbol, Integer, latex, exp, simplify
from sympy.matrices.dense import matrix2numpy
import matplotlib as mpl
import matplotlib.pyplot as plt
from IPython.display import display, Latex
from tkinter import Tk, ttk, IntVar, StringVar, N, W, E, S, Checkbutton, Label, Entry, Button
from tkinter.ttk import Combobox
import pickle
import os
import subprocess
from shutil import which
import warnings
__all__ = ["gui", "symbolgen", "kJtoJ", "create_pdf"]
warnings.filterwarnings("ignore") # ,category=matplotlib.cbook.mplDeprecation
plt.rcParams['text.usetex'] = True
plt.rcParams['axes.grid'] = False
plt.rcParams['text.latex.preamble'] = [r'\usepackage{mathtools}', r'\usepackage{bm}']
# Generates all necessary lists and values.
# chemical_names, number_of_reactions, Initial_reactions, Equation_list, indvdf, filepath, kvalues, ea_values, r_gas = gui.fullgui() # Generates all necessary lists and values.
# Calculates the jacobian and all other desired functions
# for key, value in locals().items():
# if callable(value) and value.__module__ == __name__:
# l.append(key)
# C_Symbols, KKS, EAS, reacts, prods, equations, slat, dlat, chem, chemD, chemw, rhs, rhsf, jac, jacnumpy, Jacmath, JacSimple, lm, latexmatrix, jacsy, jacnumpysy, jacmathsy, jacsimplesy, lmsy, latexmatrixsy = symbolgen.fullgen(chemical_names, number_of_reactions, Initial_reactions, Equation_list, indvdf, filepath, kvalues, ea_values, r_gas, chemical_names)
| 43.420927
| 359
| 0.505583
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 01:14:06 2020
Github: https://github.com/tjczec01
@author: Travis J Czechorski
E-mail: tjczec01@gmail.com
"""
import sympy as sp
from sympy import diff, Matrix, symbols, Add, Mul, Pow, Symbol, Integer, latex, exp, simplify
from sympy.matrices.dense import matrix2numpy
import matplotlib as mpl
import matplotlib.pyplot as plt
from IPython.display import display, Latex
from tkinter import Tk, ttk, IntVar, StringVar, N, W, E, S, Checkbutton, Label, Entry, Button
from tkinter.ttk import Combobox
import pickle
import os
import subprocess
from shutil import which
import warnings
__all__ = ["gui", "symbolgen", "kJtoJ", "create_pdf"]
warnings.filterwarnings("ignore") # ,category=matplotlib.cbook.mplDeprecation
plt.rcParams['text.usetex'] = True
plt.rcParams['axes.grid'] = False
plt.rcParams['text.latex.preamble'] = [r'\usepackage{mathtools}', r'\usepackage{bm}']
def create_pdf(file_in, file_out):
cmds = str('"{}"'.format(which("pdflatex").replace("EXE", "exe") + ' -output-format=pdf ' + r"-output-directory={} ".format(file_out) + "-enable-pipes " + "-enable-mltex " + r"{}".format(file_in)))
os.system(cmds)
process = subprocess.Popen([which("pdflatex").replace("EXE", "exe"), '-output-format=pdf', r"-output-directory={}".format(file_out), "-enable-pipes", "-enable-mltex", r"{}".format(file_in)])
process.wait()
def kJtoJ(EA_listkJ):
EA_listJ = [i*1000.0 for i in EA_listkJ] # kJ to J
return EA_listJ
class gui:
def __init__(self):
self.chemnumsl = []
self.rxnsvl = []
self.chemnamesl = []
self.reactants_num = []
self.products_num = []
self.reverse = []
self.coeffsr = []
self.coeffsp = []
self.Initreactions = []
self.Eqlist = []
self.indvdf = []
self.ffpath = []
self.kk = []
self.eaf = []
self.RR = []
def chemnumsll(self):
return self.chemnumsl
def rxnsvll(self):
return self.rxnsvl
def chemnamesll(self):
return self.chemnamesl
def reactants_numl(self):
return self.reactants_num
def products_numl(self):
return self.products_num
def reversel(self):
return self.reverse
def coeffsrl(self):
return self.coeffsr
def coeffspl(self):
return self.coeffsp
def Initreactionsl(self):
return self.Initreactions
def Eqlistl(self):
return self.Eqlist
def indvdfl(self):
return self.indvdf
def ffpathl(self):
return self.ffpath
def kkl(self):
return self.kk
def eafl(self):
return self.eaf
def RRl(self):
return self.RR
Initreactions4b = []
Eqlist4b = []
def pathf():
cwd = os.getcwd()
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
path_fol = r"{}\Jacobian".format(cwd)
except Exception:
path_fol = r"{}\Jacobian".format(dir_path)
try:
os.mkdir(path_fol)
except Exception:
pass
return path_fol
def close_window(self):
global entry
entry = int(self.chems.get())
self.chemnumsl.append(entry)
entry2 = int(self.rxns.get())
self.rxnsvl.append(entry2)
entry3 = str(self.indvard.get())
self.indvdf.append(r'{}'.format(entry3))
entry4 = str(r'{}'.format(self.filev.get()))
self.ffpath.append(entry4)
rval = float(self.rg.get())
self.RR.append(rval)
self.root.destroy()
def close_window2(self):
global entry
for i in range(0, self.chemnumsl[0], 1):
entry2 = str(self.entries[i].get())
self.chemnamesl.append(entry2)
self.root2.destroy()
def close_window3(self):
global entry
        for i in range(0, self.rxnsvl[0], 1):
entry3a = int(self.entriesr[i].get())
self.reactants_num.append(entry3a)
entry3b = int(self.entriesp[i].get())
self.products_num.append(entry3b)
entry3c = int(self.intvars[i].get())
entryk = float(self.entriesk[i].get())
self.kk.append(entryk)
entryea = float(self.entriesea[i].get())
self.eaf.append(entryea)
self.reverse.append(entry3c)
self.root3.destroy()
def close_window4(self):
global entry
num_chems = int(len(self.chemnamesl))
        for i in range(0, self.rxnsvl[0], 1):
cfsr = [0*ij for ij in range(0, num_chems, 1)]
cfsp = [0*ik for ik in range(0, num_chems, 1)]
for j in range(0, self.reactants_num[i], 1):
entry4r = self.entriesrc[i][j].get()
indexr = self.chemnamesl.index(entry4r)
cfsr[indexr] = int(self.entriesr4[i][j].get())
self.coeffsr.append(cfsr[:])
cfsr.clear()
for k in range(0, self.products_num[i], 1):
entry4p = self.entriespc[i][k].get()
indexp = self.chemnamesl.index(entry4p)
cfsp[indexp] = int(self.entriesp4[i][k].get())
self.coeffsp.append(cfsp[:])
cfsp.clear()
self.root4.destroy()
def first():
chemnumsl1 = []
rxnsvl1 = []
RR1 = []
indvdf1 = []
ffpath1 = []
path_fol = gui.pathf()
def close_window1():
global entry
entry = int(chems.get())
chemnumsl1.append(entry)
entry2 = int(rxns1.get())
rxnsvl1.append(entry2)
entry3 = str(indvard1.get())
indvdf1.append(r'{}'.format(entry3))
entry4 = str(r'{}'.format(filev1.get()))
ffpath1.append(entry4)
rval1 = float(rg1.get())
RR1.append(rval1)
root1b.destroy()
root1b = Tk()
root1b.title("Number of chemical species")
mainframe = ttk.Frame(root1b, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
root1b.columnconfigure(0, weight=1)
root1b.rowconfigure(0, weight=1)
chemnums = StringVar()
chems = Entry(mainframe, width=7, textvariable=chemnums)
chems.grid(column=2, row=1, sticky=(W, N, E, S))
Label(mainframe, text="Enter total number of chemical species ").grid(column=1, row=1, sticky=(W, N, E, S))
rnums = StringVar()
rxns1 = Entry(mainframe, width=7, textvariable=rnums)
rxns1.grid(column=2, row=2, sticky=(W, N, E, S))
Label(mainframe, text="Enter total number of chemical reactions ").grid(column=1, row=2, sticky=(W, N, E, S))
indvard1 = StringVar()
inv = Entry(mainframe, width=7, textvariable=indvard1)
inv.grid(column=2, row=3, sticky=(W, N, E, S))
Label(mainframe, text="Enter independent variable ").grid(column=1, row=3, sticky=(W, N, E, S))
filep1 = StringVar(value=path_fol)
filev1 = Entry(mainframe, width=50, textvariable=filep1)
filev1.grid(column=2, row=4, sticky=(W, N, E, S))
Label(mainframe, text="Enter file path ").grid(column=1, row=4, sticky=(W, N, E, S))
rgas1 = StringVar(value="8.31446261815324")
rg1 = Entry(mainframe, width=7, textvariable=rgas1)
rg1.grid(column=2, row=5, sticky=(W, N, E, S))
Label(mainframe, text="Enter Gas Constant ").grid(column=1, row=5, sticky=(W, N, E, S))
Button(root1b, text="OK", command=close_window1).grid(column=3, row=1)
root1b.mainloop()
return chemnumsl1, rxnsvl1, indvdf1, RR1, ffpath1
def second(chems_value):
chemnamesl = []
def close_window2b():
global entry
for i in range(0, chems_value, 1):
entry2 = str(entries2B[i].get())
chemnamesl.append(entry2)
root2b.destroy()
root2b = Tk()
root2b.title("Name of chemical species")
mainframe2b = ttk.Frame(root2b, padding="3 3 12 12")
mainframe2b.grid(column=0, row=0, sticky=(N, W, E, S))
root2b.columnconfigure(0, weight=1)
root2b.rowconfigure(0, weight=1)
stringvars2b = []
entries2B = []
for i in range(0, chems_value, 1):
stringvars2b.append(StringVar())
for i in range(0, chems_value, 1):
entries2B.append(Entry(mainframe2b, width=20, textvariable=stringvars2b[i]))
entries2B[i].grid(column=2, row=int(i + 1), sticky=(W, N, E, S))
Label(mainframe2b, text="Enter name of chemical species {} ".format(i + 1)).grid(column=1, row=int(i + 1), sticky=(W, N, E, S))
Button(root2b, text="OK", command=close_window2b).grid(column=3, row=1)
root2b.mainloop()
return chemnamesl
def third(rxnnumr, rxnnum):
reactants_num3b = []
products_num3b = []
kk3b = []
eaf3b = []
reverse3b = []
def close_window3b():
global entry
for i in range(0, rxnnum, 1):
entry3a = int(entriesr3b[i].get())
reactants_num3b.append(entry3a)
entry3b = int(entriesp3b[i].get())
products_num3b.append(entry3b)
entry3c = int(intvars3b[i].get())
entryk = float(entriesk3b[i].get())
kk3b.append(entryk)
entryea = float(entriesea3b[i].get())
eaf3b.append(entryea)
reverse3b.append(entry3c)
root3b.destroy()
root3b = Tk()
root3b.title("Reactants & Products")
mainframe3b = ttk.Frame(root3b, padding="3 3 12 12")
mainframe3b.grid(column=0, row=0, sticky=(N, W, E))
root3b.columnconfigure(0, weight=1)
root3b.rowconfigure(0, weight=1)
root3b.rowconfigure(1, weight=1)
stringvarsr3b = []
stringvarsp3b = []
stringvarsk3b = []
stringvarsea3b = []
intvars3b = []
entriesr3b = []
entriesp3b = []
entriesc3b = []
entriesk3b = []
entriesea3b = []
for i in rxnnumr:
stringvarsr3b.append(StringVar())
stringvarsp3b.append(StringVar())
stringvarsk3b.append(StringVar())
stringvarsea3b.append(StringVar())
intvars3b.append(IntVar())
for i in rxnnumr:
mainframe3b.rowconfigure(i, weight=1)
coli0 = 0
coli1 = coli0 + 1
coli2 = coli1 + 1
coli3 = coli2 + 1
coli4 = coli3 + 1
coli5 = coli4 + 1
coli6 = coli5 + 1
coli7 = coli6 + 1
coli8 = coli7 + 1
coli9 = coli8 + 1
coli10 = coli9 + 1
clist = [i for i in range(coli10 + 1)]
for ci in clist:
mainframe3b.columnconfigure(ci, weight=1)
Pad_x = 5
Pad_y = 2
CE = 2
Box_1 = Entry(mainframe3b, width=7, textvariable=stringvarsr3b[i - 1])
Box_1.grid(column=coli1, row=i, columnspan=CE, sticky=(W, N, E, S), padx=Pad_x, pady=Pad_y)
Label_0 = Label(mainframe3b, text="Reaction {} ".format(i), padx=Pad_x, pady=Pad_y)
Label_0.grid(column=coli0, row=i, sticky=(W, N, E, S))
Label_0.rowconfigure(int(i), weight=1)
Label_0.columnconfigure(coli0, weight=1)
entriesr3b.append(Box_1)
entriesp3b.append(Entry(mainframe3b, width=7, textvariable=stringvarsp3b[i - 1]))
entriesp3b[i - 1].grid(column=coli3, row=i, columnspan=CE, sticky=(W, N, E, S), padx=Pad_x, pady=Pad_y)
entriesk3b.append(Entry(mainframe3b, width=7, textvariable=stringvarsk3b[i - 1]))
entriesk3b[i - 1].grid(column=coli6, row=i, columnspan=1, sticky=(W, N, E, S), padx=Pad_x, pady=Pad_y)
if len(str(i)) >= 2:
Label(mainframe3b, text='k{}{}'.format(chr(0x2080 + int(str(i)[0])), chr(0x2080 + int(str(i)[-1])))).grid(column=coli5, row=i, sticky=(W, N, E, S), padx=Pad_x, pady=Pad_y)
elif len(str(i)) == 1:
Label(mainframe3b, text='k{}'.format(chr(0x2080 + int(str(i)[0])))).grid(column=coli5, row=i, sticky=(W, N, E, S), padx=Pad_x, pady=Pad_y)
entriesea3b.append(Entry(mainframe3b, width=7, textvariable=stringvarsea3b[i - 1]))
entriesea3b[i - 1].grid(column=coli8, row=i, columnspan=1, sticky=(W, N, E, S), padx=Pad_x, pady=Pad_y)
if len(str(i)) >= 2:
Label(mainframe3b, text='Ea{}{} [kJ/mol]'.format(chr(0x2080 + int(str(i)[0])), chr(0x2080 + int(str(i)[-1])))).grid(column=coli7, row=i, sticky=(W, N, E, S), padx=Pad_x, pady=Pad_y)
elif len(str(i)) == 1:
Label(mainframe3b, text='Ea{} [kJ/mol]'.format(chr(0x2080 + int(str(i)[0])))).grid(column=coli7, row=i, sticky=(W, N, E, S), padx=Pad_x, pady=Pad_y)
entriesc3b.append(Checkbutton(mainframe3b, text="Reaction {} Reversable".format(i), variable=intvars3b[i - 1]).grid(column=coli9, row=i, columnspan=2, sticky=(W, N, E, S)))
Button(root3b, text="OK", command=close_window3b).grid(column=coli9, row=1, padx=Pad_x, pady=Pad_y)
root3b.mainloop()
return reactants_num3b, products_num3b, kk3b, eaf3b, reverse3b
def fourth(self, chemnamesl, rxnnum, reactants_num, products_num, reverse):
coeffsp4b = []
coeffsr4b = []
def close_window4b():
global entry
num_chems = int(len(chemnamesl))
for i in range(0, rxnnum, 1):
cfsr = [0*ij for ij in range(0, num_chems, 1)]
cfsp = [0*ik for ik in range(0, num_chems, 1)]
for j in range(0, reactants_num[i], 1):
entry4r = entriesrc[i][j].get()
indexr = chemnamesl.index(entry4r)
cfsr[indexr] = int(entriesr4[i][j].get())
coeffsr4b.append(cfsr[:])
cfsr.clear()
for k in range(0, products_num[i], 1):
entry4p = entriespc[i][k].get()
indexp = chemnamesl.index(entry4p)
cfsp[indexp] = int(entriesp4[i][k].get())
coeffsp4b.append(cfsp[:])
cfsp.clear()
root4b.destroy()
root4b = Tk()
root4b.title("Reactions")
mainframe4b = ttk.Frame(root4b, padding="3 3 12 12")
mainframe4b.grid(column=0, row=0, sticky=(N, W, E, S))
root4b.columnconfigure(0, weight=1)
root4b.rowconfigure(0, weight=1)
stringvarsr4 = []
stringvarsp4 = []
stringvarsrc = []
stringvarspc = []
entriesr4a = []
entriesp4a = []
entriesr4 = []
entriesp4 = []
entriesrca = []
entriespca = []
entriesrc = []
entriespc = []
rstrings = []
for i in range(0, rxnnum, 1):
rval = reverse[i]
if rval == 0:
rstrings.append(u"\u2192")
elif rval == 1:
rstrings.append(u"\u21CB")
for i in range(0, rxnnum, 1):
stringvarsr4.append([StringVar(value="1") for i in range(0, reactants_num[i], 1)])
stringvarsp4.append([StringVar(value="1") for i in range(0, products_num[i], 1)])
stringvarsrc.append([StringVar(value="1") for i in range(0, reactants_num[i], 1)])
stringvarspc.append([StringVar(value="1") for i in range(0, products_num[i], 1)])
for i in range(0, rxnnum, 1):
mainframe4b.rowconfigure(i + 1, weight=1)
int1 = 1
int2 = 2
jval = 1
for j in range(0, reactants_num[i], 1):
mainframe4b.columnconfigure(jval, weight=1)
entriesr4a.append(Entry(mainframe4b, width=7, textvariable=stringvarsr4[i][j]))
entriesr4a[-1].grid(column=jval, row=int(i + 1))
jval += 1
mainframe4b.columnconfigure(jval, weight=1)
combbo = Combobox(mainframe4b, values=chemnamesl)
combbo.grid(column=jval, row=int(i + 1))
jval += 1
mainframe4b.columnconfigure(jval, weight=1)
entriesrca.append(combbo)
if j < reactants_num[i]-1:
mainframe4b.columnconfigure(jval, weight=1)
Label(mainframe4b, text=" + ").grid(column=jval, row=int(i + 1))
jval += 1
elif j == reactants_num[i]-1:
mainframe4b.columnconfigure(jval, weight=1)
Label(mainframe4b, text=" {} ".format(rstrings[i])).grid(column=jval, row=int(i + 1))
jval += 1
int1 += 1
int2 += 1
entriesr4.append(entriesr4a[:])
entriesr4a.clear()
entriesrc.append(entriesrca[:])
entriesrca.clear()
for k in range(0, products_num[i], 1):
mainframe4b.columnconfigure(jval, weight=1)
entriesp4a.append(Entry(mainframe4b, width=7, textvariable=stringvarsp4[i][k]))
entriesp4a[-1].grid(column=jval, row=int(i + 1))
jval += 1
mainframe4b.columnconfigure(jval, weight=1)
combbb = Combobox(mainframe4b, values=chemnamesl)
combbb.grid(column=jval, row=int(i + 1))
jval += 1
mainframe4b.columnconfigure(jval, weight=1)
entriespca.append(combbb)
if k < products_num[i]-1:
Label(mainframe4b, text=" + ").grid(column=jval, row=int(i + 1))
jval += 1
mainframe4b.columnconfigure(jval, weight=1)
else:
mainframe4b.rowconfigure(int(rxnnum) + 2, weight=1)
Button(root4b, text="OK", command=close_window4b).grid(column=2, row=int(rxnnum+2))
entriesp4.append(entriesp4a[:])
entriesp4a.clear()
entriespc.append(entriespca[:])
entriespca.clear()
root4b.mainloop()
rxns_strs = ["Reaction {}".format(int(i + 1)) for i in range(0, rxnnum, 1)]
for i in range(0, rxnnum, 1):
indexnum = int(i + 1)
keys = chemnamesl
valuesr = coeffsr4b[i][:]
valuesp = coeffsp4b[i][:]
dictionary = {"Ea": indexnum, "K_Value": indexnum, "Reverse": reverse[i], "Reactants": dict(zip(keys, valuesr)), "Products": dict(zip(keys, valuesp))}
self.Initreactions4b.append(dictionary)
for i in range(0, len(chemnamesl), 1):
indexnum = int(i + 1)
namev = chemnamesl[i]
name_index = chemnamesl[i].index(namev)
keys = rxns_strs
valuesfor = [0*ij for ij in range(0, rxnnum, 1)]
valuesrev = [0*ik for ik in range(0, rxnnum, 1)]
for j in range(0, rxnnum, 1):
valuef = coeffsr4b[j][name_index]
if valuef != 0 and coeffsp4b[j][name_index] == 0:
valuesfor[j] = int(-1)
valuesrev[j] = int(1*reverse[j])
elif coeffsp4b[j][name_index] != 0:
valuesfor[j] = int(1)
valuesrev[j] = int(-1*reverse[j])
dictionary2 = {"Name": "{}".format(str(namev)), "Reactions": dict(zip(keys, valuesfor)), "Reverse": dict(zip(keys, valuesrev))}
self.Eqlist4b.append(dictionary2)
return self.Initreactions4b, self.Eqlist4b
def fullgui():
chemnumsl, rxnsvl, indvdf, RR, ffpath = gui.first()
chems_value = chemnumsl[0]
rxnnum = int(rxnsvl[0])
rxnnumr = [int(i + 1) for i in range(rxnnum)]
chemnamesl = gui.second(chems_value)
reactants_num, products_num, kk, eaf, reverse = gui.third(rxnnumr, rxnnum)
        Initreactions, Eqlist = gui.fourth(gui, chemnamesl, rxnnum, reactants_num, products_num, reverse)
return chemnamesl, rxnnum, Initreactions, Eqlist, indvdf[0], ffpath[0], kk, kJtoJ(eaf), RR[0]
class symbolgen:
def __init__(self, nlist, Initlist, EQlist):
self.nameslist = nlist
self.rxnnum = len(self.nameslist)
self.initlist = Initlist
self.Eqlist = EQlist
def initl(self):
return self.initlist
def latexin(self):
latexs = self.eqlist(self.Eqlist, self.reactants, self.products)[1]
return latexs
def symsinit(self):
return self.symfunc(self.nameslist, self.rxnnum)[0]
def rinit(self):
return self.initfunc(self.initreactions, self.C)[0]
def pinit(self):
return self.initfunc(self.initreactions, self.C)[1]
initreactions = property(initl)
C = property(symsinit, symsinit)
reactants = property(rinit)
products = property(pinit)
latexs = property(latexin)
def mfunci(funcsl, ylist, i, j):
return diff(funcsl[i], ylist[j])
def symfunc(names, rxnum):
Csyms = [symbols(r'C_{}'.format('{}'.format(i))) for i in names]
Ksyms = [symbols(r'K_{}'.format(j)) for j in range(rxnum)]
EAsyms = [symbols(r'Ea_{}'.format(k)) for k in range(rxnum)]
Tsyms = [symbols('T')]
return Csyms, Ksyms, EAsyms, Tsyms
def numfunc(Cs):
cl = len(Cs)
fcs = []
for i in range(cl):
As = []
Ns = []
NNs = []
val3 = Cs[i]
se = list(val3)
count = 0
sb = list(val3)
SG = []
fnum = len(se) - 1
fend = len(se) - 1
for sv in range(len(sb)):
fend = len(se)
vv = sb[sv]
N = vv.isnumeric()
A = vv.isalpha()
ff = fend - sv
if A is True and count == 0:
As.append(vv)
SG.append(vv)
count = 0
fnum -= 1
if A is True and count > 0:
NNa = "".join(Ns)
SG.append(NNa)
SG.append(vv)
Ns.clear()
count = 0
fnum -= 1
if A is True and count >= 2:
NNa = "".join(Ns)
NNs.append(NNa)
Ns.clear()
SG.append(NNa)
SG.append(vv)
count = 0
fnum -= 1
if N is True and ff > 1:
Ns.append(vv)
count += 1
if N is True and ff <= 1:
Ns.append(vv)
if len(Ns) >= 2:
NNa = "".join(Ns)
NNs.append(NNa)
SG.append(NNa)
else:
SG.append(vv)
count = 0
Ns.clear()
As.clear()
val2 = str(Cs[i])
s = list(val2)
for j in range(len(SG)):
charv = SG[j]
try:
charvi = int(SG[j])
SG[j] = charv.replace('{}'.format(charvi), ('_{' + '{}'.format(charvi) + '}'))
except Exception:
pass
ss = "".join(SG)
s.clear()
fcs.append(ss)
return fcs
def rterm(Ci, a):
termi = Mul(a, Pow(Ci, abs(int(a))))
return termi
def rprod(Ci, a, Cj, b):
term1 = symbolgen.rterm(Ci, a)
term2 = symbolgen.rterm(Cj, b)
term3 = Mul(term1, term2)
return term3
def initfunc(initlist, C):
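        # Build Arrhenius-style rate expressions k_i * exp(-Ea_i / (R*T)) times the
        # stoichiometry-weighted concentration terms of the reactants and products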
reactants = []
products = []
for i, j in enumerate(initlist):
Reactants = initlist[i]['Reactants']
Products = initlist[i]['Products']
Rvals = list(Reactants.values())
Pvals = list(Products.values())
Ks = symbols('k_{}'.format(i + 1))
Eas = symbols('Ea_{}'.format(i + 1))
RT = Mul(Symbol('R'), Symbol('T'))
RTI = Pow(RT, Integer(-1))
EART = Mul(Eas, RTI)
EARTI = Mul(EART, Integer(-1))
ee = exp(EARTI)
rterms = []
pterms = []
rtotal = Integer(1)
ptotal = Integer(1)
for k, li in zip(C, Rvals):
if li != 0:
term = symbolgen.rterm(k, li)
rterms.append(term)
for t in rterms:
rtotal = Mul(rtotal, t)
for m, n in zip(C, Pvals):
if n != 0:
pterm = symbolgen.rterm(m, n)
pterms.append(pterm)
for tt in pterms:
ptotal = Mul(ptotal, tt)
reactants.append(Mul(Ks, Mul(rtotal, ee)))
products.append(Mul(Ks, Mul(ptotal, ee)))
return [reactants, products]
def eqlist(eqlistl, R, P):
reactants = R
products = P
EQS = []
leqns = []
for i, j in enumerate(eqlistl):
Reactions = eqlistl[i]['Reactions']
Reverse = eqlistl[i]['Reverse']
Rxn = list(Reactions.values())
RxnR = list(Reverse.values())
eqn = []
Reacts = [i*j for i, j in zip(Rxn, reactants) if i != 0]
Prods = [i*j for i, j in zip(RxnR, products) if i != 0]
if not Prods:
eee = sum(Reacts)
rlatex = latex(eee)
leqns.append(rlatex)
EQS.append(eee)
else:
eqn = sum(Reacts)
peqn = sum(Prods)
eeqn = Add(eqn, peqn)
rlatex = latex(eeqn)
leqns.append(rlatex)
EQS.append(eeqn)
return [EQS, leqns]
def dislat(lnames, latexs, indvar):
Latexs = []
Displays = []
Dbs = []
for i in range(len(latexs)):
dd = '{d' + 'C_{}{}{}'.format("{", symbols(lnames[i]), "}") + '}'
dt = '{d' + '{}'.format(symbols(indvar)) + '}'
dde = r'$\dfrac{}{}'.format(dd, dt) + ' = ' + '{}$'.format(latexs[i])
ddeb = r'\dfrac{}{}'.format(dd, dt) + ' = ' + '{}'.format(latexs[i])
ddg = Latex(dde)
Latexs.append(dde)
Displays.append(ddg)
Dbs.append(ddeb)
return Displays, Latexs, Dbs
def chemeq(Cs, rxn, inits):
ceqs = []
ceqsD = []
ceqsw = []
for i in range(rxn):
Reactants = inits[i]['Reactants']
Products = inits[i]['Products']
Reverse = inits[i]['Reverse']
Rvals = list(Reactants.values())
rvals = [Rvals[kk] for kk in range(len(Rvals)) if Rvals[kk] != 0]
Rname = symbolgen.numfunc(list(Reactants.keys()))
rname = [symbols('{}'.format(Rname[h])) for h in range(len(Rname)) if Rvals[h] != 0]
Pvals = list(Products.values())
pvals = [Pvals[kk] for kk in range(len(Pvals)) if Pvals[kk] != 0]
Pname = symbolgen.numfunc(list(Products.keys()))
pname = [symbols('{}'.format(Pname[h])) for h in range(len(Pname)) if Pvals[h] != 0]
CRvals = sum([Mul(Integer(ii), jj) for ii, jj in zip(rvals, rname) if ii != 0])
CPvals = sum([Mul(Integer(ii), jj) for ii, jj in zip(pvals, pname) if ii != 0])
if Reverse == 0:
cheme = r'${} \longrightarrow {}$'.format(CRvals, CPvals)
if Reverse == 1:
cheme = r'${} \rightleftharpoons {}$'.format(CRvals, CPvals)
ceqsD.append(Latex(cheme))
ceqs.append(cheme)
if Reverse == 0:
chemw = r'{} \\longrightarrow {}'.format(CRvals, CPvals)
if Reverse == 1:
chemw = r'{} \\rightleftharpoons {}'.format(CRvals, CPvals)
ceqsw.append(chemw)
return ceqs, ceqsD, ceqsw
def rhseqs(equations, kk, ea, r):
EQLIST = []
EQLISTF = []
for ind, e in enumerate(equations):
eqn = [r'{}'.format(e).replace('{', '').replace('}', '')]
Ksyms = [symbols('k_{}'.format(i + 1)) for i in range(len(kk))]
EAsyms = [symbols('Ea_{}'.format(i + 1)) for i in range(len(ea))]
kdictionary = dict(zip(Ksyms, kk))
eadictionary = dict(zip(EAsyms, ea))
eqn3 = e.subs(kdictionary)
eqn4 = eqn3.subs(eadictionary)
eqn5 = eqn4.subs({'R': 8.31446261815324})
eqn6b = eqn5.subs({'*exp': '*sp.exp'})
EQLISTF.append(eqn6b)
EQLIST.append(eqn[0])
return EQLIST, EQLISTF
def jacobian(rhs, y):
eqnl = len(rhs)
cl = len(y)
def mfunc(i, j):
return diff(rhs[i], y[j])
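        # J, Jn and Jm hold string forms of each partial derivative for the sympy, numpy
        # and math back-ends; Ja/MatrixJ keep the symbolic matrix used for the LaTeX output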
J = [[i for i in range(cl)] for j in range(eqnl)]
Jf = [[sp.diff(rhs[j], y[i]) for i in range(cl)] for j in range(eqnl)]
Jn = [[i for i in range(cl)] for j in range(eqnl)]
Jm = [[i for i in range(cl)] for j in range(eqnl)]
ix, jx = symbols("ix jx")
Ja = Matrix(len(rhs), len(y), lambda i, j: mfunc(i, j))
for i in range(eqnl):
for j in range(cl):
J[i][j] = str('{}'.format('{}'.format(mfunc(i, j)).replace('*exp', '*sp.exp')))
for i in range(eqnl):
for j in range(cl):
Jn[i][j] = str('{}'.format('{}'.format(mfunc(i, j)).replace('*exp', '*np.exp')))
Jm[i][j] = str('{}'.format('{}'.format(mfunc(i, j)).replace('*exp', '*math.exp')))
MatrixJ = simplify(Matrix(Ja))
LatexMatrix = sp.latex(matrix2numpy(Matrix(Jf)))
lm = latex(MatrixJ, mode='inline', itex=True, mat_delim="(", mat_str='array')
return J, Jn, Jm, MatrixJ, lm, LatexMatrix
def sysgen(self):
equations, latexs = self.eqlist(self.Eqlist, self.reactants, self.products)
return equations
def sysdis(self):
equations, latexs = self.eqlist(self.Eqlist, self.reactants, self.products)
        slatex, dlatex, dlatexb = self.dislat(self.nameslist, self.latexs, self.indvar)
return dlatex
def dis(self):
        slatex, dlatex, dlatexb = self.dislat(self.nameslist, self.latexs, self.indvar)
for i in slatex:
display(i)
def gen(names, rxn, inits, eqs, intz):
Cs, Ks, EAs, Ts = symbolgen.symfunc(names, rxn)
reacts, prods = symbolgen.initfunc(inits, Cs)
equats, latexss = symbolgen.eqlist(eqs, reacts, prods)
        slat, dlat, dlatb = symbolgen.dislat(names, latexss, intz)
Chem, ChemD, ChemW = symbolgen.chemeq(Cs, rxn, inits)
return Cs, reacts, prods, equats, slat, dlat, Chem, ChemD, ChemW
def fullgen(names, rxn, inits, eqs, intz, filepathf, kk, ea, r, namesl):
Cs, Ks, EAs, Ts = symbolgen.symfunc(names, rxn)
reacts, prods = symbolgen.initfunc(inits, Cs)
equats, latexss = symbolgen.eqlist(eqs, reacts, prods)
slat, dlat, dlatb = symbolgen.dislat(names, latexss, intz)
Chem, ChemD, ChemW = symbolgen.chemeq(Cs, rxn, inits)
Cs.append("T")
RHS, RHSf = symbolgen.rhseqs(equats, kk, ea, r)
Jac, JacNumpy, JacMath, JacSimple, lm, latexmatrix = symbolgen.jacobian(RHSf, Cs)
JacS, JacNumpyS, JacMathS, JacSimpleS, lmS, latexmatrixS = symbolgen.jacobian(RHS, Cs)
symbolgen.psave(namesl, dlat, filepathf, dlatb)
symbolgen.csave(Chem, filepathf)
KS = [str(r"{}".format(Ks[i])) for i in range(len(Ks))]
EAS = [str(r"{}".format(EAs[i])) for i in range(len(EAs))]
EAK = KS.copy()
EAK.extend(EAS)
symbolgen.fsave(filepathf, equats, dlat, Chem, ChemW, RHS, RHSf, Jac, JacNumpy, JacMath, JacSimple, lm, latexmatrix, JacS, JacNumpyS, JacMathS, JacSimpleS, lmS, latexmatrixS, Cs, EAK, names)
return Cs, Ks, EAs, reacts, prods, equats, slat, dlat, Chem, ChemD, ChemW, RHS, RHSf, Jac, JacNumpy, JacMath, JacSimple, lm, latexmatrix, JacS, JacNumpyS, JacMathS, JacSimpleS, lmS, latexmatrixS
def psave(nameslist, LATEXD, fpath, LATEXB):
filename = fpath
Fblist = [r"\begin{align*}"]
for sa, ka in enumerate(LATEXD):
fig1 = plt.figure(frameon=False)
ax = fig1.add_axes([0, 0, 0.001, 0.001])
left, width = .25, .5
bottom, height = .25, .5
right = left + width
top = bottom + height
ax.set_axis_off()
ax.text(0.5 * (left + right), 0.5 * (bottom + top), ka, va='center', ha='center')
fig1.savefig(r'{}\Overall Reaction {}.svg'.format(filename, nameslist[sa]), bbox_inches='tight')
fig1.savefig(r'{}\Overall Reaction {}.pdf'.format(filename, nameslist[sa]), bbox_inches='tight')
fig1.savefig(r'{}\Overall Reaction {}.png'.format(filename, nameslist[sa]), bbox_inches='tight')
plt.close()
for sb, kb in enumerate(LATEXB):
Fblist.append(str(r'{}{}{}'.format(r"\mathbf{", kb, "}")))
Fblist.append(r"\\")
fig2 = plt.figure(frameon=False)
fig2.text(0, 0, r'${}{}{}$'.format(r"\mathbf{", kb, "}"), fontsize=25)
fig2.savefig(r'{}\Overall Reaction B {}.svg'.format(filename, nameslist[sb]), dpi=300, transparent=True, bbox_inches='tight', pad_inches=0.0)
fig2.savefig(r'{}\Overall Reaction B {}.pdf'.format(filename, nameslist[sb]), dpi=300, transparent=True, bbox_inches='tight', pad_inches=0.0)
fig2.savefig(r'{}\Overall Reaction B {}.png'.format(filename, nameslist[sb]), dpi=300, transparent=True, bbox_inches='tight', pad_inches=0.0)
plt.close()
Fblist.append(r"\end{align*}")
with open(r"{}\EquationsLatexp.txt".format(filename), "w") as output:
for eqi in Fblist:
output.write('"{}"{}'.format("{}".format(eqi), "\n"))
with open(r"{}\EquationsLatexp.txt".format(filename)) as filein, open(r"{}\EquationsLatexFinal.txt".format(filename), 'w') as fileout:
fileinl = filein.readlines()
for line in fileinl:
linef = line.replace('=', '&=')
lineff = linef.replace('}}"', r'}} \\ "')
linefff = lineff.replace('{dZ}', '{dZ}}')
lineffff = linefff.replace('}}} \\', '}} \\')
fileout.write(lineffff)
strf = (open(r"{}\EquationsLatexFinal.txt".format(filename), 'r').read())
def lf2space(s):
return " ".join(s.split("\n"))
eqf = str(lf2space(r"""{}""".format(strf))).replace('"', '')
eqfb = eqf.replace(r"\mathbf{", r"$\mathbf{")
eqfc = eqfb.replace(r"}} \\", "}}$ \n")
eqfd = eqfc.replace("&=", "=")
eqfe = eqfd.strip(r"\ \\")
eqff = eqfe.strip(r"\end{align*}")
eqfg = eqff.strip(r"\begin{align*}")
eqfh = eqfg.replace(r"\\ $", "$")
eqfj = eqfh.strip(r" \\")
eqfk = eqfj.replace(r" $\m", r"$\m")
fig3 = plt.figure(frameon=False)
plt.text(0, 0, eqf, {'color': 'black', 'fontsize': 22}, va="center", ha="left")
plt.axis('off')
plt.tick_params(axis='both', left='off', top='off', right='off', bottom='off', labelleft='off', labeltop='off', labelright='off', labelbottom='off')
fig3.savefig(r'{}\Total Reaction.png'.format(filename), format="png", dpi=300, transparent=True, bbox_inches='tight')
# plt.show()
plt.close()
fig4 = plt.figure(frameon=False)
plt.text(0, 0, eqf, {'color': 'black', 'fontsize': 16}, va="center", ha="left")
plt.axis('off')
plt.tick_params(axis='both', left='off', top='off', right='off', bottom='off', labelleft='off', labeltop='off', labelright='off', labelbottom='off')
fig4.savefig(r'{}\Total Reaction.pdf'.format(filename), format="pdf", dpi=300, transparent=True, bbox_inches='tight')
# plt.show()
plt.close()
new_rc_params = {'text.usetex': False,
"font.size": 12,
"svg.fonttype": 'none'}
mpl.rcParams.update(new_rc_params)
fig5 = plt.figure(frameon=False)
plt.axis('off')
plt.tick_params(axis='both', left='off', top='off', right='off', bottom='off', labelleft='off', labeltop='off', labelright='off', labelbottom='off')
ax = fig5.add_axes([0, 0, 0.001, 0.001])
left, width = .25, .5
bottom, height = .25, .5
right = left + width
top = bottom + height
ax.set_axis_off()
ax.text(0.5*(left+right), 0.5*(bottom+top), eqfk, va="center", ha="left")
fig5.savefig(r'{}\Total Reaction.svg'.format(filename), bbox_inches='tight')
# plt.show()
plt.close()
def csave(LATEXC, fpath):
filename = fpath
for s, k in enumerate(LATEXC):
fig = plt.figure(frameon=False)
ax = fig.add_axes([0, 0, 0.001, 0.001])
left, width = .25, .5
bottom, height = .25, .5
right = left + width
top = bottom + height
ax.set_axis_off()
ax.text(0.5*(left+right), 0.5*(bottom+top), r"$\bf{}Reaction \ {}{}: \ $".format("{", s + 1, "}") + k, va='center', ha='center')
fig.savefig(r'{}\Labelled Reaction {}.svg'.format(filename, s + 1), bbox_inches='tight')
fig.savefig(r'{}\Labelled Reaction {}.pdf'.format(filename, s + 1), bbox_inches='tight')
fig.savefig(r'{}\Labelled Reaction {}.png'.format(filename, s + 1), bbox_inches='tight')
plt.close()
for s, k in enumerate(LATEXC):
fig = plt.figure(frameon=False)
ax = fig.add_axes([0, 0, 0.001, 0.001])
left, width = .25, .5
bottom, height = .25, .5
right = left + width
top = bottom + height
ax.set_axis_off()
ax.text(0.5*(left+right), 0.5*(bottom+top), k, va='center', ha='center')
fig.savefig(r'{}\Reaction {}.svg'.format(filename, s + 1), bbox_inches='tight')
fig.savefig(r'{}\Reaction {}.pdf'.format(filename, s + 1), bbox_inches='tight')
fig.savefig(r'{}\Reaction {}.png'.format(filename, s + 1), bbox_inches='tight')
plt.close()
def fsave(ffpath, eqns, eqnslat, crxns, crxnsw, rhseq, rhseqf, Jac, JacN, JacMath, JacSimple, lm, latexmatrix, JacSy, JacSyN, JacMathSy, JacSimpleSy, lmSy, latexmatrixSy, C, EAK, nameslist):
with open(r"{}\Equations.txt".format(ffpath), "w") as output:
output.write("[")
el = len(eqns)
eel = 0
for eqn in eqns:
eel += 1
if eel < el:
output.write('{},\n'.format(str(eqn)))
if eel >= el:
output.write('{}]'.format(str(eqn)))
with open(r"{}\EquationsLatex.txt".format(ffpath), "w") as output:
for eqnlat in eqnslat:
output.write('{}\n'.format(str(eqnlat)))
with open(r"{}\Equations.tex".format(ffpath), "w") as output:
removetable = str.maketrans('', '', "$")
output.write(r"\documentclass{article}")
output.write("\n")
output.write(r"\usepackage{amsmath, nccmath, bm}")
output.write("\n")
output.write(r"\usepackage[bottom=0.2in,top=0.2in,left=0.2in,right=0.2in]{geometry}")
output.write("\n")
output.write(r"\begin{document}")
output.write("\n")
output.write(r"\begin{fleqn}")
output.write("\n")
for eqnlat in eqnslat:
output.write(r"\begin{equation}")
output.write("\n")
output.write(r"\begin{split}")
output.write("\n")
output.write('{}\n'.format(str(eqnlat).translate(removetable)))
output.write(r"\end{split}")
output.write("\n")
output.write(r"\end{equation}")
output.write("\n")
output.write(r"\\")
output.write("\n")
output.write(r"\end{fleqn}")
output.write("\n")
output.write(r"\end{document}")
with open(r"{}\ReactionsLatex.txt".format(ffpath), "w") as output:
for crxn in crxns:
output.write('{}\n'.format(str(crxn)))
with open(r"{}\ReactionsLatexWord.txt".format(ffpath), "w") as output:
for crxnw in crxnsw:
output.write('{}\n'.format(str(crxnw)))
with open(r"{}\RHSsymbols.txt".format(ffpath), "w") as output:
removetable = str.maketrans('', '', "[]'")
removetableB = str.maketrans('', '', "[]'")
output.write("def RHS(t, y, *args):\n")
output.write(" {} = args\n".format(str("{}".format(EAK)).translate(removetable)))
output.write(" {} = y\n".format(str("{}".format(C)).translate(removetable)))
ll = len(rhseq)
eqsss = []
for rhs in rhseq:
lr = rhseq.index(rhs)
if lr < ll:
eqsss.append(str("EQ_{}".format(nameslist[rhseq.index(rhs)])))
output.write(" EQ_{} = {}\n".format(nameslist[rhseq.index(rhs)], rhs))
elif lr >= ll:
eqsss.append(str("EQ_{}".format(nameslist[rhseq.index(rhs)])))
output.write(" EQ_{} = {}\n".format(nameslist[rhseq.index(rhs)], rhs))
output.write(" return [{}]".format(("{}".format(eqsss)).translate(removetableB)))
with open(r"{}\RHS.txt".format(ffpath), "w") as output:
removetable = str.maketrans('', '', "[]'")
removetableB = str.maketrans('', '', "[]'")
output.write("def RHS(t, y):\n")
output.write(" {} = args\n".format(str("{}".format(EAK)).translate(removetable)))
output.write(" {} = y\n".format(str("{}".format(C)).translate(removetable)))
ll = len(rhseqf)
lr = 0
eqsss = []
for rhsff in rhseqf:
lr += 1
if lr < ll:
eqsss.append(str("EQ_{}".format(nameslist[rhseqf.index(rhsff)])))
output.write(" EQ_{} = {}\n".format(nameslist[rhseqf.index(rhsff)], rhsff))
elif lr >= ll:
eqsss.append(str("EQ_{}".format(nameslist[rhseqf.index(rhsff)])))
output.write(" EQ_{} = {}\n".format(nameslist[rhseqf.index(rhsff)], rhsff))
output.write(" return [{}]".format(("{}".format(eqsss)).translate(removetableB)))
with open(r"{}\Jacobian.txt".format(ffpath), "w") as output:
removetable = str.maketrans('', '', "[]'")
removetableB = str.maketrans('', '', "[]'")
output.write("def Jacob(t, y, *args):\n")
output.write(" {} = args\n".format(str("{}".format(EAK)).translate(removetable)))
output.write(" {} = y\n".format(str("{}".format(C)).translate(removetable)))
output.write(" Jac = [")
jj = len(JacMathSy)
jjj = 0
for i in range(len(JacMathSy)):
jjj += 1
Jrow = JacMathSy[i][:]
if i == 0:
output.write(('{},\n'.format(Jrow)).replace("'", ""))
if jjj < jj and i != 0:
output.write((' {},\n'.format(Jrow)).replace("'", ""))
elif jjj >= jj:
output.write((' {}'.format(Jrow)).replace("'", ""))
output.write("]\n")
output.write(" return Jac")
with open(r"{}\JacobianSympy.txt".format(ffpath), "w") as output:
removetable = str.maketrans('', '', "[]'")
removetableB = str.maketrans('', '', "[]'")
output.write("def Jacob(t, y, *args):\n")
output.write(" {} = args\n".format(str("{}".format(EAK)).translate(removetable)))
output.write(" {} = y\n".format(str("{}".format(C)).translate(removetable)))
output.write(" Jac = [")
jj = len(JacSy)
jjj = 0
for i in range(len(JacSy)):
jjj += 1
Jrow = JacSy[i][:]
if i == 0:
output.write(('{},\n'.format(Jrow)).replace("'", ""))
if jjj < jj and i != 0:
output.write((' {},\n'.format(Jrow)).replace("'", ""))
elif jjj >= jj:
output.write((' {}'.format(Jrow)).replace("'", ""))
output.write("]\n")
output.write(" return Jac")
with open(r"{}\JacobianNumpy.txt".format(ffpath), "w") as output:
removetable = str.maketrans('', '', "[]'")
removetableB = str.maketrans('', '', "[]'")
output.write("def Jacob(t, y, *args):\n")
output.write(" {} = args\n".format(str("{}".format(EAK)).translate(removetable)))
output.write(" {} = y\n".format(str("{}".format(C)).translate(removetable)))
output.write(" Jac = [")
jj = len(JacN)
jjj = 0
for i in range(len(JacN)):
jjj += 1
Jrow = JacN[i][:]
if i == 0:
output.write(('{},\n'.format(Jrow)).replace("'", ""))
if jjj < jj and i != 0:
output.write((' {},\n'.format(Jrow)).replace("'", ""))
elif jjj >= jj:
output.write((' {}'.format(Jrow)).replace("'", ""))
output.write("]\n")
output.write(" return Jac")
with open(r"{}\JacobianMatrix.txt".format(ffpath), 'w') as output:
output.write('{}'.format(JacSimple))
with open(r"{}\JacobianLatex.txt".format(ffpath), "w") as output:
output.write('{}'.format(lm))
with open(r"{}\RHS.txt".format(ffpath)) as filein, open(r"{}\RightHandSide.txt".format(ffpath), 'w') as fileout:
fileinl = filein.readlines()
lfia = len(fileinl)
lffb = 0
for line in fileinl:
lffb += 1
line = line.replace("'", "")
line = line.replace("exp", "sp.exp")
if lffb < lfia:
fileout.write('{}'.format(line))
elif lffb >= lfia:
fileout.write('{}'.format(line))
with open(r"{}\RHSsymbols.txt".format(ffpath)) as filein, open(r"{}\RightHandSideSymbols.txt".format(ffpath), 'w') as fileout:
fileinl = filein.readlines()
lfi = len(fileinl)
lff = 0
for line in fileinl:
line = line.replace("'", "")
line = line.replace("exp", "math.exp")
lff += 1
if lff < lfi:
fileout.write('{}'.format(line))
elif lff >= lfi:
fileout.write('{}'.format(line))
pickle.dumps(JacSimple)
with open(r'{}\JacobianMatrixPickle.txt'.format(ffpath), 'wb') as f:
pickle.dump(JacSimple, f)
with open(r"{}\JacobianSymbolic.txt".format(ffpath), "w") as output:
removetable = str.maketrans('', '', "[]'")
removetableB = str.maketrans('', '', "[]'")
output.write("def Jacob(t, y, *args):\n")
output.write(" {} = args\n".format(str("{}".format(EAK)).translate(removetable)))
output.write(" {} = y\n".format(str("{}".format(C)).translate(removetable)))
output.write(" Jac = [")
jj = len(JacMathSy)
jjj = 0
for i in range(len(JacMathSy)):
jjj += 1
Jrow = JacMathSy[i][:]
if i == 0:
output.write(('{},\n'.format(Jrow)).replace("'", ""))
if jjj < jj and i != 0:
output.write((' {},\n'.format(Jrow)).replace("'", ""))
elif jjj >= jj:
output.write((' {}'.format(Jrow)).replace("'", ""))
output.write("]\n")
output.write(" return Jac")
with open(r"{}\JacobianSymbolicSympy.txt".format(ffpath), "w") as output:
removetable = str.maketrans('', '', "[]'")
removetableB = str.maketrans('', '', "[]'")
output.write("def Jacob(t, y, *args):\n")
output.write(" {} = args\n".format(str("{}".format(EAK)).translate(removetable)))
output.write(" {} = y\n".format(str("{}".format(C)).translate(removetable)))
output.write(" Jac = [")
jj = len(JacSy)
jjj = 0
for i in range(len(JacSy)):
jjj += 1
Jrow = JacSy[i][:]
if i == 0:
output.write(('{},\n'.format(Jrow)).replace("'", ""))
if jjj < jj and i != 0:
output.write((' {},\n'.format(Jrow)).replace("'", ""))
elif jjj >= jj:
output.write((' {}'.format(Jrow)).replace("'", ""))
output.write("]\n")
output.write(" return Jac")
with open(r"{}\JacobianSymbolicNumpy.txt".format(ffpath), "w") as output:
removetable = str.maketrans('', '', "[]'")
removetableB = str.maketrans('', '', "[]'")
output.write("def Jacob(t, y, *args):\n")
output.write(" {} = args\n".format(str("{}".format(EAK)).translate(removetable)))
output.write(" {} = y\n".format(str("{}".format(C)).translate(removetable)))
output.write(" Jac = [")
jj = len(JacSy)
jjj = 0
for i in range(len(JacSyN)):
jjj += 1
Jrow = JacSyN[i][:]
if i == 0:
output.write(('{},\n'.format(Jrow)).replace("'", ""))
if jjj < jj and i != 0:
output.write((' {},\n'.format(Jrow)).replace("'", ""))
elif jjj >= jj:
output.write((' {}'.format(Jrow)).replace("'", ""))
output.write("]\n")
output.write(" return Jac")
with open(r"{}\JacobianMatrixSymbolic.txt".format(ffpath), 'w') as output:
output.write('{}'.format(JacSimpleSy))
with open(r"{}\JacobianLatexSymbolic.txt".format(ffpath), "w") as output:
output.write('{}'.format(lmSy))
with open(r"{}\JacobianLatexSymbolic.txt".format(ffpath)) as filein, open(r"{}\Jacobian.tex".format(ffpath), "w") as output:
removetable = str.maketrans('', '', "$")
output.write(r"\documentclass{standalone}")
output.write("\n")
output.write(r"\usepackage{amsmath, nccmath, bm}")
output.write("\n")
output.write(r"\begin{document}")
output.write("\n")
fileinl = filein.readlines()
for line in fileinl:
lineb = line.replace("&", ", &")
linec = lineb.replace(r"\\", r" \\" + " \n")
output.write(linec)
output.write("\n")
output.write(r"\end{document}")
pickle.dumps(JacSimpleSy)
with open(r'{}\JacobianMatrixPickleSymbolic.txt'.format(ffpath), 'wb') as f:
pickle.dump(JacSimpleSy, f)
try:
create_pdf(r"{}\Equations.tex".format(ffpath), "{}".format(ffpath))
os.remove(r"{}\Equations.aux".format(ffpath))
os.remove(r"{}\Equations.log".format(ffpath))
except Exception:
print("Coulnd't convert Equations.tex")
pass
try:
create_pdf(r"{}\Jacobian.tex".format(ffpath), "{}".format(ffpath))
os.remove(r"{}\Jacobian.aux".format(ffpath))
os.remove(r"{}\Jacobian.log".format(ffpath))
except Exception:
print("Coulnd't convert Jacobian.tex")
pass
# Generates all necessary lists and values.
# chemical_names, number_of_reactions, Initial_reactions, Equation_list, indvdf, filepath, kvalues, ea_values, r_gas = gui.fullgui() # Generates all necessary lists and values.
# Calculates the jacobian and all other desired functions
# for key, value in locals().items():
# if callable(value) and value.__module__ == __name__:
# l.append(key)
# C_Symbols, KKS, EAS, reacts, prods, equations, slat, dlat, chem, chemD, chemw, rhs, rhsf, jac, jacnumpy, Jacmath, JacSimple, lm, latexmatrix, jacsy, jacnumpysy, jacmathsy, jacsimplesy, lmsy, latexmatrixsy = symbolgen.fullgen(chemical_names, number_of_reactions, Initial_reactions, Equation_list, indvdf, filepath, kvalues, ea_values, r_gas, chemical_names)
| 50,838
| 1,686
| 98
|
359984d2efcf31862c177b8ad21ea7529ed252f1
| 7,628
|
py
|
Python
|
tests/test_ethash.py
|
norswap/execution-specs
|
c2274790e8ac2d637c7dbe092477a1b21243916c
|
[
"CC0-1.0"
] | 1
|
2021-09-07T21:30:14.000Z
|
2021-09-07T21:30:14.000Z
|
tests/test_ethash.py
|
norswap/execution-specs
|
c2274790e8ac2d637c7dbe092477a1b21243916c
|
[
"CC0-1.0"
] | null | null | null |
tests/test_ethash.py
|
norswap/execution-specs
|
c2274790e8ac2d637c7dbe092477a1b21243916c
|
[
"CC0-1.0"
] | null | null | null |
import json
import multiprocessing as mp
import os
import pkgutil
import shutil
import subprocess
import tarfile
import tempfile
from random import randint
from typing import Tuple, cast
import pytest
import requests
from ethereum.base_types import Uint
from ethereum.crypto import keccak256
from ethereum.ethash import (
EPOCH_SIZE,
HASH_BYTES,
MIX_BYTES,
cache_size,
dataset_size,
epoch,
generate_cache,
generate_dataset_item,
generate_seed,
)
from ethereum.utils.numeric import is_prime
@pytest.mark.parametrize(
"block_number, expected_epoch",
[
(Uint(0), Uint(0)),
(Uint(29999), Uint(0)),
(Uint(30000), Uint(1)),
],
)
#
# Geth DAG related functionalities for fuzz testing
#
def test_dataset_generation_random_epoch(tmpdir: str) -> None:
"""
Generate a random epoch and obtain the DAG for that epoch from geth.
Then ensure the following 2 test scenarios:
        1. The first 100 dataset indices are the same when the Python
    implementation is compared with the DAG dataset.
        2. Randomly take 500 indices between
    [101, `dataset size in words` - 1] and ensure that the values are
    the same between the Python implementation and the DAG dataset.
"""
download_geth(tmpdir)
epoch_number = Uint(randint(0, 100))
block_number = epoch_number * EPOCH_SIZE + randint(0, EPOCH_SIZE - 1)
generate_dag_via_geth(f"{tmpdir}/geth", block_number, f"{tmpdir}/.ethash")
seed = generate_seed(block_number)
dag_dataset = fetch_dag_data(f"{tmpdir}/.ethash", seed)
cache = generate_cache(block_number)
dataset_size_bytes = dataset_size(block_number)
dataset_size_words = dataset_size_bytes // HASH_BYTES
assert len(dag_dataset) == dataset_size_words
assert generate_dataset_item(cache, Uint(0)) == dag_dataset[0]
for i in range(100):
assert generate_dataset_item(cache, Uint(i)) == dag_dataset[i]
    # Then for this dataset randomly take 500 indices and check the
    # data obtained from our implementation against the geth DAG
for _ in range(500):
index = Uint(randint(101, dataset_size_words - 1))
dataset_item = generate_dataset_item(cache, index)
assert dataset_item == dag_dataset[index], index
    # Manually force the dataset out of memory in case the gc
    # doesn't kick in immediately
del dag_dataset
| 31.00813
| 105
| 0.662166
|
import json
import multiprocessing as mp
import os
import pkgutil
import shutil
import subprocess
import tarfile
import tempfile
from random import randint
from typing import Tuple, cast
import pytest
import requests
from ethereum.base_types import Uint
from ethereum.crypto import keccak256
from ethereum.ethash import (
EPOCH_SIZE,
HASH_BYTES,
MIX_BYTES,
cache_size,
dataset_size,
epoch,
generate_cache,
generate_dataset_item,
generate_seed,
)
from ethereum.utils.numeric import is_prime
@pytest.mark.parametrize(
"block_number, expected_epoch",
[
(Uint(0), Uint(0)),
(Uint(29999), Uint(0)),
(Uint(30000), Uint(1)),
],
)
def test_epoch(block_number: Uint, expected_epoch: Uint) -> None:
assert epoch(block_number) == expected_epoch
def test_epoch_start_and_end_blocks_have_same_epoch() -> None:
for _ in range(100):
block_number = Uint(randint(10 ** 9, 2 * (10 ** 9)))
epoch_start_block_number = (block_number // EPOCH_SIZE) * EPOCH_SIZE
epoch_end_block_number = epoch_start_block_number + EPOCH_SIZE - 1
assert (
epoch(block_number)
== epoch(epoch_start_block_number)
== epoch(epoch_end_block_number)
)
def test_cache_size_1st_epoch() -> None:
assert (
cache_size(Uint(0)) == cache_size(Uint(0) + EPOCH_SIZE - 1) == 16776896
)
assert is_prime(cache_size(Uint(0)) // HASH_BYTES)
def test_cache_size_2048_epochs() -> None:
cache_size_2048_epochs = json.loads(
cast(
bytes,
pkgutil.get_data(
"ethereum", "assets/cache_sizes_2048_epochs.json"
),
).decode()
)
assert len(cache_size_2048_epochs) == 2048
for epoch_number in range(2048):
assert (
cache_size(Uint(epoch_number * EPOCH_SIZE))
== cache_size_2048_epochs[epoch_number]
)
def test_epoch_start_and_end_blocks_have_same_cache_size() -> None:
for _ in range(100):
block_number = Uint(randint(10 ** 9, 2 * (10 ** 9)))
epoch_start_block_number = (block_number // EPOCH_SIZE) * EPOCH_SIZE
epoch_end_block_number = epoch_start_block_number + EPOCH_SIZE - 1
assert (
cache_size(block_number)
== cache_size(epoch_start_block_number)
== cache_size(epoch_end_block_number)
)
def test_dataset_size_1st_epoch() -> None:
assert (
dataset_size(Uint(0))
== dataset_size(Uint(0 + EPOCH_SIZE - 1))
== 1073739904
)
assert is_prime(dataset_size(Uint(0)) // MIX_BYTES)
def test_dataset_size_2048_epochs() -> None:
dataset_size_2048_epochs = json.loads(
cast(
bytes,
pkgutil.get_data(
"ethereum", "assets/dataset_sizes_2048_epochs.json"
),
).decode()
)
assert len(dataset_size_2048_epochs) == 2048
for epoch_number in range(2048):
assert (
dataset_size(Uint(epoch_number * EPOCH_SIZE))
== dataset_size_2048_epochs[epoch_number]
)
def test_epoch_start_and_end_blocks_have_same_dataset_size() -> None:
for _ in range(100):
block_number = Uint(randint(10 ** 9, 2 * (10 ** 9)))
epoch_start_block_number = (block_number // EPOCH_SIZE) * EPOCH_SIZE
epoch_end_block_number = epoch_start_block_number + EPOCH_SIZE - 1
assert (
dataset_size(block_number)
== dataset_size(epoch_start_block_number)
== dataset_size(epoch_end_block_number)
)
def test_seed() -> None:
assert (
generate_seed(Uint(0))
== generate_seed(Uint(0 + EPOCH_SIZE - 1))
== b"\x00" * 32
)
assert (
generate_seed(Uint(EPOCH_SIZE))
== generate_seed(Uint(2 * EPOCH_SIZE - 1))
== keccak256(b"\x00" * 32)
)
# NOTE: The below bytes value was obtained by obtaining the seed for the same block number from Geth.
assert (
generate_seed(Uint(12345678))
== b"[\x8c\xa5\xaaC\x05\xae\xed<\x87\x1d\xbc\xabQBGj\xfd;\x9cJ\x98\xf6Dq\\z\xaao\x1c\xf7\x03"
)
def test_epoch_start_and_end_blocks_have_same_seed() -> None:
for _ in range(100):
block_number = Uint(randint(10000, 20000))
epoch_start_block_number = (block_number // EPOCH_SIZE) * EPOCH_SIZE
epoch_end_block_number = epoch_start_block_number + EPOCH_SIZE - 1
assert (
generate_seed(epoch_start_block_number)
== generate_seed(block_number)
== generate_seed(epoch_end_block_number)
)
#
# Geth DAG related functionalities for fuzz testing
#
def download_geth(dir: str) -> None:
geth_release_name = "geth-linux-amd64-1.10.8-26675454"
# 26 seconds to fetch Geth. 1.5 minute for each epoch dataset creation
url = f"https://gethstore.blob.core.windows.net/builds/{geth_release_name}.tar.gz"
r = requests.get(url)
with open(f"{dir}/geth.tar.gz", "wb") as f:
f.write(r.content)
geth_tar = tarfile.open(f"{dir}/geth.tar.gz")
geth_tar.extractall(dir)
shutil.move(f"{dir}/{geth_release_name}/geth", dir)
shutil.rmtree(f"{dir}/{geth_release_name}", ignore_errors=True)
os.remove(f"{dir}/geth.tar.gz")
def generate_dag_via_geth(
geth_path: str, block_number: Uint, dag_dump_dir: str
) -> None:
subprocess.call([geth_path, "makedag", str(block_number), dag_dump_dir])
def fetch_dag_data(dag_dump_dir: str, epoch_seed: bytes) -> Tuple[bytes, ...]:
dag_file_path = f"{dag_dump_dir}/full-R23-{epoch_seed.hex()[:16]}"
with open(dag_file_path, "rb") as fp:
dag_dataset = fp.read()
# The first 8 bytes are Magic Bytes and can be ignored.
dag_dataset = dag_dataset[8:]
dag_dataset_items = []
for i in range(0, len(dag_dataset), HASH_BYTES):
dag_dataset_items.append(dag_dataset[i : i + HASH_BYTES])
return tuple(dag_dataset_items)
def test_dataset_generation_random_epoch(tmpdir: str) -> None:
"""
Generate a random epoch and obtain the DAG for that epoch from geth.
Then ensure the following 2 test scenarios:
        1. The first 100 dataset indices are the same when the Python
    implementation is compared with the DAG dataset.
        2. Randomly take 500 indices between
    [101, `dataset size in words` - 1] and ensure that the values are
    the same between the Python implementation and the DAG dataset.
"""
download_geth(tmpdir)
epoch_number = Uint(randint(0, 100))
block_number = epoch_number * EPOCH_SIZE + randint(0, EPOCH_SIZE - 1)
generate_dag_via_geth(f"{tmpdir}/geth", block_number, f"{tmpdir}/.ethash")
seed = generate_seed(block_number)
dag_dataset = fetch_dag_data(f"{tmpdir}/.ethash", seed)
cache = generate_cache(block_number)
dataset_size_bytes = dataset_size(block_number)
dataset_size_words = dataset_size_bytes // HASH_BYTES
assert len(dag_dataset) == dataset_size_words
assert generate_dataset_item(cache, Uint(0)) == dag_dataset[0]
for i in range(100):
assert generate_dataset_item(cache, Uint(i)) == dag_dataset[i]
    # Then for this dataset randomly take 500 indices and check the
    # data obtained from our implementation against the geth DAG
for _ in range(500):
index = Uint(randint(101, dataset_size_words - 1))
dataset_item = generate_dataset_item(cache, index)
assert dataset_item == dag_dataset[index], index
    # Manually force the dataset out of memory in case the gc
    # doesn't kick in immediately
del dag_dataset
| 4,919
| 0
| 298
|
8d98c01ed4c6b69f27663952ca8b02969b480b52
| 536
|
py
|
Python
|
run/wsid_utils.py
|
osmanbaskaya/wsid
|
b486fb907a85981aa0d0c30210df1d0c33fcbff2
|
[
"MIT"
] | 1
|
2016-12-22T21:41:21.000Z
|
2016-12-22T21:41:21.000Z
|
run/wsid_utils.py
|
osmanbaskaya/wsid
|
b486fb907a85981aa0d0c30210df1d0c33fcbff2
|
[
"MIT"
] | 4
|
2015-06-20T14:09:35.000Z
|
2015-06-26T18:29:26.000Z
|
run/wsid_utils.py
|
osmanbaskaya/wsid
|
b486fb907a85981aa0d0c30210df1d0c33fcbff2
|
[
"MIT"
] | 1
|
2018-05-11T00:12:59.000Z
|
2018-05-11T00:12:59.000Z
|
__author__ = 'thorn'
import logging
import sys
LOGGER = logging.getLogger()
| 26.8
| 79
| 0.703358
|
__author__ = 'thorn'
import logging
import sys
LOGGER = logging.getLogger()
def prepare_logger(log_level, filename=None):
log_level = getattr(logging, log_level.upper(), None)
if filename is None:
filename = sys.stderr
LOGGER.setLevel(level=log_level)
handler = logging.StreamHandler(filename)
handler.setLevel(log_level)
formatter = logging.Formatter(
u'[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s')
handler.setFormatter(formatter)
LOGGER.addHandler(handler)
| 436
| 0
| 23
|
e9453d60c4d848a63a4895b1d3565c53b7257b94
| 2,866
|
py
|
Python
|
03_load/load.py
|
Informasjonsforvaltning/dataservice-publisher-etl
|
1368a4036ccfe11a53bcc1d69c44b75e5b896413
|
[
"Apache-2.0"
] | null | null | null |
03_load/load.py
|
Informasjonsforvaltning/dataservice-publisher-etl
|
1368a4036ccfe11a53bcc1d69c44b75e5b896413
|
[
"Apache-2.0"
] | 1
|
2021-09-14T10:09:55.000Z
|
2021-10-01T07:19:32.000Z
|
03_load/load.py
|
Informasjonsforvaltning/dataservice-publisher-etl
|
1368a4036ccfe11a53bcc1d69c44b75e5b896413
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
from os import environ as env
import requests
from dotenv import load_dotenv
# Get environment
load_dotenv()
DATASERVICE_PUBLISHER_HOST_URL = env.get("DATASERVICE_PUBLISHER_HOST_URL")
ADMIN_USERNAME = env.get("ADMIN_USERNAME", "admin")
ADMIN_PASSWORD = env.get("ADMIN_PASSWORD")
INPUT_FILE = env.get("INPUT_FILE")
def login() -> str:
"""Logs in to get an access_token."""
url = f"{DATASERVICE_PUBLISHER_HOST_URL}/login"
try:
headers = {"Content-Type": "application/json"}
data = dict(username=ADMIN_USERNAME, password=ADMIN_PASSWORD)
response = requests.post(url, json=data, headers=headers)
if response.status_code == 200:
data = response.json()
token = data["access_token"]
print(f"Successful login. Token >{token}<")
return token
else:
logging.error(f"Unsuccessful login : {response.status_code}")
return None
except Exception as e:
logging.error("Got exception", e)
return None
def delete_catalog(access_token, catalog) -> bool:
"""Tries to delete the catalog."""
headers = {
"Authorization": f"Bearer {access_token}",
}
url = catalog["identifier"]
response = requests.delete(url, headers=headers)
if response.status_code == 204:
print(f"Deleted catalog {url}")
return True
elif response.status_code == 404:
print(f"Catalog {url} does not exist. Safe to proceed")
return True
else:
logging.error(f"Unsuccessful, status_code: {response.status_code}")
# msg = json.loads(response.content)["msg"]
# logging.error(f"Unsuccessful, msg : {msg}")
logging.error(response.content)
return False
def load_catalog(access_token, catalog) -> bool:
"""Loads the catalog and returns True if successful."""
url = f"{DATASERVICE_PUBLISHER_HOST_URL}/catalogs"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {access_token}",
}
response = requests.post(url, json=catalog, headers=headers)
if response.status_code == 200:
print(
f"loaded from file {INPUT_FILE}",
)
return True
else:
logging.error(f"Unsuccessful, status_code: {response.status_code}")
# msg = json.loads(response.content)["msg"]
# logging.error(f"Unsuccessful, msg : {msg}")
logging.error(response.content)
return False
if __name__ == "__main__":
access_token = login()
if access_token:
with open(INPUT_FILE) as json_file:
catalog = json.load(json_file)
delete_catalog(access_token, catalog)
result = load_catalog(access_token, catalog)
if result:
print(f"Successfully loaded content of {INPUT_FILE}.")
| 31.844444
| 75
| 0.64201
|
import json
import logging
from os import environ as env
import requests
from dotenv import load_dotenv
# Get environment
load_dotenv()
DATASERVICE_PUBLISHER_HOST_URL = env.get("DATASERVICE_PUBLISHER_HOST_URL")
ADMIN_USERNAME = env.get("ADMIN_USERNAME", "admin")
ADMIN_PASSWORD = env.get("ADMIN_PASSWORD")
INPUT_FILE = env.get("INPUT_FILE")
def login() -> str:
"""Logs in to get an access_token."""
url = f"{DATASERVICE_PUBLISHER_HOST_URL}/login"
try:
headers = {"Content-Type": "application/json"}
data = dict(username=ADMIN_USERNAME, password=ADMIN_PASSWORD)
response = requests.post(url, json=data, headers=headers)
if response.status_code == 200:
data = response.json()
token = data["access_token"]
print(f"Successful login. Token >{token}<")
return token
else:
logging.error(f"Unsuccessful login : {response.status_code}")
return None
except Exception as e:
logging.error("Got exception", e)
return None
def delete_catalog(access_token, catalog) -> bool:
"""Tries to delete the catalog."""
headers = {
"Authorization": f"Bearer {access_token}",
}
url = catalog["identifier"]
response = requests.delete(url, headers=headers)
if response.status_code == 204:
print(f"Deleted catalog {url}")
return True
elif response.status_code == 404:
print(f"Catalog {url} does not exist. Safe to proceed")
return True
else:
logging.error(f"Unsuccessful, status_code: {response.status_code}")
# msg = json.loads(response.content)["msg"]
# logging.error(f"Unsuccessful, msg : {msg}")
logging.error(response.content)
return False
def load_catalog(access_token, catalog) -> bool:
"""Loads the catalog and returns True if successful."""
url = f"{DATASERVICE_PUBLISHER_HOST_URL}/catalogs"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {access_token}",
}
response = requests.post(url, json=catalog, headers=headers)
if response.status_code == 200:
print(
f"loaded from file {INPUT_FILE}",
)
return True
else:
logging.error(f"Unsuccessful, status_code: {response.status_code}")
# msg = json.loads(response.content)["msg"]
# logging.error(f"Unsuccessful, msg : {msg}")
logging.error(response.content)
return False
if __name__ == "__main__":
access_token = login()
if access_token:
with open(INPUT_FILE) as json_file:
catalog = json.load(json_file)
delete_catalog(access_token, catalog)
result = load_catalog(access_token, catalog)
if result:
print(f"Successfully loaded content of {INPUT_FILE}.")
| 0
| 0
| 0
|
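The loader above only needs the four variables it reads through dotenv; a hedged local-run sketch with placeholder values (not real endpoints or credentials):
# Placeholder environment for a dry run of load.py; every value here is illustrative.
import os
os.environ.setdefault("DATASERVICE_PUBLISHER_HOST_URL", "http://localhost:8080")
os.environ.setdefault("ADMIN_USERNAME", "admin")
os.environ.setdefault("ADMIN_PASSWORD", "change-me")
os.environ.setdefault("INPUT_FILE", "catalog.json")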
52b1621400649445d9209e1fd07f69c9b102cd5b
| 4,602
|
py
|
Python
|
create_model.py
|
shenalt/stroke_predictor
|
ab543200245ee51fce89f71d24e2acee74d6a152
|
[
"MIT"
] | null | null | null |
create_model.py
|
shenalt/stroke_predictor
|
ab543200245ee51fce89f71d24e2acee74d6a152
|
[
"MIT"
] | null | null | null |
create_model.py
|
shenalt/stroke_predictor
|
ab543200245ee51fce89f71d24e2acee74d6a152
|
[
"MIT"
] | null | null | null |
# Import our libraries
# Import pandas and numpy
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
# Helper function to split our data
from sklearn.model_selection import train_test_split
# Import our Logistic Regression model
from sklearn.linear_model import LogisticRegression
# Import helper functions to evaluate our model
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score, roc_auc_score
# Import z-score helper function
import scipy.stats as stats
from IPython.display import Image
# Import helper function for hyper-parameter tuning
from sklearn.model_selection import GridSearchCV
# Import Decision Tree
# from sklearn.tree import DecisionTreeClassifier
# Import Random Forest
from sklearn.ensemble import RandomForestClassifier
# Import metrics to score our model
from sklearn import metrics
# LOAD IN AND CLEAN UP THE DATA BEFORE MERGING
# Load in the first stroke dataset
df = pd.read_csv('https://raw.githubusercontent.com/shenalt/tissera_yasser_DS_project/main/healthcare-dataset-stroke-data.csv')
# Drop the id column
df.drop(columns=['id'], inplace=True)
# Fill the bmi null values in df
df['bmi'] = df.bmi.fillna(df.bmi.mean())
# Remove entries with gender Other from df
df = df[df['gender'] != 'Other']
# Normalize our numerical features to ensure they have equal weight when I build my classifiers
# Create a new column for normalized age
df['age_norm']=(df['age']-df['age'].min())/(df['age'].max()-df['age'].min())
# Create a new column for normalized avg glucose level
df['avg_glucose_level_norm']=(df['avg_glucose_level']-df['avg_glucose_level'].min())/(df['avg_glucose_level'].max()-df['avg_glucose_level'].min())
# Create a new column for normalized bmi
df['bmi_norm']=(df['bmi']-df['bmi'].min())/(df['bmi'].max()-df['bmi'].min())
# Load in the second stroke dataset
df2 = pd.read_csv('https://raw.githubusercontent.com/shenalt/tissera_yasser_DS_project/main/train_strokes.csv')
# Drop the id column
df2.drop(columns=['id'], inplace=True)
# Fill the bmi null values in df2
df2['bmi'] = df2.bmi.fillna(df2.bmi.mean())
# Create a new category for the smoking null values
df2['smoking_status'] = df2['smoking_status'].fillna('not known')
# Remove entries with gender Other from df2
df2 = df2[df2['gender'] != 'Other']
# Normalize our numerical features to ensure they have equal weight when I build my classifiers
# Create a new column for normalized age
df2['age_norm']=(df2['age']-df2['age'].min())/(df2['age'].max()-df2['age'].min())
# Create a new column for normalized avg glucose level
df2['avg_glucose_level_norm']=(df2['avg_glucose_level']-df2['avg_glucose_level'].min())/(df2['avg_glucose_level'].max()-df2['avg_glucose_level'].min())
# Create a new column for normalized bmi
df2['bmi_norm']=(df2['bmi']-df2['bmi'].min())/(df2['bmi'].max()-df2['bmi'].min())
# Merge the two df's
df_master = df.merge(df2, how='outer')
# EXTRACT ALL STROKE ENTRIES AND ISOLATE 1000 RANDOM NON-STROKE ENTRIES INTO A DF
# Create a df from dataset with just the stroke entries
s_df = df_master.loc[df_master['stroke'] == 1]
# Remove age outliers from s_df
s_df = s_df.loc[s_df['age'] >= 45]
# Create a df from the dataset with the no stroke entries
n_df = df_master.sample(n=1100, random_state=30)
n_df = n_df.loc[n_df['stroke'] == 0]
# Merge them
df_final = s_df.merge(n_df, how='outer')
# FEATURE ENGINEERING TIME
# Convert certain features into numerical values
df_final = pd.get_dummies(df_final, columns=['gender', 'Residence_type', 'smoking_status', 'ever_married', 'work_type'])
# Begin to train our model
selected_features = ['age', 'bmi', 'avg_glucose_level', 'hypertension', 'heart_disease']
X = df_final[selected_features]
y = df_final['stroke']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=30)
# RANDOM FOREST CLASSIFIER
# Init our Random Forest Classifier Model
#model = RandomForestClassifier()
params = {
'n_estimators' : [10, 50, 100],
'criterion' : ['gini', 'entropy'],
'max_depth': [5, 10, 100, None],
'min_samples_split': [2, 10, 100],
'max_features': ['auto', 'sqrt', 'log2']
}
grid_search_cv = GridSearchCV(
estimator=RandomForestClassifier(),
param_grid=params,
scoring='accuracy' )
# fit all combinations of trees.
grid_search_cv.fit(X_train, y_train)
# keep the estimator with the highest accuracy score.
model = grid_search_cv.best_estimator_
# Fit our model
model.fit(X_train, y_train)
# Save our model using pickle
pickle.dump(model, open('models/rfc.pkl', 'wb') )
| 31.958333
| 151
| 0.735115
|
# Import our libraries
# Import pandas and numpy
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
# Helper function to split our data
from sklearn.model_selection import train_test_split
# Import our Logistic Regression model
from sklearn.linear_model import LogisticRegression
# Import helper functions to evaluate our model
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score, roc_auc_score
# Import z-score helper function
import scipy.stats as stats
from IPython.display import Image
# Import helper function for hyper-parameter tuning
from sklearn.model_selection import GridSearchCV
# Import Decision Tree
# from sklearn.tree import DecisionTreeClassifier
# Import Random Forest
from sklearn.ensemble import RandomForestClassifier
# Import metrics to score our model
from sklearn import metrics
# LOAD IN AND CLEAN UP THE DATA BEFORE MERGING
# Load in the first stroke dataset
df = pd.read_csv('https://raw.githubusercontent.com/shenalt/tissera_yasser_DS_project/main/healthcare-dataset-stroke-data.csv')
# Drop the id column
df.drop(columns=['id'], inplace=True)
# Fill the bmi null values in df
df['bmi'] = df.bmi.fillna(df.bmi.mean())
# Remove entries with gender Other from df
df = df[df['gender'] != 'Other']
# Normalize our numerical features to ensure they have equal weight when I build my classifiers
# Create a new column for normalized age
df['age_norm']=(df['age']-df['age'].min())/(df['age'].max()-df['age'].min())
# Create a new column for normalized avg glucose level
df['avg_glucose_level_norm']=(df['avg_glucose_level']-df['avg_glucose_level'].min())/(df['avg_glucose_level'].max()-df['avg_glucose_level'].min())
# Create a new column for normalized bmi
df['bmi_norm']=(df['bmi']-df['bmi'].min())/(df['bmi'].max()-df['bmi'].min())
# Load in the second stroke dataset
df2 = pd.read_csv('https://raw.githubusercontent.com/shenalt/tissera_yasser_DS_project/main/train_strokes.csv')
# Drop the id column
df2.drop(columns=['id'], inplace=True)
# Fill the bmi null values in df2
df2['bmi'] = df2.bmi.fillna(df2.bmi.mean())
# Create a new category for the smoking null values
df2['smoking_status'] = df2['smoking_status'].fillna('not known')
# Remove entries with gender Other from df2
df2 = df2[df2['gender'] != 'Other']
# Normalize our numerical features to ensure they have equal weight when I build my classifiers
# Create a new column for normalized age
df2['age_norm']=(df2['age']-df2['age'].min())/(df2['age'].max()-df2['age'].min())
# Create a new column for normalized avg glucose level
df2['avg_glucose_level_norm']=(df2['avg_glucose_level']-df2['avg_glucose_level'].min())/(df2['avg_glucose_level'].max()-df2['avg_glucose_level'].min())
# Create a new column for normalized bmi
df2['bmi_norm']=(df2['bmi']-df2['bmi'].min())/(df2['bmi'].max()-df2['bmi'].min())
# Merge the two df's
df_master = df.merge(df2, how='outer')
# EXTRACT ALL STROKE ENTRIES AND ISOLATE 1000 RANDOM NON-STROKE ENTRIES INTO A DF
# Create a df from dataset with just the stroke entries
s_df = df_master.loc[df_master['stroke'] == 1]
# Remove age outliers from s_df
s_df = s_df.loc[s_df['age'] >= 45]
# Create a df from the dataset with the no stroke entries
n_df = df_master.sample(n=1100, random_state=30)
n_df = n_df.loc[n_df['stroke'] == 0]
# Merge them
df_final = s_df.merge(n_df, how='outer')
# FEATURE ENGINEERING TIME
# Convert certain features into numerical values
df_final = pd.get_dummies(df_final, columns=['gender', 'Residence_type', 'smoking_status', 'ever_married', 'work_type'])
# Begin to train our model
selected_features = ['age', 'bmi', 'avg_glucose_level', 'hypertension', 'heart_disease']
X = df_final[selected_features]
y = df_final['stroke']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=30)
# RANDOM FOREST CLASSIFIER
# Init our Random Forest Classifier Model
#model = RandomForestClassifier()
params = {
'n_estimators' : [10, 50, 100],
'criterion' : ['gini', 'entropy'],
'max_depth': [5, 10, 100, None],
'min_samples_split': [2, 10, 100],
'max_features': ['auto', 'sqrt', 'log2']
}
grid_search_cv = GridSearchCV(
estimator=RandomForestClassifier(),
param_grid=params,
scoring='accuracy' )
# fit all combinations of trees.
grid_search_cv.fit(X_train, y_train)
# keep the estimator with the highest accuracy score.
model = grid_search_cv.best_estimator_
# Fit our model
model.fit(X_train, y_train)
# Save our model using pickle
pickle.dump(model, open('models/rfc.pkl', 'wb') )
| 0
| 0
| 0
|
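A hedged follow-up sketch for the training script above: reload the pickled forest and score it on the held-out split. X_test, y_test and models/rfc.pkl come from the script; the metric choice is illustrative and assumes this runs in the same session.
# Evaluate the saved Random Forest on the held-out split produced above.
import pickle
from sklearn.metrics import accuracy_score, recall_score
loaded_model = pickle.load(open('models/rfc.pkl', 'rb'))
test_preds = loaded_model.predict(X_test)
print('accuracy:', accuracy_score(y_test, test_preds))
print('recall:', recall_score(y_test, test_preds))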
38b9046147eb7b92e42ebab58b7eac2ed12093b0
| 2,507
|
py
|
Python
|
porcupine/_ipc.py
|
rscales02/porcupine
|
91b3c90d19d2291c0a60ddb9dffac931147cde3c
|
[
"MIT"
] | null | null | null |
porcupine/_ipc.py
|
rscales02/porcupine
|
91b3c90d19d2291c0a60ddb9dffac931147cde3c
|
[
"MIT"
] | null | null | null |
porcupine/_ipc.py
|
rscales02/porcupine
|
91b3c90d19d2291c0a60ddb9dffac931147cde3c
|
[
"MIT"
] | null | null | null |
import contextlib
from multiprocessing import connection
import os
import queue
import threading
from porcupine import dirs
_ADDRESS_FILE = os.path.join(dirs.cachedir, 'ipc_address.txt')
# the addresses contain random junk so they are very unlikely to
# conflict with each other
# example addresses: r'\\.\pipe\pyc-1412-1-7hyryfd_',
# '/tmp/pymp-_lk54sed/listener-4o8n1xrc',
def send(objects):
"""Send objects from an iterable to a process running session().
Raise ConnectionRefusedError if session() is not running.
"""
# reading the address file, connecting to a windows named pipe and
# connecting to an AF_UNIX socket all raise FileNotFoundError :D
try:
with open(_ADDRESS_FILE, 'r') as file:
address = file.read().strip()
client = connection.Client(address)
except FileNotFoundError:
raise ConnectionRefusedError("session() is not running") from None
with client:
for message in objects:
client.send(message)
def _listener2queue(listener, object_queue):
"""Accept connections. Receive and queue objects."""
while True:
try:
client = listener.accept()
except OSError:
# it's closed
break
with client:
while True:
try:
object_queue.put(client.recv())
except EOFError:
break
@contextlib.contextmanager
def session():
"""Context manager that listens for send().
Use this as a context manager:
# the queue will contain objects from send()
with session() as message_queue:
# start something that processes items in the queue and run
# the application
"""
message_queue = queue.Queue()
with connection.Listener() as listener:
with open(_ADDRESS_FILE, 'w') as file:
print(listener.address, file=file)
thread = threading.Thread(target=_listener2queue,
args=[listener, message_queue], daemon=True)
thread.start()
yield message_queue
if __name__ == '__main__':
# simple test
try:
send([1, 2, 3])
print("a server is running, a message was sent to it")
except ConnectionRefusedError:
print("a server is not running, let's become the server...")
with session() as message_queue:
while True:
print(message_queue.get())
| 29.845238
| 78
| 0.630634
|
import contextlib
from multiprocessing import connection
import os
import queue
import threading
from porcupine import dirs
_ADDRESS_FILE = os.path.join(dirs.cachedir, 'ipc_address.txt')
# the addresses contain random junk so they are very unlikely to
# conflict with each other
# example addresses: r'\\.\pipe\pyc-1412-1-7hyryfd_',
# '/tmp/pymp-_lk54sed/listener-4o8n1xrc',
def send(objects):
"""Send objects from an iterable to a process running session().
Raise ConnectionRefusedError if session() is not running.
"""
# reading the address file, connecting to a windows named pipe and
# connecting to an AF_UNIX socket all raise FileNotFoundError :D
try:
with open(_ADDRESS_FILE, 'r') as file:
address = file.read().strip()
client = connection.Client(address)
except FileNotFoundError:
raise ConnectionRefusedError("session() is not running") from None
with client:
for message in objects:
client.send(message)
def _listener2queue(listener, object_queue):
"""Accept connections. Receive and queue objects."""
while True:
try:
client = listener.accept()
except OSError:
# it's closed
break
with client:
while True:
try:
object_queue.put(client.recv())
except EOFError:
break
@contextlib.contextmanager
def session():
"""Context manager that listens for send().
Use this as a context manager:
# the queue will contain objects from send()
with session() as message_queue:
# start something that processes items in the queue and run
# the application
"""
message_queue = queue.Queue()
with connection.Listener() as listener:
with open(_ADDRESS_FILE, 'w') as file:
print(listener.address, file=file)
thread = threading.Thread(target=_listener2queue,
args=[listener, message_queue], daemon=True)
thread.start()
yield message_queue
if __name__ == '__main__':
# simple test
try:
send([1, 2, 3])
print("a server is running, a message was sent to it")
except ConnectionRefusedError:
print("a server is not running, let's become the server...")
with session() as message_queue:
while True:
print(message_queue.get())
| 0
| 0
| 0
|
5ddb4e0b6eb17f491af6b878f4f11a4fce1b453f
| 1,700
|
py
|
Python
|
EE.py
|
SDRAST/Physics
|
09f0c44a86248844c80873922d7f38ef06421cb9
|
[
"Apache-2.0"
] | null | null | null |
EE.py
|
SDRAST/Physics
|
09f0c44a86248844c80873922d7f38ef06421cb9
|
[
"Apache-2.0"
] | null | null | null |
EE.py
|
SDRAST/Physics
|
09f0c44a86248844c80873922d7f38ef06421cb9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from Physics import eps_0, mu_0
from math import pi, sqrt
resistivity = {'copper': 1.7e-8, 'aluminum': 2.8e-8, 'iron': 1e-7,
'steel-electrical': 4.6e-7, 'steel-stainless': 6.9e-7,
'gold': 2.44e-8, 'silver': 1.68e-8,
'graphite-min': 2.5e-6, 'graphite-max': 5e-6}
permeability = {'steel-electrical': 5e-3, 'steel-stainless': 1000*mu_0,
'steel-carbon': 8.75e-4, 'copper': mu_0,
'aluminum': mu_0}
permittivity = {'metal': eps_0}
def skin_depth(omega, rho, mu=mu_0, eps=eps_0):
"""
Depth of the current layer in a conductor subject to AC fields::
J = J_S * exp(-d/delta)
where J_S is the surface current density and delta is the skin depth.
Resistivity is defined so that the resistance of a bulk conductor is::
R = rho * L / A
where A is the cross-sectional area and L is the length.
@param omega : angular frequency (rad/s)
@type omega : float
@param mu : magnetic permeability (H/m)
@type mu : float
@param eps : electric permittivity (F/m)
@type eps : float
@param rho : resistivity (ohm-m)
@type rho : float
@return: m (float)
"""
return 1/omega/sqrt( (mu*eps/2) * (sqrt(1+(1/(rho*omega*eps))**2) -1) )
def skin_resistance(freq, rho, diam):
"""
Resistance in a 1-m thin wire.
A metal wire is assumed.
@param freq : Hz
@type freq : float
@param rho : material resistivity, ohm-m
@type rho : float
@param diam : diameter, m
@type diam : float
@return: ohm/m
"""
omega = 2*pi*freq
delta = skin_depth(omega, rho)
return rho/(pi*(diam-delta)*delta)
| 24.637681
| 73
| 0.592353
|
# -*- coding: utf-8 -*-
from Physics import eps_0, mu_0
from math import pi, sqrt
resistivity = {'copper': 1.7e-8, 'aluminum': 2.8e-8, 'iron': 1e-7,
'steel-electrical': 4.6e-7, 'steel-stainless': 6.9e-7,
'gold': 2.44e-8, 'silver': 1.68e-8,
'graphite-min': 2.5e-6, 'graphite-max': 5e-6}
permeability = {'steel-electrical': 5e-3, 'steel-stainless': 1000*mu_0,
'steel-carbon': 8.75e-4, 'copper': mu_0,
'aluminum': mu_0}
permittivity = {'metal': eps_0}
def skin_depth(omega, rho, mu=mu_0, eps=eps_0):
"""
Depth of the current layer in a conductor subject to AC fields::
J = J_S * exp(-d/delta)
where J_S is the surface current density and delta is the skin depth.
Resistivity is defined so that the resistance of a bulk conductor is::
R = rho * L / A
where A is the cross-sectional area and L is the length.
@param omega : angular frequency (rad/s)
@type omega : float
@param mu : magnetic permeability (H/m)
@type mu : float
@param eps : electric permittivity (F/m)
@type eps : float
@param rho : resistivity (ohm-m)
@type rho : float
@return: m (float)
"""
return 1/omega/sqrt( (mu*eps/2) * (sqrt(1+(1/(rho*omega*eps))**2) -1) )
def skin_resistance(freq, rho, diam):
"""
Resistance in a 1-m thin wire.
A metal wire is assumed.
@param freq : Hz
@type freq : float
@param rho : material resistivity, ohm-m
@type rho : float
@param diam : diameter, m
@type diam : float
@return: ohm/m
"""
omega = 2*pi*freq
delta = skin_depth(omega, rho)
return rho/(pi*(diam-delta)*delta)
| 0
| 0
| 0
|
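A worked example for skin_depth above, assuming EE.py is importable as a module; 50 Hz is an arbitrary test point and the quoted value is only an order-of-magnitude check.
# Skin depth of copper at mains frequency, using the resistivity table above.
from math import pi
from EE import skin_depth, resistivity
f = 50.0                                   # Hz, arbitrary choice
omega = 2 * pi * f
delta = skin_depth(omega, resistivity['copper'])
print(f"copper skin depth at {f:.0f} Hz: {delta * 1e3:.1f} mm")   # roughly 9 mm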
13fa079fceb6372fb93108ddbe5fa7ac5c341390
| 2,337
|
py
|
Python
|
t2/test1.py
|
thagd/matias.exe
|
87b5d425bc9a7334179b5fbce9cd5aa41caab302
|
[
"MIT"
] | 2
|
2019-10-31T03:51:49.000Z
|
2019-12-03T00:53:50.000Z
|
test1.py
|
FCChinen/lab2
|
b04b4e0788678e4de2365b74db505ef1a72b7103
|
[
"MIT"
] | null | null | null |
test1.py
|
FCChinen/lab2
|
b04b4e0788678e4de2365b74db505ef1a72b7103
|
[
"MIT"
] | 3
|
2019-09-03T00:48:16.000Z
|
2019-10-22T17:47:06.000Z
|
#!/usr/bin/env python3
import random
from mytcputils import *
from mytcp import Servidor
foi_aceita = False
rede = CamadaRede()
dst_port = random.randint(10, 1023)
servidor = Servidor(rede, dst_port)
servidor.registrar_monitor_de_conexoes_aceitas(conexao_aceita)
src_port = random.randint(1024, 0xffff)
seq_no = random.randint(0, 0xffff)
src_addr, dst_addr = '10.0.0.%d'%random.randint(1, 10), '10.0.0.%d'%random.randint(11, 20)
assert rede.fila == []
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, 0, FLAGS_SYN), src_addr, dst_addr))
assert foi_aceita, 'The accepted-connections monitor should have been called'
assert len(rede.fila) == 1
segmento, dst_addr2 = rede.fila[0]
assert fix_checksum(segmento, src_addr, dst_addr) == segmento
src_port2, dst_port2, seq_no2, ack_no2, flags2, _, _, _ = read_header(segmento)
assert 4*(flags2>>12) == len(segmento), 'The SYN+ACK should not carry a payload'
assert dst_addr2 == src_addr
assert src_port2 == dst_port
assert dst_port2 == src_port
assert ack_no2 == seq_no + 1
assert flags2 & (FLAGS_SYN|FLAGS_ACK) == (FLAGS_SYN|FLAGS_ACK)
assert flags2 & (FLAGS_FIN|FLAGS_RST) == 0
rede.fila.clear()
src_port3 = src_port
while src_port3 == src_port:
src_port3 = random.randint(1024, 0xffff)
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port3, dst_port, seq_no, 0, FLAGS_SYN), src_addr, dst_addr))
assert len(rede.fila) == 1
segmento, dst_addr4 = rede.fila[0]
assert fix_checksum(segmento, src_addr, dst_addr) == segmento
src_port4, dst_port4, seq_no4, ack_no4, flags4, _, _, _ = read_header(segmento)
assert 4*(flags4>>12) == len(segmento), 'The SYN+ACK should not carry a payload'
assert dst_addr4 == src_addr
assert src_port4 == dst_port
assert dst_port4 == src_port3
assert ack_no4 == seq_no + 1
assert seq_no4 != seq_no2, 'The first sequence number used in a connection should be random'
assert flags4 & (FLAGS_SYN|FLAGS_ACK) == (FLAGS_SYN|FLAGS_ACK)
assert flags4 & (FLAGS_FIN|FLAGS_RST) == 0
| 38.95
| 123
| 0.746684
|
#!/usr/bin/env python3
import random
from mytcputils import *
from mytcp import Servidor
class CamadaRede:
def __init__(self):
self.callback = None
self.fila = []
def registrar_recebedor(self, callback):
self.callback = callback
def enviar(self, segmento, dest_addr):
self.fila.append((segmento, dest_addr))
foi_aceita = False
def conexao_aceita(conexao):
global foi_aceita
foi_aceita = True
rede = CamadaRede()
dst_port = random.randint(10, 1023)
servidor = Servidor(rede, dst_port)
servidor.registrar_monitor_de_conexoes_aceitas(conexao_aceita)
src_port = random.randint(1024, 0xffff)
seq_no = random.randint(0, 0xffff)
src_addr, dst_addr = '10.0.0.%d'%random.randint(1, 10), '10.0.0.%d'%random.randint(11, 20)
assert rede.fila == []
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port, dst_port, seq_no, 0, FLAGS_SYN), src_addr, dst_addr))
assert foi_aceita, 'The accepted-connections monitor should have been called'
assert len(rede.fila) == 1
segmento, dst_addr2 = rede.fila[0]
assert fix_checksum(segmento, src_addr, dst_addr) == segmento
src_port2, dst_port2, seq_no2, ack_no2, flags2, _, _, _ = read_header(segmento)
assert 4*(flags2>>12) == len(segmento), 'The SYN+ACK should not carry a payload'
assert dst_addr2 == src_addr
assert src_port2 == dst_port
assert dst_port2 == src_port
assert ack_no2 == seq_no + 1
assert flags2 & (FLAGS_SYN|FLAGS_ACK) == (FLAGS_SYN|FLAGS_ACK)
assert flags2 & (FLAGS_FIN|FLAGS_RST) == 0
rede.fila.clear()
src_port3 = src_port
while src_port3 == src_port:
src_port3 = random.randint(1024, 0xffff)
rede.callback(src_addr, dst_addr, fix_checksum(make_header(src_port3, dst_port, seq_no, 0, FLAGS_SYN), src_addr, dst_addr))
assert len(rede.fila) == 1
segmento, dst_addr4 = rede.fila[0]
assert fix_checksum(segmento, src_addr, dst_addr) == segmento
src_port4, dst_port4, seq_no4, ack_no4, flags4, _, _, _ = read_header(segmento)
assert 4*(flags4>>12) == len(segmento), 'The SYN+ACK should not carry a payload'
assert dst_addr4 == src_addr
assert src_port4 == dst_port
assert dst_port4 == src_port3
assert ack_no4 == seq_no + 1
assert seq_no4 != seq_no2, 'The first sequence number used in a connection should be random'
assert flags4 & (FLAGS_SYN|FLAGS_ACK) == (FLAGS_SYN|FLAGS_ACK)
assert flags4 & (FLAGS_FIN|FLAGS_RST) == 0
| 218
| -4
| 123
|
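A short aside on the header-length check the test uses twice: read_header returns the 16-bit word whose top four bits are the TCP data offset (header length in 32-bit words), so 4*(flags>>12) is the header size in bytes and equals len(segmento) exactly when the SYN+ACK carries no payload. A minimal illustration with made-up numbers:
# Data-offset arithmetic behind the "no payload" asserts; values are illustrative.
offset_words = 5                     # minimum TCP header, no options
flags_word = offset_words << 12      # offset occupies the top 4 bits
header_bytes = 4 * (flags_word >> 12)
print(header_bytes)                  # 20 -> a 20-byte segment carries an empty payload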
28c30ab04f705a434750d94a90d626d4859129fa
| 5,245
|
py
|
Python
|
minigugl/client.py
|
FraBle/minigugl
|
2bfaab8daf1732fc3f2ed43fd743e8a1bfdb5843
|
[
"MIT"
] | null | null | null |
minigugl/client.py
|
FraBle/minigugl
|
2bfaab8daf1732fc3f2ed43fd743e8a1bfdb5843
|
[
"MIT"
] | null | null | null |
minigugl/client.py
|
FraBle/minigugl
|
2bfaab8daf1732fc3f2ed43fd743e8a1bfdb5843
|
[
"MIT"
] | null | null | null |
"""Video stream client for Raspberry Pi-powered dash cam."""
import signal
import sys
from pathlib import Path
from typing import Any, Optional
import arrow
import cv2
from loguru import logger
from vidgear.gears import VideoGear, WriteGear
from minigugl import annotation, config
from minigugl.log import setup_logging
if config.settings.enable_gps:
from minigugl import location # noqa: WPS433
setup_logging(
log_level=config.settings.log_level,
log_format=config.settings.log_format,
)
opencv_options = {
'CAP_PROP_FRAME_WIDTH': config.settings.video_width,
'CAP_PROP_FRAME_HEIGHT': config.settings.video_height,
'CAP_PROP_FPS': config.settings.video_framerate,
}
stream = VideoGear(
source=config.settings.video_source,
**opencv_options,
).start()
# https://trac.ffmpeg.org/wiki/Encode/H.264
# https://www.ffmpeg.org/ffmpeg-all.html#Codec-Options
ffmpeg_options = {
'-c:v': config.settings.video_codec,
'-map': 0, # map all streams from the first input to output
'-segment_time': config.settings.video_segment_length_sec,
'-g': config.settings.video_framerate, # group of picture (GOP) size = fps
'-sc_threshold': 0, # disable scene detection
'-force_key_frames': 'expr:gte(t,n_forced*{0})'.format(
# force key frame every x seconds
config.settings.video_segment_length_sec,
),
# use `-clones` for `-f` parameter since WriteGear internally applies
# critical '-f rawvideo' parameter to every FFmpeg pipeline
'-clones': ['-f', 'segment'], # enable segment muxer
'-input_framerate': config.settings.video_framerate,
'-r': config.settings.video_framerate, # output framerate
'-pix_fmt': 'yuv420p', # for output to work in QuickTime
'-reset_timestamps': 1, # reset timestamps at beginning of each segment
'-strftime': 1, # expand the segment filename with localtime
}
if config.settings.video_codec == 'libx264':
ffmpeg_options.update({
'-crf': 22, # constant rate factor, decides quality
'-preset': 'fast', # preset for encoding speed/compression ratio
'-tune': 'zerolatency', # fast encoding and low-latency streaming
})
Path(config.settings.output_dir).mkdir(parents=True, exist_ok=True)
writer = WriteGear(
# Example: video_2021-04-14_20-15-30.mp4
# April 14th, 2021, at 8:15:30pm
output_filename=str(
Path(
config.settings.output_dir,
) / 'video_%Y-%m-%d_%H-%M-%S.mp4', # noqa: WPS323
),
logging=True,
**ffmpeg_options,
)
def _signal_handler(signalnum: int, _: Any) -> None:
"""Handle signal from user interruption (e.g. CTRL+C).
Logs the received signal, safely closes the video stream and writer, and exits.
Args:
signalnum: Received signal number.
"""
logger.info('Received signal: {0}', signal.Signals(signalnum).name)
# safely close video stream & writer
stream.stop()
writer.close()
sys.exit(0)
# Register handler for (keyboard) interrupts
signal.signal(signal.SIGINT, _signal_handler)
signal.signal(signal.SIGTERM, _signal_handler)
if __name__ == '__main__':
if config.settings.enable_gps:
gps_coordinates = location.start_gps_thread()
while True:
frame = stream.read() # read frames from stream
# check for frame if None-type
if frame is None:
break
# explicit conversion of color space because of
# https://github.com/opencv/opencv/issues/18120
img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# add text annotations: timestamp and optionally GPS coordinates
img = _add_text_annotations(
img,
bottom_left=arrow.now().format(arrow.FORMAT_RFC2822),
bottom_right=(
str(gps_coordinates)
if config.settings.enable_gps
else None
),
)
# conversion back to original color space
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
writer.write(img)
| 31.596386
| 79
| 0.666921
|
"""Video stream client for Raspberry Pi-powered dash cam."""
import signal
import sys
from pathlib import Path
from typing import Any, Optional
import arrow
import cv2
from loguru import logger
from vidgear.gears import VideoGear, WriteGear
from minigugl import annotation, config
from minigugl.log import setup_logging
if config.settings.enable_gps:
from minigugl import location # noqa: WPS433
setup_logging(
log_level=config.settings.log_level,
log_format=config.settings.log_format,
)
opencv_options = {
'CAP_PROP_FRAME_WIDTH': config.settings.video_width,
'CAP_PROP_FRAME_HEIGHT': config.settings.video_height,
'CAP_PROP_FPS': config.settings.video_framerate,
}
stream = VideoGear(
source=config.settings.video_source,
**opencv_options,
).start()
# https://trac.ffmpeg.org/wiki/Encode/H.264
# https://www.ffmpeg.org/ffmpeg-all.html#Codec-Options
ffmpeg_options = {
'-c:v': config.settings.video_codec,
'-map': 0, # map all streams from the first input to output
'-segment_time': config.settings.video_segment_length_sec,
'-g': config.settings.video_framerate, # group of picture (GOP) size = fps
'-sc_threshold': 0, # disable scene detection
'-force_key_frames': 'expr:gte(t,n_forced*{0})'.format(
# force key frame every x seconds
config.settings.video_segment_length_sec,
),
# use `-clones` for `-f` parameter since WriteGear internally applies
# critical '-f rawvideo' parameter to every FFmpeg pipeline
'-clones': ['-f', 'segment'], # enable segment muxer
'-input_framerate': config.settings.video_framerate,
'-r': config.settings.video_framerate, # output framerate
'-pix_fmt': 'yuv420p', # for output to work in QuickTime
'-reset_timestamps': 1, # reset timestamps at beginning of each segment
'-strftime': 1, # expand the segment filename with localtime
}
if config.settings.video_codec == 'libx264':
ffmpeg_options.update({
'-crf': 22, # constant rate factor, decides quality
'-preset': 'fast', # preset for encoding speed/compression ratio
'-tune': 'zerolatency', # fast encoding and low-latency streaming
})
Path(config.settings.output_dir).mkdir(parents=True, exist_ok=True)
writer = WriteGear(
# Example: video_2021-04-14_20-15-30.mp4
# April 14th, 2021, at 8:15:30pm
output_filename=str(
Path(
config.settings.output_dir,
) / 'video_%Y-%m-%d_%H-%M-%S.mp4', # noqa: WPS323
),
logging=True,
**ffmpeg_options,
)
def _signal_handler(signalnum: int, _: Any) -> None:
"""Handle signal from user interruption (e.g. CTRL+C).
Logs the received signal, safely closes the video stream and writer, and exits.
Args:
signalnum: Received signal number.
"""
logger.info('Received signal: {0}', signal.Signals(signalnum).name)
# safely close video stream & writer
stream.stop()
writer.close()
sys.exit(0)
# Register handler for (keyboard) interrupts
signal.signal(signal.SIGINT, _signal_handler)
signal.signal(signal.SIGTERM, _signal_handler)
def _add_text_annotations(
img: Any,
top_left: Optional[str] = None,
top_right: Optional[str] = None,
bottom_left: Optional[str] = None,
bottom_right: Optional[str] = None,
) -> Any:
if not any([top_left, top_right, bottom_left, bottom_right]):
return img
alpha = 0.7 # opacity level
overlay = img.copy() # to allow opacity
override_text_height = config.settings.annotation_override_text_height
if top_left:
annotation.add_annotation_top_left(
frame=overlay,
text=top_left,
override_text_height=override_text_height,
)
if top_right:
annotation.add_annotation_top_right(
overlay,
top_right,
override_text_height=override_text_height,
)
if bottom_left:
annotation.add_annotation_bottom_left(
overlay,
bottom_left,
override_text_height=override_text_height,
)
if bottom_right:
annotation.add_annotation_bottom_right(
overlay,
bottom_right,
override_text_height=override_text_height,
)
return cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)
if __name__ == '__main__':
if config.settings.enable_gps:
gps_coordinates = location.start_gps_thread()
while True:
frame = stream.read() # read frames from stream
# check for frame if None-type
if frame is None:
break
# explicit conversion of color space because of
# https://github.com/opencv/opencv/issues/18120
img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# add text annotations: timestamp and optionally GPS coordinates
img = _add_text_annotations(
img,
bottom_left=arrow.now().format(arrow.FORMAT_RFC2822),
bottom_right=(
str(gps_coordinates)
if config.settings.enable_gps
else None
),
)
# conversion back to original color space
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
writer.write(img)
| 1,195
| 0
| 23
|
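A standalone sketch of the opacity blend that _add_text_annotations relies on, run on a synthetic frame so no camera is needed; the text, frame size and coordinates are placeholders.
# cv2.addWeighted overlay blend at the same alpha the client uses; synthetic frame only.
import cv2
import numpy as np
frame = np.zeros((120, 320, 3), dtype=np.uint8)
overlay = frame.copy()
cv2.putText(overlay, 'demo', (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
alpha = 0.7                                            # opacity level, as in the client
blended = cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)
print(blended.shape, blended.dtype)                    # (120, 320, 3) uint8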
11e709083c545777ba448930810585dfa41d391b
| 1,770
|
py
|
Python
|
mjpegserverpython.py
|
bootrino/maryjane
|
a1d7a3376a7a03e195e31221f473beeea33b895f
|
[
"MIT"
] | 63
|
2021-10-06T11:11:56.000Z
|
2022-03-27T06:15:09.000Z
|
mjpegserverpython.py
|
bootrino/maryjane
|
a1d7a3376a7a03e195e31221f473beeea33b895f
|
[
"MIT"
] | 1
|
2021-10-08T20:31:28.000Z
|
2021-10-08T20:52:54.000Z
|
mjpegserverpython.py
|
bootrino/maryjane
|
a1d7a3376a7a03e195e31221f473beeea33b895f
|
[
"MIT"
] | 6
|
2021-10-08T04:54:48.000Z
|
2022-03-27T06:15:23.000Z
|
from sanic import response, Sanic
import asyncio
import timeit
# MaryJane is an mjpeg server - it works by fetching *the same* jpeg image over and over from a ram drive
# MIT license
# copyright 2021 Andrew Stuart andrew.stuart@supercoders.com.au
app = Sanic(__name__)
@app.route('/maryjane/')
if __name__ == '__main__':
try:
app.run(host="0.0.0.0", port=8080)
except KeyboardInterrupt:
print("Received KeyboardInterrupt, exiting")
| 37.659574
| 105
| 0.660452
|
from sanic import response, Sanic
import asyncio
import timeit
# MaryJane is an mjpeg server - it works by fetching *the same* jpeg image over and over from a ram drive
# MIT license
# copyright 2021 Andrew Stuart andrew.stuart@supercoders.com.au
app = Sanic(__name__)
def package_mjpeg(img_bytes):
return (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + img_bytes + b'\r\n')
@app.route('/maryjane/')
async def mjpeg_server(request):
fps = 15 # frames per second
frame_milliseconds_budget = 1000 / fps
# 15fps = frame_milliseconds_budget of 66.66
# 20fps = frame_milliseconds_budget of 50
# 30fps = frame_milliseconds_budget of 33.33
# 60fps = frame_milliseconds_budget of 16.66
async def stream_mjpeg(response):
while True:
start = timeit.default_timer()
with open('/dev/shm/img.jpeg', mode='rb') as file: # b is important -> binary
await response.write(package_mjpeg(file.read()))
stop = timeit.default_timer()
elapsed = stop - start
milliseconds_taken = elapsed * 1000
difference = frame_milliseconds_budget - milliseconds_taken
if (difference) > 0:
# i.e. if this frame was completed MORE QUICKLY than needed to maintain FPS
# don't continue immediately, because that would push the frame rate above the target FPS;
# instead sleep for the remaining time budget
await asyncio.sleep(difference / 1000)
return response.stream(stream_mjpeg, content_type='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
try:
app.run(host="0.0.0.0", port=8080)
except KeyboardInterrupt:
print("Received KeyboardInterrupt, exiting")
| 1,266
| 0
| 44
|
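A quick arithmetic check of the pacing logic above: at 15 fps each frame has a 1000/15 ≈ 66.7 ms budget and the loop sleeps only for whatever is left after the read and write. The per-frame cost below is invented for illustration.
# Frame-budget pacing math from stream_mjpeg, with a hypothetical per-frame cost.
fps = 15
frame_budget_ms = 1000 / fps           # ~66.67 ms per frame
taken_ms = 20.0                        # made-up time spent reading and writing a frame
sleep_ms = max(frame_budget_ms - taken_ms, 0)
print(round(frame_budget_ms, 2), round(sleep_ms, 2))   # 66.67 46.67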
6756dee5d1d8dd52ec0505aaa5dcd97179cf6a96
| 3,483
|
py
|
Python
|
test/unit/fixtures_adt_package.py
|
jakub-vaclavik-sap/sapcli
|
a0f40c3b2363bba0d34f705d92dd420d9adf3987
|
[
"Apache-2.0"
] | 42
|
2019-01-16T13:16:09.000Z
|
2022-03-23T20:13:00.000Z
|
test/unit/fixtures_adt_package.py
|
jakub-vaclavik-sap/sapcli
|
a0f40c3b2363bba0d34f705d92dd420d9adf3987
|
[
"Apache-2.0"
] | 59
|
2019-02-23T07:16:56.000Z
|
2022-02-28T09:34:47.000Z
|
test/unit/fixtures_adt_package.py
|
jakub-vaclavik-sap/sapcli
|
a0f40c3b2363bba0d34f705d92dd420d9adf3987
|
[
"Apache-2.0"
] | 22
|
2019-03-12T08:15:44.000Z
|
2022-01-10T03:56:47.000Z
|
GET_PACKAGE_ADT_XML='''<?xml version="1.0" encoding="utf-8"?>
<pak:package xmlns:pak="http://www.sap.com/adt/packages" xmlns:adtcore="http://www.sap.com/adt/core" adtcore:masterLanguage="EN" adtcore:name="$IAMTHEKING" adtcore:type="DEVC/K" adtcore:changedAt="2019-01-29T23:00:00Z" adtcore:version="active" adtcore:createdAt="2019-01-29T23:00:00Z" adtcore:changedBy="DEVELOPER" adtcore:description="This is a package" adtcore:descriptionTextLimit="60" adtcore:language="EN">
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/vit/wb/object_type/devck/object_name/%24IAMTHEKING" rel="self" type="application/vnd.sap.sapgui" title="Representation in SAP Gui"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/applicationcomponents" rel="applicationcomponents" type="application/vnd.sap.adt.nameditems.v1+xml" title="Application Components Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/softwarecomponents" rel="softwarecomponents" type="application/vnd.sap.adt.nameditems.v1+xml" title="Software Components Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/transportlayers" rel="transportlayers" type="application/vnd.sap.adt.nameditems.v1+xml" title="Transport Layers Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/translationrelevances" rel="translationrelevances" type="application/vnd.sap.adt.nameditems.v1+xml" title="Transport Relevances Value Help"/>
<pak:attributes pak:packageType="development" pak:isPackageTypeEditable="false" pak:isAddingObjectsAllowed="false" pak:isAddingObjectsAllowedEditable="true" pak:isEncapsulated="false" pak:isEncapsulationEditable="false" pak:recordChanges="false" pak:isRecordChangesEditable="false" pak:isSwitchVisible="false"/>
<pak:superPackage/>
<pak:applicationComponent pak:name="-" pak:description="No application component assigned" pak:isVisible="true" pak:isEditable="false"/>
<pak:transport>
<pak:softwareComponent pak:name="LOCAL" pak:description="" pak:isVisible="true" pak:isEditable="false"/>
<pak:transportLayer pak:name="" pak:description="" pak:isVisible="false" pak:isEditable="false"/>
</pak:transport>
<pak:useAccesses pak:isVisible="false"/>
<pak:packageInterfaces pak:isVisible="false"/>
<pak:subPackages>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_doc" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_DOC" adtcore:description="Documentation stuff"/>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_src" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_SRC" adtcore:description="Production source codes"/>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_tests" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_TESTS" adtcore:description="Package with Tests"/>
</pak:subPackages>
</pak:package>
'''
GET_PACKAGE_ADT_XML_NOT_FOUND='''<?xml version="1.0" encoding="utf-8"?>
<exc:exception xmlns:exc="http://www.sap.com/abapxml/types/communicationframework">
<namespace id="com.sap.adt"/>
<type id="ExceptionResourceNotFound"/>
<message lang="EN">Error while importing object PKG_NAME from the database.</message>
<localizedMessage lang="EN">Error while importing object PKG_NAME from the database.</localizedMessage>
<properties/>
</exc:exception>
'''.replace('\n', '').replace('\r', '')
| 102.441176
| 411
| 0.765145
|
GET_PACKAGE_ADT_XML='''<?xml version="1.0" encoding="utf-8"?>
<pak:package xmlns:pak="http://www.sap.com/adt/packages" xmlns:adtcore="http://www.sap.com/adt/core" adtcore:masterLanguage="EN" adtcore:name="$IAMTHEKING" adtcore:type="DEVC/K" adtcore:changedAt="2019-01-29T23:00:00Z" adtcore:version="active" adtcore:createdAt="2019-01-29T23:00:00Z" adtcore:changedBy="DEVELOPER" adtcore:description="This is a package" adtcore:descriptionTextLimit="60" adtcore:language="EN">
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/vit/wb/object_type/devck/object_name/%24IAMTHEKING" rel="self" type="application/vnd.sap.sapgui" title="Representation in SAP Gui"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/applicationcomponents" rel="applicationcomponents" type="application/vnd.sap.adt.nameditems.v1+xml" title="Application Components Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/softwarecomponents" rel="softwarecomponents" type="application/vnd.sap.adt.nameditems.v1+xml" title="Software Components Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/transportlayers" rel="transportlayers" type="application/vnd.sap.adt.nameditems.v1+xml" title="Transport Layers Value Help"/>
<atom:link xmlns:atom="http://www.w3.org/2005/Atom" href="/sap/bc/adt/packages/valuehelps/translationrelevances" rel="translationrelevances" type="application/vnd.sap.adt.nameditems.v1+xml" title="Transport Relevances Value Help"/>
<pak:attributes pak:packageType="development" pak:isPackageTypeEditable="false" pak:isAddingObjectsAllowed="false" pak:isAddingObjectsAllowedEditable="true" pak:isEncapsulated="false" pak:isEncapsulationEditable="false" pak:recordChanges="false" pak:isRecordChangesEditable="false" pak:isSwitchVisible="false"/>
<pak:superPackage/>
<pak:applicationComponent pak:name="-" pak:description="No application component assigned" pak:isVisible="true" pak:isEditable="false"/>
<pak:transport>
<pak:softwareComponent pak:name="LOCAL" pak:description="" pak:isVisible="true" pak:isEditable="false"/>
<pak:transportLayer pak:name="" pak:description="" pak:isVisible="false" pak:isEditable="false"/>
</pak:transport>
<pak:useAccesses pak:isVisible="false"/>
<pak:packageInterfaces pak:isVisible="false"/>
<pak:subPackages>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_doc" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_DOC" adtcore:description="Documentation stuff"/>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_src" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_SRC" adtcore:description="Production source codes"/>
<pak:packageRef adtcore:uri="/sap/bc/adt/packages/%24iamtheking_tests" adtcore:type="DEVC/K" adtcore:name="$IAMTHEKING_TESTS" adtcore:description="Package with Tests"/>
</pak:subPackages>
</pak:package>
'''
GET_PACKAGE_ADT_XML_NOT_FOUND='''<?xml version="1.0" encoding="utf-8"?>
<exc:exception xmlns:exc="http://www.sap.com/abapxml/types/communicationframework">
<namespace id="com.sap.adt"/>
<type id="ExceptionResourceNotFound"/>
<message lang="EN">Error while importing object PKG_NAME from the database.</message>
<localizedMessage lang="EN">Error while importing object PKG_NAME from the database.</localizedMessage>
<properties/>
</exc:exception>
'''.replace('\n', '').replace('\r', '')
| 0
| 0
| 0
|
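A hedged sketch of consuming the fixture above with the standard-library parser; the import name is an assumption, and the encode() call is needed because the fixture string carries an XML encoding declaration.
# List the sub-packages declared in GET_PACKAGE_ADT_XML using xml.etree.
import xml.etree.ElementTree as ET
from fixtures_adt_package import GET_PACKAGE_ADT_XML
ns = {'pak': 'http://www.sap.com/adt/packages'}
root = ET.fromstring(GET_PACKAGE_ADT_XML.encode('utf-8'))
names = [ref.get('{http://www.sap.com/adt/core}name') for ref in root.findall('.//pak:packageRef', ns)]
print(names)   # ['$IAMTHEKING_DOC', '$IAMTHEKING_SRC', '$IAMTHEKING_TESTS']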
8c5626a13188adaf0ae62f7ee28e2672a0df440b
| 24,186
|
py
|
Python
|
doctorAI.py
|
raghutata143/Healthcare_Analytics
|
bc48e1cb0e89d2956d3808517cf4cf84cc7517bd
|
[
"BSD-3-Clause"
] | 1
|
2021-02-23T05:10:17.000Z
|
2021-02-23T05:10:17.000Z
|
doctorAI.py
|
raghutata143/Healthcare_Analytics
|
bc48e1cb0e89d2956d3808517cf4cf84cc7517bd
|
[
"BSD-3-Clause"
] | null | null | null |
doctorAI.py
|
raghutata143/Healthcare_Analytics
|
bc48e1cb0e89d2956d3808517cf4cf84cc7517bd
|
[
"BSD-3-Clause"
] | 1
|
2021-02-23T05:10:18.000Z
|
2021-02-23T05:10:18.000Z
|
#################################################################
# Code written by Edward Choi (mp2893@gatech.edu)
# For bug report, please contact author using the email address
#################################################################
import sys, random
import numpy as np
import cPickle as pickle
from collections import OrderedDict
import argparse
import theano
import theano.tensor as T
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parse_arguments(parser)
hiddenDimSize = [int(strDim) for strDim in args.hidden_dim_size[1:-1].split(',')]
if args.predict_time and args.time_file == '':
print 'Cannot predict time duration without time file'
sys.exit()
train_doctorAI(
seqFile=args.seq_file,
inputDimSize=args.n_input_codes,
labelFile=args.label_file,
numClass=args.n_output_codes,
outFile=args.out_file,
timeFile=args.time_file,
predictTime=args.predict_time,
tradeoff=args.tradeoff,
useLogTime=args.use_log_time,
embFile=args.embed_file,
embSize=args.embed_size,
embFineTune=args.embed_finetune,
hiddenDimSize=hiddenDimSize,
batchSize=args.batch_size,
max_epochs=args.n_epochs,
L2_output=args.L2_softmax,
L2_time=args.L2_time,
dropout_rate=args.dropout_rate,
logEps=args.log_eps,
verbose=args.verbose
)
| 46.156489
| 351
| 0.717398
|
#################################################################
# Code written by Edward Choi (mp2893@gatech.edu)
# For bug report, please contact author using the email address
#################################################################
import sys, random
import numpy as np
import cPickle as pickle
from collections import OrderedDict
import argparse
import theano
import theano.tensor as T
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
def unzip(zipped):
new_params = OrderedDict()
for key, value in zipped.iteritems():
new_params[key] = value.get_value()
return new_params
def numpy_floatX(data):
return np.asarray(data, dtype=config.floatX)
def load_embedding(infile):
Wemb = np.array(pickle.load(open(infile, 'rb'))).astype(config.floatX)
return Wemb
def init_params(options):
params = OrderedDict()
timeFile = options['timeFile']
embFile = options['embFile']
embSize = options['embSize']
inputDimSize = options['inputDimSize']
numClass = options['numClass']
if len(embFile) > 0:
print 'using external code embedding'
params['W_emb'] = load_embedding(embFile)
embSize = params['W_emb'].shape[1]
else:
print 'using randomly initialized code embedding'
params['W_emb'] = np.random.uniform(-0.01, 0.01, (inputDimSize, embSize)).astype(config.floatX)
params['b_emb'] = np.zeros(embSize).astype(config.floatX)
prevDimSize = embSize
if len(timeFile) > 0: prevDimSize += 1 #We need to consider an extra dimension for the duration information
for count, hiddenDimSize in enumerate(options['hiddenDimSize']):
params['W_'+str(count)] = np.random.uniform(-0.01, 0.01, (prevDimSize, hiddenDimSize)).astype(config.floatX)
params['W_r_'+str(count)] = np.random.uniform(-0.01, 0.01, (prevDimSize, hiddenDimSize)).astype(config.floatX)
params['W_z_'+str(count)] = np.random.uniform(-0.01, 0.01, (prevDimSize, hiddenDimSize)).astype(config.floatX)
params['U_'+str(count)] = np.random.uniform(-0.01, 0.01, (hiddenDimSize, hiddenDimSize)).astype(config.floatX)
params['U_r_'+str(count)] = np.random.uniform(-0.01, 0.01, (hiddenDimSize, hiddenDimSize)).astype(config.floatX)
params['U_z_'+str(count)] = np.random.uniform(-0.01, 0.01, (hiddenDimSize, hiddenDimSize)).astype(config.floatX)
params['b_'+str(count)] = np.zeros(hiddenDimSize).astype(config.floatX)
params['b_r_'+str(count)] = np.zeros(hiddenDimSize).astype(config.floatX)
params['b_z_'+str(count)] = np.zeros(hiddenDimSize).astype(config.floatX)
prevDimSize = hiddenDimSize
params['W_output'] = np.random.uniform(-0.01, 0.01, (prevDimSize, numClass)).astype(config.floatX)
params['b_output'] = np.zeros(numClass).astype(config.floatX)
if options['predictTime']:
params['W_time'] = np.random.uniform(-0.01, 0.01, (prevDimSize, 1)).astype(config.floatX)
params['b_time'] = np.zeros(1).astype(config.floatX)
return params
def init_tparams(params, options):
tparams = OrderedDict()
for key, value in params.iteritems():
if not options['embFineTune'] and key == 'W_emb': continue
tparams[key] = theano.shared(value, name=key)
return tparams
def dropout_layer(state_before, use_noise, trng, dropout_rate):
proj = T.switch(use_noise, (state_before * trng.binomial(state_before.shape, p=dropout_rate, n=1, dtype=state_before.dtype)), state_before * 0.5)
return proj
def gru_layer(tparams, emb, layerIndex, hiddenDimSize, mask=None):
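# The scan step below is a standard GRU cell applied per timestep (visit), with the mask
# re-inserting the previous hidden state for padded positions:
#   r_t  = sigmoid(W_r x_t + U_r h_{t-1} + b_r)        (reset gate)
#   z_t  = sigmoid(W_z x_t + U_z h_{t-1} + b_z)        (update gate)
#   h~_t = tanh(W x_t + U (r_t * h_{t-1}) + b)         (candidate state)
#   h_t  = z_t * h_{t-1} + (1 - z_t) * h~_t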
timesteps = emb.shape[0]
if emb.ndim == 3: n_samples = emb.shape[1]
else: n_samples = 1
W_rx = T.dot(emb, tparams['W_r_'+layerIndex])
W_zx = T.dot(emb, tparams['W_z_'+layerIndex])
Wx = T.dot(emb, tparams['W_'+layerIndex])
def stepFn(stepMask, wrx, wzx, wx, h):
r = T.nnet.sigmoid(wrx + T.dot(h, tparams['U_r_'+layerIndex]) + tparams['b_r_'+layerIndex])
z = T.nnet.sigmoid(wzx + T.dot(h, tparams['U_z_'+layerIndex]) + tparams['b_z_'+layerIndex])
h_tilde = T.tanh(wx + T.dot(r*h, tparams['U_'+layerIndex]) + tparams['b_'+layerIndex])
h_new = z * h + ((1. - z) * h_tilde)
h_new = stepMask[:, None] * h_new + (1. - stepMask)[:, None] * h
return h_new
results, updates = theano.scan(fn=stepFn, sequences=[mask,W_rx,W_zx,Wx], outputs_info=T.alloc(numpy_floatX(0.0), n_samples, hiddenDimSize), name='gru_layer'+layerIndex, n_steps=timesteps)
return results
def build_model(tparams, options, W_emb=None):
trng = RandomStreams(123)
use_noise = theano.shared(numpy_floatX(0.))
if len(options['timeFile']) > 0: useTime = True
else: useTime = False
x = T.tensor3('x', dtype=config.floatX)
t = T.matrix('t', dtype=config.floatX)
y = T.tensor3('y', dtype=config.floatX)
t_label = T.matrix('t_label', dtype=config.floatX)
mask = T.matrix('mask', dtype=config.floatX)
lengths = T.vector('lengths', dtype=config.floatX)
n_timesteps = x.shape[0]
n_samples = x.shape[1]
if options['embFineTune']: emb = T.tanh(T.dot(x, tparams['W_emb']) + tparams['b_emb'])
else: emb = T.tanh(T.dot(x, W_emb) + tparams['b_emb'])
if useTime:
emb = T.concatenate([t.reshape([n_timesteps,n_samples,1]), emb], axis=2) #Adding the time element to the embedding
inputVector = emb
for i, hiddenDimSize in enumerate(options['hiddenDimSize']):
memories = gru_layer(tparams, inputVector, str(i), hiddenDimSize, mask=mask)
memories = dropout_layer(memories, use_noise, trng, options['dropout_rate'])
inputVector = memories
def softmaxStep(memory2d):
return T.nnet.softmax(T.dot(memory2d, tparams['W_output']) + tparams['b_output'])
logEps = options['logEps']
results, updates = theano.scan(fn=softmaxStep, sequences=[inputVector], outputs_info=None, name='softmax_layer', n_steps=n_timesteps)
results = results * mask[:,:,None]
cross_entropy = -(y * T.log(results + logEps) + (1. - y) * T.log(1. - results + logEps))
prediction_loss = cross_entropy.sum(axis=2).sum(axis=0) / lengths
if options['predictTime']:
duration = T.maximum(T.dot(inputVector, tparams['W_time']) + tparams['b_time'], 0) #ReLU
duration = duration.reshape([n_timesteps,n_samples]) * mask
duration_loss = 0.5 * ((duration - t_label) ** 2).sum(axis=0) / lengths
cost = T.mean(prediction_loss) + options['tradeoff'] * T.mean(duration_loss) + options['L2_output'] * (tparams['W_output'] ** 2).sum() + options['L2_time'] * (tparams['W_time'] ** 2).sum()
else:
cost = T.mean(prediction_loss) + options['L2_output'] * (tparams['W_output'] ** 2).sum()
if options['predictTime']: return use_noise, x, y, t, t_label, mask, lengths, cost
elif useTime: return use_noise, x, y, t, mask, lengths, cost
else: return use_noise, x, y, mask, lengths, cost
def adadelta(tparams, grads, x, y, mask, lengths, cost, options, t=None, t_label=None):
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_grad' % k) for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_rup2' % k) for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_rgrad2' % k) for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
if options['predictTime']:
f_grad_shared = theano.function([x, y, t, t_label, mask, lengths], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
elif len(options['timeFile']) > 0:
f_grad_shared = theano.function([x, y, t, mask, lengths], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
else:
f_grad_shared = theano.function([x, y, mask, lengths], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([], [], updates=ru2up + param_up, on_unused_input='ignore', name='adadelta_f_update')
return f_grad_shared, f_update
def padMatrixWithTimePrediction(seqs, labels, times, options):
lengths = np.array([len(seq) for seq in seqs]) - 1
n_samples = len(seqs)
maxlen = np.max(lengths)
inputDimSize = options['inputDimSize']
numClass = options['numClass']
x = np.zeros((maxlen, n_samples, inputDimSize)).astype(config.floatX)
y = np.zeros((maxlen, n_samples, numClass)).astype(config.floatX)
t = np.zeros((maxlen, n_samples)).astype(config.floatX)
t_label = np.zeros((maxlen, n_samples)).astype(config.floatX)
mask = np.zeros((maxlen, n_samples)).astype(config.floatX)
for idx, (seq,time,label) in enumerate(zip(seqs,times,labels)):
for xvec, subseq in zip(x[:,idx,:], seq[:-1]):
xvec[subseq] = 1.
for yvec, subseq in zip(y[:,idx,:], label[1:]):
yvec[subseq] = 1.
mask[:lengths[idx], idx] = 1.
t[:lengths[idx], idx] = time[:-1]
t_label[:lengths[idx], idx] = time[1:]
lengths = np.array(lengths, dtype=config.floatX)
if options['useLogTime']:
t = np.log(t + options['logEps'])
t_label = np.log(t_label + options['logEps'])
return x, y, t, t_label, mask, lengths
def padMatrixWithTime(seqs, labels, times, options):
lengths = np.array([len(seq) for seq in seqs]) - 1
n_samples = len(seqs)
maxlen = np.max(lengths)
inputDimSize = options['inputDimSize']
numClass = options['numClass']
x = np.zeros((maxlen, n_samples, inputDimSize)).astype(config.floatX)
y = np.zeros((maxlen, n_samples, numClass)).astype(config.floatX)
t = np.zeros((maxlen, n_samples)).astype(config.floatX)
mask = np.zeros((maxlen, n_samples)).astype(config.floatX)
for idx, (seq,time,label) in enumerate(zip(seqs,times,labels)):
for xvec, subseq in zip(x[:,idx,:], seq[:-1]):
xvec[subseq] = 1.
for yvec, subseq in zip(y[:,idx,:], label[1:]):
yvec[subseq] = 1.
mask[:lengths[idx], idx] = 1.
t[:lengths[idx], idx] = time[:-1]
lengths = np.array(lengths, dtype=config.floatX)
if options['useLogTime']:
t = np.log(t + options['logEps'])
return x, y, t, mask, lengths
def padMatrixWithoutTime(seqs, labels, options):
lengths = np.array([len(seq) for seq in seqs]) - 1
n_samples = len(seqs)
maxlen = np.max(lengths)
inputDimSize = options['inputDimSize']
numClass = options['numClass']
x = np.zeros((maxlen, n_samples, inputDimSize)).astype(config.floatX)
y = np.zeros((maxlen, n_samples, numClass)).astype(config.floatX)
mask = np.zeros((maxlen, n_samples)).astype(config.floatX)
for idx, (seq,label) in enumerate(zip(seqs,labels)):
for xvec, subseq in zip(x[:,idx,:], seq[:-1]):
xvec[subseq] = 1.
for yvec, subseq in zip(y[:,idx,:], label[1:]):
yvec[subseq] = 1.
mask[:lengths[idx], idx] = 1.
lengths = np.array(lengths, dtype=config.floatX)
return x, y, mask, lengths
def load_data(seqFile, labelFile, timeFile):
train_set_x = pickle.load(open(seqFile+'.train', 'rb'))
valid_set_x = pickle.load(open(seqFile+'.valid', 'rb'))
test_set_x = pickle.load(open(seqFile+'.test', 'rb'))
train_set_y = pickle.load(open(labelFile+'.train', 'rb'))
valid_set_y = pickle.load(open(labelFile+'.valid', 'rb'))
test_set_y = pickle.load(open(labelFile+'.test', 'rb'))
train_set_t = None
valid_set_t = None
test_set_t = None
if len(timeFile) > 0:
train_set_t = pickle.load(open(timeFile+'.train', 'rb'))
valid_set_t = pickle.load(open(timeFile+'.valid', 'rb'))
test_set_t = pickle.load(open(timeFile+'.test', 'rb'))
'''For debugging purposes
sequences = np.array(pickle.load(open(seqFile, 'rb')))
labels = np.array(pickle.load(open(labelFile, 'rb')))
if len(timeFile) > 0:
times = np.array(pickle.load(open(timeFile, 'rb')))
dataSize = len(labels)
np.random.seed(0)
ind = np.random.permutation(dataSize)
nTest = int(0.15 * dataSize)
nValid = int(0.10 * dataSize)
test_indices = ind[:nTest]
valid_indices = ind[nTest:nTest+nValid]
train_indices = ind[nTest+nValid:]
train_set_x = sequences[train_indices]
train_set_y = labels[train_indices]
test_set_x = sequences[test_indices]
test_set_y = labels[test_indices]
valid_set_x = sequences[valid_indices]
valid_set_y = labels[valid_indices]
train_set_t = None
test_set_t = None
valid_set_t = None
if len(timeFile) > 0:
train_set_t = times[train_indices]
test_set_t = times[test_indices]
valid_set_t = times[valid_indices]
'''
def len_argsort(seq):
return sorted(range(len(seq)), key=lambda x: len(seq[x]))
train_sorted_index = len_argsort(train_set_x)
train_set_x = [train_set_x[i] for i in train_sorted_index]
train_set_y = [train_set_y[i] for i in train_sorted_index]
valid_sorted_index = len_argsort(valid_set_x)
valid_set_x = [valid_set_x[i] for i in valid_sorted_index]
valid_set_y = [valid_set_y[i] for i in valid_sorted_index]
test_sorted_index = len_argsort(test_set_x)
test_set_x = [test_set_x[i] for i in test_sorted_index]
test_set_y = [test_set_y[i] for i in test_sorted_index]
if len(timeFile) > 0:
train_set_t = [train_set_t[i] for i in train_sorted_index]
valid_set_t = [valid_set_t[i] for i in valid_sorted_index]
test_set_t = [test_set_t[i] for i in test_sorted_index]
train_set = (train_set_x, train_set_y, train_set_t)
valid_set = (valid_set_x, valid_set_y, valid_set_t)
test_set = (test_set_x, test_set_y, test_set_t)
return train_set, valid_set, test_set
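# Hedged sketch (not part of the original script) of the on-disk layout load_data expects;
# the file names and code indices below are placeholders.
def _write_example_split(prefix='visit.seqs'):
	patients = [[[12, 40], [7]], [[3], [3, 99], [5]]]  # patients -> visits -> medical code indices
	for split in ('.train', '.valid', '.test'):
		with open(prefix + split, 'wb') as f:
			pickle.dump(patients, f)
	# the label file (and the optional time file) must mirror this patient/visit structure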
def calculate_auc(test_model, dataset, options):
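	# Note: despite the name, this returns the data-size-weighted mean of test_model's cost,
	# i.e. the value the training loop reports as cross entropy, not an ROC AUC.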
inputDimSize = options['inputDimSize']
numClass = options['numClass']
batchSize = options['batchSize']
useTime = options['useTime']
predictTime = options['predictTime']
n_batches = int(np.ceil(float(len(dataset[0])) / float(batchSize)))
aucSum = 0.0
dataCount = 0.0
for index in xrange(n_batches):
batchX = dataset[0][index*batchSize:(index+1)*batchSize]
batchY = dataset[1][index*batchSize:(index+1)*batchSize]
if predictTime:
batchT = dataset[2][index*batchSize:(index+1)*batchSize]
x, y, t, t_label, mask, lengths = padMatrixWithTimePrediction(batchX, batchY, batchT, options)
auc = test_model(x, y, t, t_label, mask, lengths)
elif useTime:
batchT = dataset[2][index*batchSize:(index+1)*batchSize]
x, y, t, mask, lengths = padMatrixWithTime(batchX, batchY, batchT, options)
auc = test_model(x, y, t, mask, lengths)
else:
x, y, mask, lengths = padMatrixWithoutTime(batchX, batchY, options)
auc = test_model(x, y, mask, lengths)
aucSum += auc * len(batchX)
dataCount += float(len(batchX))
return aucSum / dataCount
def train_doctorAI(
seqFile='seqFile.txt',
inputDimSize=20000,
labelFile='labelFile.txt',
numClass=500,
outFile='outFile.txt',
timeFile='timeFile.txt',
predictTime=False,
tradeoff=1.0,
useLogTime=True,
embFile='embFile.txt',
embSize=200,
embFineTune=True,
hiddenDimSize=[200,200],
batchSize=100,
max_epochs=10,
L2_output=0.001,
L2_time=0.001,
dropout_rate=0.5,
logEps=1e-8,
verbose=False
):
options = locals().copy()
if len(timeFile) > 0: useTime = True
else: useTime = False
options['useTime'] = useTime
print 'Initializing the parameters ... ',
params = init_params(options)
tparams = init_tparams(params, options)
print 'Building the model ... ',
f_grad_shared = None
f_update = None
if predictTime and embFineTune:
print 'predicting duration, fine-tuning code representations'
use_noise, x, y, t, t_label, mask, lengths, cost = build_model(tparams, options)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options, t, t_label)
elif predictTime and not embFineTune:
print 'predicting duration, not fine-tuning code representations'
W_emb = theano.shared(params['W_emb'], name='W_emb')
use_noise, x, y, t, t_label, mask, lengths, cost = build_model(tparams, options, W_emb)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options, t, t_label)
elif useTime and embFineTune:
print 'using duration information, fine-tuning code representations'
use_noise, x, y, t, mask, lengths, cost = build_model(tparams, options)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options, t)
elif useTime and not embFineTune:
print 'using duration information, not fine-tuning code representations'
W_emb = theano.shared(params['W_emb'], name='W_emb')
use_noise, x, y, t, mask, lengths, cost = build_model(tparams, options, W_emb)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options, t)
elif not useTime and embFineTune:
print 'not using duration information, fine-tuning code representations'
use_noise, x, y, mask, lengths, cost = build_model(tparams, options)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options)
	elif not useTime and not embFineTune:
print 'not using duration information, not fine-tuning code representations'
W_emb = theano.shared(params['W_emb'], name='W_emb')
use_noise, x, y, mask, lengths, cost = build_model(tparams, options, W_emb)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, x, y, mask, lengths, cost, options)
print 'Loading data ... ',
trainSet, validSet, testSet = load_data(seqFile, labelFile, timeFile)
n_batches = int(np.ceil(float(len(trainSet[0])) / float(batchSize)))
print 'done'
if predictTime: test_model = theano.function(inputs=[x, y, t, t_label, mask, lengths], outputs=cost, name='test_model')
elif useTime: test_model = theano.function(inputs=[x, y, t, mask, lengths], outputs=cost, name='test_model')
else: test_model = theano.function(inputs=[x, y, mask, lengths], outputs=cost, name='test_model')
bestValidCrossEntropy = 1e20
bestValidEpoch = 0
testCrossEntropy = 0.0
print 'Optimization start !!'
for epoch in xrange(max_epochs):
iteration = 0
costVector = []
for index in random.sample(range(n_batches), n_batches):
use_noise.set_value(1.)
batchX = trainSet[0][index*batchSize:(index+1)*batchSize]
batchY = trainSet[1][index*batchSize:(index+1)*batchSize]
if predictTime:
batchT = trainSet[2][index*batchSize:(index+1)*batchSize]
x, y, t, t_label, mask, lengths = padMatrixWithTimePrediction(batchX, batchY, batchT, options)
cost = f_grad_shared(x, y, t, t_label, mask, lengths)
elif useTime:
batchT = trainSet[2][index*batchSize:(index+1)*batchSize]
x, y, t, mask, lengths = padMatrixWithTime(batchX, batchY, batchT, options)
cost = f_grad_shared(x, y, t, mask, lengths)
else:
x, y, mask, lengths = padMatrixWithoutTime(batchX, batchY, options)
cost = f_grad_shared(x, y, mask, lengths)
costVector.append(cost)
f_update()
if (iteration % 10 == 0) and verbose: print 'epoch:%d, iteration:%d/%d, cost:%f' % (epoch, iteration, n_batches, cost)
iteration += 1
print 'epoch:%d, mean_cost:%f' % (epoch, np.mean(costVector))
use_noise.set_value(0.)
validAuc = calculate_auc(test_model, validSet, options)
print 'Validation cross entropy:%f at epoch:%d' % (validAuc, epoch)
if validAuc < bestValidCrossEntropy:
bestValidCrossEntropy = validAuc
bestValidEpoch = epoch
bestParams = unzip(tparams)
testCrossEntropy = calculate_auc(test_model, testSet, options)
print 'Test cross entropy:%f at epoch:%d' % (testCrossEntropy, epoch)
tempParams = unzip(tparams)
np.savez_compressed(outFile + '.' + str(epoch), **tempParams)
print 'The best valid cross entropy:%f at epoch:%d' % (bestValidCrossEntropy, bestValidEpoch)
print 'The test cross entropy: %f' % testCrossEntropy
def parse_arguments(parser):
parser.add_argument('seq_file', type=str, metavar='<visit_file>', help='The path to the Pickled file containing visit information of patients')
parser.add_argument('n_input_codes', type=int, metavar='<n_input_codes>', help='The number of unique input medical codes')
parser.add_argument('label_file', type=str, metavar='<label_file>', help='The path to the Pickled file containing label information of patients')
parser.add_argument('n_output_codes', type=int, metavar='<n_output_codes>', help='The number of unique label medical codes')
parser.add_argument('out_file', metavar='out_file', help='The path to the output models. The models will be saved after every epoch')
parser.add_argument('--time_file', type=str, default='', help='The path to the Pickled file containing durations between visits of patients. If you are not using duration information, do not use this option')
parser.add_argument('--predict_time', type=int, default=0, choices=[0,1], help='Use this option if you want the GRU to also predict the time duration until the next visit (0 for false, 1 for true) (default value: 0)')
parser.add_argument('--tradeoff', type=float, default=1.0, help='Tradeoff variable for balancing the two loss functions: code prediction function and duration prediction function (default value: 1.0)')
parser.add_argument('--use_log_time', type=int, default=1, choices=[0,1], help='Use logarithm of time duration to dampen the impact of the outliers (0 for false, 1 for true) (default value: 1)')
parser.add_argument('--embed_file', type=str, default='', help='The path to the Pickled file containing the representation vectors of medical codes. If you are not using medical code representations, do not use this option')
parser.add_argument('--embed_size', type=int, default=200, help='The size of the visit embedding before passing it to the GRU layers. If you are not providing your own medical code vectors, you must specify this value (default value: 200)')
parser.add_argument('--embed_finetune', type=int, default=1, choices=[0,1], help='If you are using randomly initialized code representations, always use this option. If you are using an external medical code representations, and you want to fine-tune them as you train the GRU, use this option as well. (0 for false, 1 for true) (default value: 1)')
parser.add_argument('--hidden_dim_size', type=str, default='[200,200]', help='The size of the hidden layers of the GRU. This is a string argument. For example, [500,400] means you are using a two-layer GRU where the lower layer uses a 500-dimensional hidden layer, and the upper layer uses a 400-dimensional hidden layer. (default value: [200,200])')
parser.add_argument('--batch_size', type=int, default=100, help='The size of a single mini-batch (default value: 100)')
parser.add_argument('--n_epochs', type=int, default=10, help='The number of training epochs (default value: 10)')
parser.add_argument('--L2_softmax', type=float, default=0.001, help='L2 regularization for the softmax function (default value: 0.001)')
parser.add_argument('--L2_time', type=float, default=0.001, help='L2 regularization for the linear regression (default value: 0.001)')
parser.add_argument('--dropout_rate', type=float, default=0.5, help='Dropout rate between GRU hidden layers, and between the final hidden layer and the softmax layer (default value: 0.5)')
parser.add_argument('--log_eps', type=float, default=1e-8, help='A small value to prevent log(0) (default value: 1e-8)')
parser.add_argument('--verbose', action='store_true', help='Print output after every 10 mini-batches (default false)')
args = parser.parse_args()
return args
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parse_arguments(parser)
hiddenDimSize = [int(strDim) for strDim in args.hidden_dim_size[1:-1].split(',')]
if args.predict_time and args.time_file == '':
print 'Cannot predict time duration without time file'
sys.exit()
train_doctorAI(
seqFile=args.seq_file,
inputDimSize=args.n_input_codes,
labelFile=args.label_file,
numClass=args.n_output_codes,
outFile=args.out_file,
timeFile=args.time_file,
predictTime=args.predict_time,
tradeoff=args.tradeoff,
useLogTime=args.use_log_time,
embFile=args.embed_file,
embSize=args.embed_size,
embFineTune=args.embed_finetune,
hiddenDimSize=hiddenDimSize,
batchSize=args.batch_size,
max_epochs=args.n_epochs,
L2_output=args.L2_softmax,
L2_time=args.L2_time,
dropout_rate=args.dropout_rate,
logEps=args.log_eps,
verbose=args.verbose
)
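# A hedged example of driving the trainer directly from Python; every path and size below is a
# placeholder and simply mirrors the CLI defaults parsed above:
#   train_doctorAI(seqFile='visit.seqs', inputDimSize=942,
#                  labelFile='label.seqs', numClass=128,
#                  outFile='models/doctorai', timeFile='',
#                  hiddenDimSize=[200, 200], batchSize=100, max_epochs=10)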
| 22,410
| 0
| 369
|
43ab7978cca3e92874497f2176611c1173d40ade
| 2,511
|
py
|
Python
|
python/pyserial-3.0/test/test_iolib.py
|
gotnone/hwa
|
4648cf6072a06552d22cbf6498b35f3e24ce38d5
|
[
"BSD-3-Clause"
] | 25
|
2015-08-05T12:36:24.000Z
|
2021-03-26T01:51:58.000Z
|
python/pyserial-3.0/test/test_iolib.py
|
gotnone/hwa
|
4648cf6072a06552d22cbf6498b35f3e24ce38d5
|
[
"BSD-3-Clause"
] | 2
|
2018-09-13T05:45:37.000Z
|
2020-06-18T15:00:51.000Z
|
python/pyserial-3.0/test/test_iolib.py
|
gotnone/hwa
|
4648cf6072a06552d22cbf6498b35f3e24ce38d5
|
[
"BSD-3-Clause"
] | 4
|
2016-09-18T08:58:35.000Z
|
2020-07-16T11:43:29.000Z
|
#! /usr/bin/env python
#
# This file is part of pySerial - Cross platform serial port support for Python
# (C) 2001-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
Some tests for the serial module.
Part of pyserial (http://pyserial.sf.net) (C)2001-2009 cliechti@gmx.net
Intended to be run on different platforms, to ensure portability of
the code.
This module contains tests for the interaction between Serial and the io
library. This only works on Python 2.6+ that introduced the io library.
For all these tests a simple hardware is required.
Loopback HW adapter:
Shortcut these pin pairs:
TX <-> RX
RTS <-> CTS
DTR <-> DSR
On a 9 pole DSUB these are the pins (2-3) (4-6) (7-8)
"""
import unittest
import sys
if __name__ == '__main__' and sys.version_info < (2, 6):
sys.stderr.write("""\
==============================================================================
WARNING: this test is intended for Python 2.6 and newer where the io library
is available. This seems to be an older version of Python running.
Continuing anyway...
==============================================================================
""")
import io
import serial
# trick to make this test run under 2.6 and 3.x without modification.
# problem is, io library on 2.6 does NOT accept type 'str' and 3.x doesn't
# like u'nicode' strings with the prefix, and it does not provide a unicode
# function ('str' is now what 'unicode' used to be)
if sys.version_info >= (3, 0):
# on which port should the tests be performed:
PORT = 0
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
import sys
sys.stdout.write(__doc__)
if len(sys.argv) > 1:
PORT = sys.argv[1]
sys.stdout.write("Testing port: %r\n" % PORT)
sys.argv[1:] = ['-v']
# When this module is executed from the command-line, it runs all its tests
unittest.main()
| 31.78481
| 79
| 0.621266
|
#! /usr/bin/env python
#
# This file is part of pySerial - Cross platform serial port support for Python
# (C) 2001-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
Some tests for the serial module.
Part of pyserial (http://pyserial.sf.net) (C)2001-2009 cliechti@gmx.net
Intended to be run on different platforms, to ensure portability of
the code.
This module contains tests for the interaction between Serial and the io
library. This only works on Python 2.6+ that introduced the io library.
For all these tests a simple hardware is required.
Loopback HW adapter:
Shortcut these pin pairs:
TX <-> RX
RTS <-> CTS
DTR <-> DSR
On a 9 pole DSUB these are the pins (2-3) (4-6) (7-8)
"""
import unittest
import sys
if __name__ == '__main__' and sys.version_info < (2, 6):
sys.stderr.write("""\
==============================================================================
WARNING: this test is intended for Python 2.6 and newer where the io library
is available. This seems to be an older version of Python running.
Continuing anyway...
==============================================================================
""")
import io
import serial
# trick to make this test run under 2.6 and 3.x without modification.
# problem is, io library on 2.6 does NOT accept type 'str' and 3.x doesn't
# like u'nicode' strings with the prefix, and it does not provide a unicode
# function ('str' is now what 'unicode' used to be)
if sys.version_info >= (3, 0):
def unicode(x): return x
# on which port should the tests be performed:
PORT = 0
class Test_SerialAndIO(unittest.TestCase):
def setUp(self):
self.s = serial.serial_for_url(PORT, timeout=1)
#~ self.io = io.TextIOWrapper(self.s)
self.io = io.TextIOWrapper(io.BufferedRWPair(self.s, self.s))
def tearDown(self):
self.s.close()
def test_hello_raw(self):
self.io.write(b"hello\n".decode('utf-8'))
self.io.flush() # it is buffering. required to get the data out
hello = self.io.readline()
self.assertEqual(hello, b"hello\n".decode('utf-8'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
import sys
sys.stdout.write(__doc__)
if len(sys.argv) > 1:
PORT = sys.argv[1]
sys.stdout.write("Testing port: %r\n" % PORT)
sys.argv[1:] = ['-v']
# When this module is executed from the command-line, it runs all its tests
unittest.main()
| 412
| 21
| 130
|
3b6e34ed6a4874e788fb9246884ac62a399aee1c
| 844
|
py
|
Python
|
settings/views.py
|
hudecof/bacchus
|
1d7bafa2331535b27b336b42f07f8fe328f6d131
|
[
"Apache-2.0"
] | 1
|
2020-04-15T14:31:48.000Z
|
2020-04-15T14:31:48.000Z
|
settings/views.py
|
hudecof/bacchus
|
1d7bafa2331535b27b336b42f07f8fe328f6d131
|
[
"Apache-2.0"
] | 4
|
2019-04-13T08:35:51.000Z
|
2019-04-13T15:08:47.000Z
|
settings/views.py
|
hudecof/bacchus
|
1d7bafa2331535b27b336b42f07f8fe328f6d131
|
[
"Apache-2.0"
] | 1
|
2019-03-25T07:48:29.000Z
|
2019-03-25T07:48:29.000Z
|
from django.shortcuts import render
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.core import serializers
# Create your views here.
from django.http import HttpResponse
from django.template import loader
from django.views.generic import ListView
from models import *
@login_required(login_url="/settings/")
| 32.461538
| 100
| 0.726303
|
from django.shortcuts import render
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.core import serializers
# Create your views here.
from django.http import HttpResponse
from django.template import loader
from django.views.generic import ListView
from models import *
@login_required(login_url="/settings/")
def settings(request):
settings_saved = False
if request.method == "POST":
settings = Settings.objects.all()
for setting in settings:
setting.value = request.POST[setting.parameter]
setting.save()
settings_saved = True
settings = Settings.objects.all()
return render(request,'settings.html',{'settings': settings, 'settings_saved': settings_saved })
| 414
| 0
| 22
|
862ff91599547aadbc1b7ad7559acc383025966c
| 504
|
py
|
Python
|
tinder_api/utils/config.py
|
peterchun2000/TinderBioBot
|
bf968e1c4923a4d7b201df3b5bd5b9a950a81156
|
[
"MIT"
] | null | null | null |
tinder_api/utils/config.py
|
peterchun2000/TinderBioBot
|
bf968e1c4923a4d7b201df3b5bd5b9a950a81156
|
[
"MIT"
] | null | null | null |
tinder_api/utils/config.py
|
peterchun2000/TinderBioBot
|
bf968e1c4923a4d7b201df3b5bd5b9a950a81156
|
[
"MIT"
] | null | null | null |
with open("tinder_api/utils/token.txt", "r") as f:
tinder_token = f.read()
# it is best for you to write in the token to save yourself the file I/O
# especially if you have python byte code off
#tinder_token = ""
headers = {
'app_version': '6.9.4',
'platform': 'ios',
'content-type': 'application/json',
'User-agent': 'Tinder/7.5.3 (iPohone; iOS 10.3.2; Scale/2.00)',
'X-Auth-Token': 'enter_auth_token',
}
host = 'https://api.gotinder.com'
if __name__ == '__main__':
pass
| 25.2
| 72
| 0.640873
|
with open("tinder_api/utils/token.txt", "r") as f:
tinder_token = f.read()
# it is best for you to write in the token to save yourself the file I/O
# especially if you have python byte code off
#tinder_token = ""
headers = {
'app_version': '6.9.4',
'platform': 'ios',
'content-type': 'application/json',
'User-agent': 'Tinder/7.5.3 (iPohone; iOS 10.3.2; Scale/2.00)',
'X-Auth-Token': 'enter_auth_token',
}
host = 'https://api.gotinder.com'
if __name__ == '__main__':
pass
| 0
| 0
| 0
|
5d0bd51242233aa324559b7eb615f363cf83b4a4
| 3,141
|
py
|
Python
|
infoblox_netmri/api/remote/models/issue_list_device_remote.py
|
infobloxopen/infoblox_netmri
|
aa1c744df7e439dbe163bb9edd165e4e85a9771b
|
[
"Apache-2.0"
] | 12
|
2016-02-19T12:37:54.000Z
|
2022-03-04T20:11:08.000Z
|
infoblox_netmri/api/remote/models/issue_list_device_remote.py
|
azinfoblox/infoblox-netmri
|
02372c5231e2677ab6299cb659a73c9a41b4b0f4
|
[
"Apache-2.0"
] | 18
|
2015-11-12T18:37:00.000Z
|
2021-05-19T07:59:55.000Z
|
infoblox_netmri/api/remote/models/issue_list_device_remote.py
|
azinfoblox/infoblox-netmri
|
02372c5231e2677ab6299cb659a73c9a41b4b0f4
|
[
"Apache-2.0"
] | 18
|
2016-01-07T12:04:34.000Z
|
2022-03-31T11:05:41.000Z
|
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class IssueListDeviceRemote(RemoteModel):
"""
    This table lists the issue entries recorded for the device.
| ``IssueTimestamp:`` The date and time this issue list device record was collected or calculated.
| ``attribute type:`` datetime
| ``DeviceID:`` The internal NetMRI identifier for the device from which issue list device information was collected.
| ``attribute type:`` number
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``Count:`` The total number of issues in the list device captured within NetMRI.
| ``attribute type:`` number
| ``Adds:`` Added a new type of issue in the list device.
| ``attribute type:`` string
| ``Deletes:`` Remove an issue from the list device.
| ``attribute type:`` string
| ``Same:`` Maintain the issues as in the list device.
| ``attribute type:`` string
| ``Suppressed:`` A flag indicating whether this issue is suppressed or not.
| ``attribute type:`` string
| ``FirstSeen:`` The timestamp of when NetMRI first discovered this interface.
| ``attribute type:`` string
| ``Timestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``EndTime:`` The date and time the record was last modified in NetMRI.
| ``attribute type:`` datetime
    | ``TotalCount:`` The total number of issues that occurred on each device.
| ``attribute type:`` number
| ``Component:`` The issue component (Devices, Configuration, VLANs, etc.).
| ``attribute type:`` string
| ``SeverityID:`` The issue severity ID (1 = Error, 2 = Warning, 3 = Info). Useful for sorting.
| ``attribute type:`` number
| ``SeverityName:`` The severity name in the issue list device.
| ``attribute type:`` string
| ``Correctness:`` The correctness contribution for this issue.
| ``attribute type:`` string
| ``Stability:`` The stability contribution for this issue.
| ``attribute type:`` string
| ``Status:`` A status of the issues in the device.
| ``attribute type:`` string
"""
properties = ("IssueTimestamp",
"DeviceID",
"DataSourceID",
"Count",
"Adds",
"Deletes",
"Same",
"Suppressed",
"FirstSeen",
"Timestamp",
"EndTime",
"TotalCount",
"Component",
"SeverityID",
"SeverityName",
"Correctness",
"Stability",
"Status",
)
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"DeviceID": self.DeviceID})
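# Hedged sketch (not part of the original module): reading documented fields off an already
# fetched record; how records are obtained from the broker API is deliberately not shown here.
def summarize_issue(issue):
    """Format one IssueListDeviceRemote record using only the attributes documented above."""
    return "%s issue on device %s: %s (severity %s, first seen %s)" % (
        issue.Component, issue.DeviceID, issue.Status, issue.SeverityName, issue.FirstSeen)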
| 33.063158
| 122
| 0.587393
|
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class IssueListDeviceRemote(RemoteModel):
"""
    This table lists the issue entries recorded for the device.
| ``IssueTimestamp:`` The date and time this issue list device record was collected or calculated.
| ``attribute type:`` datetime
| ``DeviceID:`` The internal NetMRI identifier for the device from which issue list device information was collected.
| ``attribute type:`` number
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``Count:`` The total number of issues in the list device captured within NetMRI.
| ``attribute type:`` number
| ``Adds:`` Added a new type of issue in the list device.
| ``attribute type:`` string
| ``Deletes:`` Remove an issue from the list device.
| ``attribute type:`` string
| ``Same:`` Maintain the issues as in the list device.
| ``attribute type:`` string
| ``Suppressed:`` A flag indicating whether this issue is suppressed or not.
| ``attribute type:`` string
| ``FirstSeen:`` The timestamp of when NetMRI first discovered this interface.
| ``attribute type:`` string
| ``Timestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``EndTime:`` The date and time the record was last modified in NetMRI.
| ``attribute type:`` datetime
    | ``TotalCount:`` The total number of issues that occurred on each device.
| ``attribute type:`` number
| ``Component:`` The issue component (Devices, Configuration, VLANs, etc.).
| ``attribute type:`` string
| ``SeverityID:`` The issue severity ID (1 = Error, 2 = Warning, 3 = Info). Useful for sorting.
| ``attribute type:`` number
| ``SeverityName:`` The severity name in the issue list device.
| ``attribute type:`` string
| ``Correctness:`` The correctness contribution for this issue.
| ``attribute type:`` string
| ``Stability:`` The stability contribution for this issue.
| ``attribute type:`` string
| ``Status:`` A status of the issues in the device.
| ``attribute type:`` string
"""
properties = ("IssueTimestamp",
"DeviceID",
"DataSourceID",
"Count",
"Adds",
"Deletes",
"Same",
"Suppressed",
"FirstSeen",
"Timestamp",
"EndTime",
"TotalCount",
"Component",
"SeverityID",
"SeverityName",
"Correctness",
"Stability",
"Status",
)
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"DeviceID": self.DeviceID})
| 0
| 0
| 0
|
3560d99e871344d4aeeafd4a7011d5815fffa435
| 250
|
py
|
Python
|
accounts/referrals/exceptions.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | 2
|
2022-01-24T23:30:18.000Z
|
2022-01-26T00:21:22.000Z
|
accounts/referrals/exceptions.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | null | null | null |
accounts/referrals/exceptions.py
|
Revibe-Music/core-services
|
6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2
|
[
"MIT"
] | null | null | null |
"""
"""
from revibe.exceptions import RevibeException
# -----------------------------------------------------------------------------
| 19.230769
| 79
| 0.532
|
"""
"""
from revibe.exceptions import RevibeException
# -----------------------------------------------------------------------------
class BaseReferralException(RevibeException):
pass
class ReferralException(BaseReferralException):
pass
| 0
| 68
| 46
|
d7a3fe0014580a864aab1eeba100ec0239d99108
| 413
|
py
|
Python
|
projects/fred-snyder-examples/windows-utils/auto_shutdown_windows.py
|
fred-snyder/notes-on-python
|
1837a8999ea0d736ff3804417524b8d8cb1955e6
|
[
"MIT"
] | 1
|
2022-03-15T22:03:45.000Z
|
2022-03-15T22:03:45.000Z
|
projects/fred-snyder-examples/windows-utils/auto_shutdown_windows.py
|
fred-snyder/notes-on-python
|
1837a8999ea0d736ff3804417524b8d8cb1955e6
|
[
"MIT"
] | null | null | null |
projects/fred-snyder-examples/windows-utils/auto_shutdown_windows.py
|
fred-snyder/notes-on-python
|
1837a8999ea0d736ff3804417524b8d8cb1955e6
|
[
"MIT"
] | null | null | null |
"""
Automatically shutdown windows after a user defined time period
"""
import subprocess
from time import sleep
print("")
print("Auto Shutdown Windows")
print("=====================")
print("")
seconds = float(raw_input("Specify time in minutes: ")) * 60.0
print("")
print(">>> Computer will force shutdown automatically in %s minutes") %(seconds/60.0)
sleep(seconds)
subprocess.call(["shutdown", "/s", "/f"])
| 24.294118
| 85
| 0.670702
|
"""
Automatically shutdown windows after a user defined time period
"""
import subprocess
from time import sleep
print("")
print("Auto Shutdown Windows")
print("=====================")
print("")
seconds = float(raw_input("Specify time in minutes: ")) * 60.0
print("")
print(">>> Computer will force shutdown automatically in %s minutes") %(seconds/60.0)
sleep(seconds)
subprocess.call(["shutdown", "/s", "/f"])
| 0
| 0
| 0
|
6ed144d022c7f8b2c0e85c46561ea4c039a0e079
| 720
|
py
|
Python
|
Binary Search/744. Find Smallest Letter Greater Than Target.py
|
beckswu/Leetcode
|
480e8dc276b1f65961166d66efa5497d7ff0bdfd
|
[
"MIT"
] | 138
|
2020-02-08T05:25:26.000Z
|
2021-11-04T11:59:28.000Z
|
Binary Search/744. Find Smallest Letter Greater Than Target.py
|
beckswu/Leetcode
|
480e8dc276b1f65961166d66efa5497d7ff0bdfd
|
[
"MIT"
] | null | null | null |
Binary Search/744. Find Smallest Letter Greater Than Target.py
|
beckswu/Leetcode
|
480e8dc276b1f65961166d66efa5497d7ff0bdfd
|
[
"MIT"
] | 24
|
2021-01-02T07:18:43.000Z
|
2022-03-20T08:17:54.000Z
|
"""
744. Find Smallest Letter Greater Than Target
Input:
letters = ["c", "f", "j"]
target = "a"
Output: "c"
Input:
letters = ["c", "f", "j"]
target = "c"
Output: "f"
Input:
letters = ["c", "f", "j"]
target = "d"
Output: "f"
Input:
letters = ["c", "f", "j"]
target = "g"
Output: "j"
Input:
letters = ["c", "f", "j"]
target = "j"
Output: "c"
Input:
letters = ["c", "f", "j"]
target = "k"
Output: "c"
"""
| 16.744186
| 98
| 0.519444
|
"""
744. Find Smallest Letter Greater Than Target
Input:
letters = ["c", "f", "j"]
target = "a"
Output: "c"
Input:
letters = ["c", "f", "j"]
target = "c"
Output: "f"
Input:
letters = ["c", "f", "j"]
target = "d"
Output: "f"
Input:
letters = ["c", "f", "j"]
target = "g"
Output: "j"
Input:
letters = ["c", "f", "j"]
target = "j"
Output: "c"
Input:
letters = ["c", "f", "j"]
target = "k"
Output: "c"
"""
class Solution:
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
return letters[0] if letters[-1]<=target else letters[bisect.bisect_right(letters,target)]
| 0
| 254
| 24
|
2b341b6cc86aecc7029b54867d3868098ce9287a
| 388
|
py
|
Python
|
python/edgeNode/menu.py
|
santosh/.nuke
|
1c87f82de0fcfcebfb3396ad65deffc754dae77a
|
[
"BSD-3-Clause"
] | 1
|
2020-02-06T22:31:39.000Z
|
2020-02-06T22:31:39.000Z
|
python/edgeNode/menu.py
|
santosh/.nuke
|
1c87f82de0fcfcebfb3396ad65deffc754dae77a
|
[
"BSD-3-Clause"
] | 9
|
2017-08-30T11:20:13.000Z
|
2020-02-21T19:41:03.000Z
|
python/edgeNode/menu.py
|
santosh/.nuke
|
1c87f82de0fcfcebfb3396ad65deffc754dae77a
|
[
"BSD-3-Clause"
] | 2
|
2019-08-22T07:41:04.000Z
|
2021-01-19T14:56:04.000Z
|
# -*- coding: utf-8 -*-
import nuke
import edgeNode
SantoshMenu.addCommand("edgeNode/Jump to First", "edgeNode.jump_to_edge_node('top')")
SantoshMenu.addCommand("edgeNode/Jump to Last", "edgeNode.jump_to_edge_node('bottom')")
SantoshMenu.addCommand("edgeNode/View First", "edgeNode.view_edge_node('top')")
SantoshMenu.addCommand("edgeNode/View Last", "edgeNode.view_edge_node('bottom')")
| 48.5
| 87
| 0.773196
|
# -*- coding: utf-8 -*-
import nuke
import edgeNode
SantoshMenu.addCommand("edgeNode/Jump to First", "edgeNode.jump_to_edge_node('top')")
SantoshMenu.addCommand("edgeNode/Jump to Last", "edgeNode.jump_to_edge_node('bottom')")
SantoshMenu.addCommand("edgeNode/View First", "edgeNode.view_edge_node('top')")
SantoshMenu.addCommand("edgeNode/View Last", "edgeNode.view_edge_node('bottom')")
| 0
| 0
| 0
|
c5d1f9fb86149208278b6c383c655979144a9a0e
| 5,661
|
py
|
Python
|
code/multiclient.py
|
xwan3115/Chat-Room
|
1d3415ead06b4f612877909af9da31de327b47fb
|
[
"MIT"
] | null | null | null |
code/multiclient.py
|
xwan3115/Chat-Room
|
1d3415ead06b4f612877909af9da31de327b47fb
|
[
"MIT"
] | null | null | null |
code/multiclient.py
|
xwan3115/Chat-Room
|
1d3415ead06b4f612877909af9da31de327b47fb
|
[
"MIT"
] | null | null | null |
#Python 3
#Usage: python3 multiclient.py <server_name> <server_port> <udp_port>
#coding: utf-8
from socket import *
import sys
# The argument of client
servername = sys.argv[1]
serverPort = sys.argv[2]
udpPort = sys.argv[3]
serverPort = int(serverPort)
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((servername, serverPort))
ifloged = authenticate()
while ifloged:
print("Welcome to TOOM!")
allcommand = input("Enter one of the following commands (MSG, DLT, EDT, RDM, ATU, OUT, UPD):")
command = allcommand[0:3]
if command == 'MSG':
if allcommand == 'MSG':
print("Error! Need message after MSG command\n")
else:
clientSocket.sendall('MSG'.encode('utf-8'))
msg(allcommand[4::])
elif command == 'DLT':
# We need to check the usage of DLT
if allcommand == 'DLT':
print("Error! Need seq number and timestamp after DLT command\n")
else:
clientSocket.sendall('DLT'.encode('utf-8'))
info = allcommand[4::]
lists = info.split()
if len(lists) <= 2:
print("Error! Need seq number and timestamp after DLT command\n")
else:
clientSocket.sendall(info.encode('utf-8'))
recev = clientSocket.recv(2048).decode('utf-8')
dlt(recev)
elif command == 'EDT':
if allcommand == 'EDT':
print("Error! Need seq number, timestamp, and modified message after EDT command\n")
else:
clientSocket.sendall('EDT'.encode('utf-8'))
info = allcommand[4::]
lists = info.split()
if len(lists) <= 2:
print("Error! Need seq number, timestamp, and modified message after EDT command\n")
else:
clientSocket.sendall(info.encode('utf-8'))
recev = clientSocket.recv(2048).decode('utf-8')
edt(recev)
elif command == 'RDM':
if allcommand == 'RDM':
print("Error! Need timestamp after EDT command\n")
else:
clientSocket.sendall('RDM'.encode('utf-8'))
info = allcommand[4::]
clientSocket.sendall(info.encode('utf-8'))
recev = clientSocket.recv(2048).decode('utf-8')
print(recev)
elif command == 'ATU':
if allcommand == command:
clientSocket.sendall('ATU'.encode('utf-8'))
print('The active user list returned: \n')
info = clientSocket.recv(2048).decode('utf-8')
print(info)
else:
print("Error! ATU command does not take any argument.\n")
elif command == 'UPD':
pass
elif command == 'OUT':
if allcommand == command:
clientSocket.sendall('OUT'.encode('utf-8'))
            info = clientSocket.recv(2048).decode('utf-8')
print("Thank you for using. You have logged out.\n")
break
else:
print("Error! OUT command does not take any argument.\n")
else:
print("This command is invalid. Please try again with either one of MSG, DLT, EDT, RDM, ATU, OUT and UPD\n")
clientSocket.close()
| 39.041379
| 117
| 0.571984
|
#Python 3
#Usage: python3 multiclient.py <server_name> <server_port> <udp_port>
#coding: utf-8
from socket import *
import sys
# The argument of client
servername = sys.argv[1]
serverPort = sys.argv[2]
udpPort = sys.argv[3]
serverPort = int(serverPort)
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((servername, serverPort))
def authenticate():
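    # The server drives a simple line-oriented login: it sends "Username\r\n", "Password\r\n",
    # retry or lock notices, or "Login Success\r\n"; on success the client replies with its UDP port.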
while True:
receivedMessage = clientSocket.recv(2048)
receivedMessage = receivedMessage.decode('utf-8')
if receivedMessage == "Username\r\n":
message = input("Username: ")
clientSocket.sendall(message.encode('utf-8'))
elif receivedMessage == "Password\r\n":
message = input("Password: ")
clientSocket.sendall(message.encode('utf-8'))
elif receivedMessage == "Invalid Password\r\n":
print("Invalid Password. Please try again\n")
message = input("Password: ")
clientSocket.sendall(message.encode('utf-8'))
elif receivedMessage == "Locked\r\n":
print("Invalid Password. Your account has been blocked. Please try again later\n")
return False
elif receivedMessage == "Still locked\r\n":
print("Your account is blocked due to multiple login failures. Please try again later\n")
return False
elif receivedMessage == "Login Success\r\n":
clientSocket.sendall(udpPort.encode('utf-8'))
return True
def msg(word):
clientSocket.sendall(word.encode('utf-8'))
confirm = clientSocket.recv(2048).decode('utf-8')
confirm = confirm.split()
time = ' '.join(confirm[1::])
message = 'Message ' + '#' + confirm[0] + ' ' + 'posted at ' + time +'.\n'
print(message)
def dlt(info):
if info == 'Seq':
print('The sequence number you provided is invalid\n')
elif info == 'User':
print('You do not have the authority to delete this message\n')
elif info == 'Timestamp':
print('The timestamp you provided does not match the log. Please check\n')
elif info == 'Delete':
time = clientSocket.recv(2048).decode('utf-8')
print('The deletion at '+time+' is successful\n')
def edt(info):
if info == 'Seq':
print('The sequence number you provided is invalid\n')
elif info == 'User':
print('You do not have the authority to delete this message\n')
elif info == 'Timestamp':
print('The timestamp you provided does not match the log. Please check\n')
elif info == 'Edit':
time = clientSocket.recv(2048).decode('utf-8')
        print('The edit at ' + time + ' is successful\n')
def upd():
pass
ifloged = authenticate()
while ifloged:
print("Welcome to TOOM!")
allcommand = input("Enter one of the following commands (MSG, DLT, EDT, RDM, ATU, OUT, UPD):")
command = allcommand[0:3]
if command == 'MSG':
if allcommand == 'MSG':
print("Error! Need message after MSG command\n")
else:
clientSocket.sendall('MSG'.encode('utf-8'))
msg(allcommand[4::])
elif command == 'DLT':
# We need to check the usage of DLT
if allcommand == 'DLT':
print("Error! Need seq number and timestamp after DLT command\n")
else:
clientSocket.sendall('DLT'.encode('utf-8'))
info = allcommand[4::]
lists = info.split()
if len(lists) <= 2:
print("Error! Need seq number and timestamp after DLT command\n")
else:
clientSocket.sendall(info.encode('utf-8'))
recev = clientSocket.recv(2048).decode('utf-8')
dlt(recev)
elif command == 'EDT':
if allcommand == 'EDT':
print("Error! Need seq number, timestamp, and modified message after EDT command\n")
else:
clientSocket.sendall('EDT'.encode('utf-8'))
info = allcommand[4::]
lists = info.split()
if len(lists) <= 2:
print("Error! Need seq number, timestamp, and modified message after EDT command\n")
else:
clientSocket.sendall(info.encode('utf-8'))
recev = clientSocket.recv(2048).decode('utf-8')
edt(recev)
elif command == 'RDM':
if allcommand == 'RDM':
print("Error! Need timestamp after EDT command\n")
else:
clientSocket.sendall('RDM'.encode('utf-8'))
info = allcommand[4::]
clientSocket.sendall(info.encode('utf-8'))
recev = clientSocket.recv(2048).decode('utf-8')
print(recev)
elif command == 'ATU':
if allcommand == command:
clientSocket.sendall('ATU'.encode('utf-8'))
print('The active user list returned: \n')
info = clientSocket.recv(2048).decode('utf-8')
print(info)
else:
print("Error! ATU command does not take any argument.\n")
elif command == 'UPD':
pass
elif command == 'OUT':
if allcommand == command:
clientSocket.sendall('OUT'.encode('utf-8'))
            info = clientSocket.recv(2048).decode('utf-8')
print("Thank you for using. You have logged out.\n")
break
else:
print("Error! OUT command does not take any argument.\n")
else:
print("This command is invalid. Please try again with either one of MSG, DLT, EDT, RDM, ATU, OUT and UPD\n")
clientSocket.close()
| 2,269
| 0
| 123
|
9254cee6730565f0da3ef736b1d1850ce942a265
| 461
|
py
|
Python
|
openstack_dashboard/dashboards/techbk_head/dashboard.py
|
TechBK/horizon-dev
|
db426842116af515f00b9e7c887f4f3f9f28298c
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/techbk_head/dashboard.py
|
TechBK/horizon-dev
|
db426842116af515f00b9e7c887f4f3f9f28298c
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/techbk_head/dashboard.py
|
TechBK/horizon-dev
|
db426842116af515f00b9e7c887f4f3f9f28298c
|
[
"Apache-2.0"
] | null | null | null |
from django.utils.translation import ugettext_lazy as _
import horizon
horizon.register(Techbk_Head)
| 24.263158
| 86
| 0.704989
|
from django.utils.translation import ugettext_lazy as _
import horizon
class Mygroup(horizon.PanelGroup):
slug = "firstgroup"
name = _("First Group")
panels = ('firstpanel','newpanel',)
class Techbk_Head(horizon.Dashboard):
name = _("Techbk Head")
slug = "techbk_head"
panels = (Mygroup,) # Add your panels here.
default_panel = 'firstpanel' # Specify the slug of the dashboard's default panel.
horizon.register(Techbk_Head)
| 0
| 310
| 46
|
0394f742acfd826dd28d708c88756dd5f480cf2f
| 3,336
|
py
|
Python
|
orchestra/plugins/forms.py
|
RubenPX/django-orchestra
|
5ab4779e1ae12ec99569d682601b7810587ed381
|
[
"Unlicense"
] | 68
|
2015-02-09T10:28:44.000Z
|
2022-03-12T11:08:36.000Z
|
orchestra/plugins/forms.py
|
RubenPX/django-orchestra
|
5ab4779e1ae12ec99569d682601b7810587ed381
|
[
"Unlicense"
] | 17
|
2015-05-01T18:10:03.000Z
|
2021-03-19T21:52:55.000Z
|
orchestra/plugins/forms.py
|
RubenPX/django-orchestra
|
5ab4779e1ae12ec99569d682601b7810587ed381
|
[
"Unlicense"
] | 29
|
2015-03-31T04:51:03.000Z
|
2022-02-17T02:58:50.000Z
|
from django import forms
from django.utils.encoding import force_text
from orchestra.admin.utils import admin_link
from orchestra.forms.widgets import SpanWidget
| 45.081081
| 99
| 0.603717
|
from django import forms
from django.utils.encoding import force_text
from orchestra.admin.utils import admin_link
from orchestra.forms.widgets import SpanWidget
class PluginForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.plugin_field in self.fields:
value = self.plugin.get_name()
display = '%s <a href=".">change</a>' % force_text(self.plugin.verbose_name)
self.fields[self.plugin_field].widget = SpanWidget(original=value, display=display)
help_text = self.fields[self.plugin_field].help_text
self.fields[self.plugin_field].help_text = getattr(self.plugin, 'help_text', help_text)
class PluginDataForm(PluginForm):
data = forms.CharField(widget=forms.HiddenInput, required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.instance:
for field in self.declared_fields:
initial = self.fields[field].initial
self.fields[field].initial = self.instance.data.get(field, initial)
if self.instance.pk:
# Admin Readonly fields are not availeble in self.fields, so we use Meta
plugin = getattr(self.instance, '%s_class' % self.plugin_field)
plugin_help_text = getattr(plugin, 'help_text', '')
model_help_text = self.instance._meta.get_field(self.plugin_field).help_text
self._meta.help_texts = {
self.plugin_field: plugin_help_text or model_help_text
}
for field in self.plugin.get_change_readonly_fields():
value = getattr(self.instance, field, None) or self.instance.data.get(field)
display = value
foo_display = getattr(self.instance, 'get_%s_display' % field, None)
if foo_display:
display = foo_display()
self.fields[field].required = False
self.fields[field].widget = SpanWidget(original=value, display=display)
def clean(self):
super().clean()
data = {}
# Update data fields
for field in self.declared_fields:
try:
data[field] = self.cleaned_data[field]
except KeyError:
data[field] = self.data[field]
# Keep old data fields
for field, value in self.instance.data.items():
if field not in data:
try:
data[field] = self.cleaned_data[field]
except KeyError:
data[field] = value
self.cleaned_data['data'] = data
class PluginModelAdapterForm(PluginForm):
def __init__(self, *args, **kwargs):
super(PluginForm, self).__init__(*args, **kwargs)
if self.plugin_field in self.fields:
# Provide a link to the related DB object change view
value = self.plugin.related_instance.pk
link = admin_link()(self.plugin.related_instance)
display = '%s <a href=".">change</a>' % link
self.fields[self.plugin_field].widget = SpanWidget(original=value, display=display)
help_text = self.fields[self.plugin_field].help_text
| 2,873
| 176
| 121
|
eb1d948d1dba72846fa6a82b0a43ff897a809dbe
| 10,254
|
py
|
Python
|
DA_test.py
|
selva604/DA_TBN
|
240b3561eb5957e2827a5b8ef8fbd991d51489f4
|
[
"MIT"
] | 1
|
2021-06-17T12:32:21.000Z
|
2021-06-17T12:32:21.000Z
|
DA_test.py
|
selva604/DA_TBN
|
240b3561eb5957e2827a5b8ef8fbd991d51489f4
|
[
"MIT"
] | null | null | null |
DA_test.py
|
selva604/DA_TBN
|
240b3561eb5957e2827a5b8ef8fbd991d51489f4
|
[
"MIT"
] | null | null | null |
import os
import argparse
import time
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
from sklearn.metrics import confusion_matrix, accuracy_score
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from dataset import TBNDataSet
from DA_model import TBN
from transforms import *
import pickle
import save_csv as sc
# label: the ground-truth label
if __name__ == '__main__':
main()
| 37.423358
| 165
| 0.590014
|
import os
import argparse
import time
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
from sklearn.metrics import confusion_matrix, accuracy_score
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from dataset import TBNDataSet
from DA_model import TBN
from transforms import *
import pickle
import save_csv as sc
def average_crops(results, num_crop, num_class):
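    # Collapse the crop axis: (crops * segments, classes) -> (crops, segments, classes),
    # average over the crops, then reshape to (segments, 1, classes) for downstream scoring.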
return results.cpu().numpy()\
.reshape((num_crop, args.test_segments, num_class))\
.mean(axis=0)\
.reshape((args.test_segments, 1, num_class))
def eval_video(data, net, num_class, device):
num_crop = args.test_crops
for m in args.modality:
data[m] = data[m].to(device)
rst,domain = net(data)
#print(rst)
if 'epic' not in args.dataset:
return average_crops(rst, num_crop, num_class)
else:
return {'verb': average_crops(rst[0], num_crop, num_class[0]),
'noun': average_crops(rst[1], num_crop, num_class[1])}
def evaluate_model(num_class):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = TBN(num_class, 1, args.modality,
base_model=args.arch,
consensus_type=args.crop_fusion_type,
dropout=args.dropout,
midfusion=args.midfusion)
c = 'checkpoint.pth.tar'
b = 'model_best.pth.tar'
weights = ('{weights_dir}/'+c).format(
weights_dir=args.weights_dir)
checkpoint = torch.load(weights)
print("model epoch {} best prec@1: {}".format(checkpoint['epoch'], checkpoint['best_prec1']))
base_dict = {'.'.join(k.split('.')[1:]): v for k,v in list(checkpoint['state_dict'].items())}
net.load_state_dict(base_dict)
test_transform = {}
image_tmpl = {}
for m in args.modality:
if m != 'Spec':
if args.test_crops == 1:
cropping = torchvision.transforms.Compose([
GroupScale(net.scale_size[m]),
GroupCenterCrop(net.input_size[m]),
])
elif args.test_crops == 10:
cropping = torchvision.transforms.Compose([
GroupOverSample(net.input_size[m], net.scale_size[m])
])
else:
raise ValueError("Only 1 and 10 crops are supported" +
" while we got {}".format(args.test_crops))
test_transform[m] = torchvision.transforms.Compose([
cropping, Stack(roll=args.arch == 'BNInception'),
ToTorchFormatTensor(div=args.arch != 'BNInception'),
GroupNormalize(net.input_mean[m], net.input_std[m]), ])
# Prepare dictionaries containing image name templates
# for each modality
if m in ['RGB', 'RGBDiff']:
image_tmpl[m] = "img_{:010d}.jpg"
elif m == 'Flow':
image_tmpl[m] = args.flow_prefix + "{}_{:010d}.jpg"
else:
test_transform[m] = torchvision.transforms.Compose([
Stack(roll=args.arch == 'BNInception'),
ToTorchFormatTensor(div=False), ])
data_length = net.new_length
test_loader = torch.utils.data.DataLoader(
TBNDataSet(args.dataset,
pd.read_pickle(args.test_list),
data_length,
args.modality,
image_tmpl,
visual_path=args.visual_path,
audio_path=args.audio_path,
num_segments=args.test_segments,
mode='test',
transform=test_transform,
resampling_rate=args.resampling_rate),
batch_size=1, shuffle=False,
num_workers=args.workers * 2)
net = torch.nn.DataParallel(net, device_ids=args.gpus).to(device)
with torch.no_grad():
net.eval()
results = []
total_num = len(test_loader.dataset)
proc_start_time = time.time()
max_num = args.max_num if args.max_num > 0 else total_num
for i, (data, label) in enumerate(test_loader):
if i >= max_num:
break
rst = eval_video(data, net, num_class, device)
if label != -10000: # label exists
if 'epic' not in args.dataset:
label_ = label.item()
else:
label_ = {k: v.item() for k, v in label.items()}
results.append((rst, label_))
else: # Test set (S1/S2)
results.append((rst,))
cnt_time = time.time() - proc_start_time
print('video {} done, total {}/{}, average {} sec/video'.format(
i, i + 1, total_num, float(cnt_time) / (i + 1)))
return results
# label: the ground-truth label
def print_accuracy(scores, labels):
video_pred = [np.argmax(np.mean(score, axis=0)) for score in scores] #予測したラベル
#sc._csv(labels,video_pred)
print(video_pred)
print(labels)
cf = confusion_matrix(labels, video_pred).astype(float)
print(cf)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
cls_cnt[cls_hit == 0] = 1 # to avoid divisions by zero
cls_acc = cls_hit / cls_cnt
acc = accuracy_score(labels, video_pred)
print('Accuracy {:.02f}%'.format(acc * 100))
print('Average Class Accuracy {:.02f}%'.format(np.mean(cls_acc) * 100))
def save_scores(results, scores_file):
save_dict = {}
if 'epic' not in args.dataset:
scores = np.array([result[0] for result in results])
labels = np.array([result[1] for result in results])
else:
if len(results[0]) == 2:
keys = results[0][0].keys()
scores = {k: np.array([result[0][k] for result in results]) for k in keys}
labels = {k: np.array([result[1][k] for result in results]) for k in keys}
else:
keys = results[0][0].keys()
scores = {k: np.array([result[0][k] for result in results]) for k in keys}
labels = None
save_dict['scores'] = scores
if labels is not None:
save_dict['labels'] = labels
with open(scores_file, 'wb') as f:
pickle.dump(save_dict, f)
def action_acc(results):
keys = results[0][0].keys()
    predict = []  # for verb and noun, store an array of per-clip correctness, e.g. [True, False, True, ...] * 2
for task in keys:
scores = [result[0][task] for result in results]
labels = [result[1][task] for result in results]
        video_pred = [np.argmax(np.mean(score, axis=0)) for score in scores]  # predicted label indices, e.g. [0, 2, 3, 4, 6]
video_pred = np.array(video_pred)
labels = np.array(labels)
        predict.append(labels==video_pred)  # compare ground-truth and predicted labels; matching clips become True, e.g. [True, False, False, ...]
    print('Action Accuracy:{}%'.format(np.count_nonzero((predict[0]==predict[1])&(predict[0]==True))/float(len(predict[0]))*100))  # count clips that are True in both arrays and divide by the total number of clips
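# Toy check (illustration only) of the action-accuracy rule above: a clip counts as a correct
# action only when both its verb and its noun prediction are right.
def _action_acc_example():
    verb_ok = np.array([True, True, False])
    noun_ok = np.array([False, True, False])
    both = np.count_nonzero((verb_ok == noun_ok) & (verb_ok == True))  # -> 1
    return both / float(len(verb_ok)) * 100                            # -> 33.33...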
def main():
parser = argparse.ArgumentParser(description="Standard video-level" +
" testing")
parser.add_argument('dataset', type=str,
choices=['ucf101', 'hmdb51', 'kinetics', 'epic-kitchens-55', 'epic-kitchens-100','D2_P01','D1_P08','epic-D2_P01'])
parser.add_argument('modality', type=str,
choices=['RGB', 'Flow', 'RGBDiff', 'Spec'],
nargs='+', default=['RGB', 'Flow', 'Spec'])
parser.add_argument('weights_dir', type=str)
parser.add_argument('--test_list')
parser.add_argument('--visual_path')
parser.add_argument('--audio_path')
parser.add_argument('--arch', type=str, default="resnet101")
parser.add_argument('--scores_file', type=str, default='scores')
parser.add_argument('--test_segments', type=int, default=25)
parser.add_argument('--max_num', type=int, default=-1)
parser.add_argument('--test_crops', type=int, default=10)
parser.add_argument('--input_size', type=int, default=224)
parser.add_argument('--crop_fusion_type', type=str, default='avg',
choices=['avg', 'max', 'topk'])
parser.add_argument('--k', type=int, default=3)
parser.add_argument('--dropout', type=float, default=0.7)
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--gpus', nargs='+', type=int, default=None)
parser.add_argument('--flow_prefix', type=str, default='')
parser.add_argument('--resampling_rate', type=int, default=24000)
parser.add_argument('--midfusion', choices=['concat', 'gating_concat', 'multimodal_gating'],
default='concat')
global args
args = parser.parse_args()
if args.dataset == 'ucf101':
num_class = 101
elif args.dataset == 'hmdb51':
num_class = 51
elif args.dataset == 'kinetics':
num_class = 400
elif args.dataset == 'beoid':
num_class = 34
elif args.dataset == 'epic-kitchens-55':
num_class = (125, 352)
elif args.dataset == 'epic-kitchens-100':
num_class = (97, 300)
elif args.dataset == 'D1_P08':
num_class = 8#(verb,noun)
elif args.dataset == 'D2_P01':
num_class = 8#(verb,noun)
elif args.dataset == 'epic-D2_P01':
num_class = 8#(verb,noun)
else:
raise ValueError('Unknown dataset ' + args.dataset)
results = evaluate_model(num_class)
if 'epic' in args.dataset:
if len(results[0]) == 2:
keys = results[0][0].keys()
for task in keys:
print('Evaluation of {}'.format(task.upper()))
print_accuracy([result[0][task] for result in results],
[result[1][task] for result in results])
action_acc(results)
else:
print_accuracy([result[0] for result in results],
[result[1] for result in results])
if not os.path.exists(os.path.dirname(args.scores_file)):
os.makedirs(os.path.dirname(args.scores_file))
save_scores(results, args.scores_file)
if __name__ == '__main__':
main()
| 9,875
| 0
| 160
|
f8507130797ddd9ce63f9ec8aba8f0325588ca4a
| 2,801
|
py
|
Python
|
src/napari_morphodynamics/_tests/test_widget.py
|
guiwitz/napari-morphodynamics
|
f1300bc9a45864356d83eab95660ae3efec5435b
|
[
"BSD-3-Clause"
] | null | null | null |
src/napari_morphodynamics/_tests/test_widget.py
|
guiwitz/napari-morphodynamics
|
f1300bc9a45864356d83eab95660ae3efec5435b
|
[
"BSD-3-Clause"
] | null | null | null |
src/napari_morphodynamics/_tests/test_widget.py
|
guiwitz/napari-morphodynamics
|
f1300bc9a45864356d83eab95660ae3efec5435b
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import napari_morphodynamics
import napari_morphodynamics.napari_gui
from morphodynamics.data import synth
import numpy as np
import h5py
import napari_morphodynamics
@pytest.fixture(scope="session")
def dataset(tmp_path_factory):
"""Create a dataset for testing."""
new_path = tmp_path_factory.mktemp("dataset")
im, sigs = synth.generate_dataset(height=100, width=100, steps=40, step_reverse=20,
displacement=0.5, radius=10, shifts=[3,7])
for ind, s in enumerate([im]+sigs):
h5_name = new_path.joinpath(f'synth_ch{ind+1}.h5')
with h5py.File(h5_name, "w") as f_out:
dset = f_out.create_dataset("volume", data=s, chunks=True, compression="gzip", compression_opts=1)
return new_path
@pytest.fixture
def test_data_exist(dataset):
"""Test that the data exists."""
assert dataset.joinpath("synth_ch1.h5").is_file()
assert dataset.joinpath("synth_ch2.h5").is_file()
assert dataset.joinpath("synth_ch3.h5").is_file()
def test_set_dataset(mywidget, dataset):
"""Test that the dataset is updated."""
mywidget.file_list.update_from_path(dataset)
channels = [mywidget.segm_channel.item(i).text() for i in range(mywidget.segm_channel.count())]
for i in range(3):
assert f'synth_ch{i+1}.h5' in channels
assert mywidget.param.data_folder == dataset
| 37.851351
| 110
| 0.717958
|
import pytest
import napari_morphodynamics
import napari_morphodynamics.napari_gui
from morphodynamics.data import synth
import numpy as np
import h5py
import napari_morphodynamics
@pytest.fixture(scope="session")
def dataset(tmp_path_factory):
"""Create a dataset for testing."""
new_path = tmp_path_factory.mktemp("dataset")
im, sigs = synth.generate_dataset(height=100, width=100, steps=40, step_reverse=20,
displacement=0.5, radius=10, shifts=[3,7])
for ind, s in enumerate([im]+sigs):
h5_name = new_path.joinpath(f'synth_ch{ind+1}.h5')
with h5py.File(h5_name, "w") as f_out:
dset = f_out.create_dataset("volume", data=s, chunks=True, compression="gzip", compression_opts=1)
return new_path
@pytest.fixture
def mywidget(make_napari_viewer, napari_plugin_manager):
napari_plugin_manager.register(napari_morphodynamics, name='napari-morphodynamics')
viewer = make_napari_viewer()
_, widget = viewer.window.add_plugin_dock_widget(
plugin_name='napari-morphodynamics', widget_name='MorphoWidget',
)
return widget
def test_data_exist(dataset):
"""Test that the data exists."""
assert dataset.joinpath("synth_ch1.h5").is_file()
assert dataset.joinpath("synth_ch2.h5").is_file()
assert dataset.joinpath("synth_ch3.h5").is_file()
def test_project_widget(mywidget):
assert isinstance(mywidget, napari_morphodynamics.napari_gui.MorphoWidget)
def test_set_dataset(mywidget, dataset):
"""Test that the dataset is updated."""
mywidget.file_list.update_from_path(dataset)
channels = [mywidget.segm_channel.item(i).text() for i in range(mywidget.segm_channel.count())]
for i in range(3):
assert f'synth_ch{i+1}.h5' in channels
assert mywidget.param.data_folder == dataset
def test_load(mywidget, dataset):
# set path, set channels to use
mywidget.file_list.update_from_path(dataset)
channels = [mywidget.segm_channel.item(i).text() for i in range(mywidget.segm_channel.count())]
mywidget.segm_channel.setCurrentRow(channels.index('synth_ch1.h5'))
for i in range(mywidget.signal_channel.count()):
if mywidget.signal_channel.item(i).text() in ['synth_ch2.h5', 'synth_ch3.h5']:
mywidget.signal_channel.item(i).setSelected(True)
# load the data
mywidget._on_load_dataset()
# check that the data is loaded
assert mywidget.data.dims == (100,100)
assert mywidget.param.morpho_name == 'synth_ch1.h5'
# check that the data is added to viewer
assert len(mywidget.viewer.layers) == 3
assert mywidget.viewer.layers[0].name == 'synth_ch1.h5'
assert mywidget.viewer.layers[1].name == 'signal synth_ch3.h5'
assert mywidget.viewer.layers[2].name == 'signal synth_ch2.h5'
| 1,349
| 0
| 68
|
2a1405546ed30a2c236a5eefc1d8dcfc524c4fb2
| 2,711
|
py
|
Python
|
scratch/odf.py
|
JohnGriffiths/dipy
|
5fb38e9b77547cdaf5eb140730444535733ae01d
|
[
"BSD-3-Clause"
] | 3
|
2015-07-31T20:43:18.000Z
|
2019-07-26T13:58:07.000Z
|
scratch/odf.py
|
JohnGriffiths/dipy
|
5fb38e9b77547cdaf5eb140730444535733ae01d
|
[
"BSD-3-Clause"
] | 9
|
2015-05-13T17:44:42.000Z
|
2018-05-27T20:09:55.000Z
|
scratch/odf.py
|
JohnGriffiths/dipy
|
5fb38e9b77547cdaf5eb140730444535733ae01d
|
[
"BSD-3-Clause"
] | 3
|
2016-08-05T22:43:16.000Z
|
2017-06-23T18:35:13.000Z
|
import numpy as np
from enthought.mayavi import mlab
import Image
if __name__ == '__main__':
import dipy.core.qball as qball
from dipy.io.bvectxt import read_bvec_file
filename='/Users/bagrata/HARDI/E1322S8I1.nii.gz'
grad_table_filename='/Users/bagrata/HARDI/E1322S8I1.bvec'
from nipy import load_image, save_image
grad_table, b_values = read_bvec_file(grad_table_filename)
img = load_image(filename)
print 'input dimensions: '
print img.ndim
print 'image size: '
print img.shape
print 'image affine: '
print img.affine
print 'images has pixels with size: '
print np.dot(img.affine, np.eye(img.ndim+1)).diagonal()[0:3]
data = np.asarray(img)
theta, phi = np.mgrid[0:2*np.pi:64*1j, 0:np.pi:32*1j]
odf_i = qball.ODF(data[188:192,188:192,22:24,:],4,grad_table,b_values)
disp_odf(odf_i[0:1,0:2,0:2])
| 32.27381
| 79
| 0.568794
|
import numpy as np
from enthought.mayavi import mlab
import Image
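# disp_odf: evaluate the spherical map on a theta/phi grid and draw one scaled
# spherical surface (an ODF glyph) per voxel with Mayavi; the 'RGB' colormap
# colours each glyph by direction, any other colormap name is handed straight
# to mlab.mesh.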
def disp_odf(sph_map, theta_res=64, phi_res=32, colormap='RGB', colors=256):
pi = np.pi
sin = np.sin
cos = np.cos
theta, phi = np.mgrid[0:2*pi:theta_res*1j, 0:pi:phi_res*1j]
x = sin(phi)*cos(theta)
y = sin(phi)*sin(theta)
z = cos(phi)
nvox = np.prod(sph_map.shape)
x_cen, y_cen, z_cen = _3grid(sph_map.shape)
odf_values = sph_map.evaluate_at(theta, phi)
max_value = odf_values.max()
mlab.figure()
for ii in range(nvox):
odf_ii = odf_values.reshape(nvox, theta_res, phi_res)[ii,:,:]
odf_ii /= max_value * 2
if colormap == 'RGB':
rgb = np.r_['-1,3,0', x*odf_ii, y*odf_ii, z*odf_ii]
rgb = np.abs(rgb*255/rgb.max()).astype('uint8')
odf_im = Image.fromarray(rgb, mode='RGB')
odf_im = odf_im.convert('P', palette=Image.ADAPTIVE, colors=colors)
lut = np.empty((colors,4),'uint8')
lut[:,3] = 255
lut[:,0:3] = np.reshape(odf_im.getpalette(),(colors,3))
oo = mlab.mesh(x*odf_ii + x_cen.flat[ii],
y*odf_ii + y_cen.flat[ii],
z*odf_ii + z_cen.flat[ii],
scalars=np.int16(odf_im))
oo.module_manager.scalar_lut_manager.lut.table=lut
else:
oo = mlab.mesh(x*odf_ii + x_cen.flat[ii],
y*odf_ii + y_cen.flat[ii],
z*odf_ii + z_cen.flat[ii],
scalars=odf_ii,
colormap=colormap)
def _3grid(shape):
if len(shape) > 3:
raise ValueError('cannot display 4d image')
elif len(shape) < 3:
d = [1, 1, 1]
d[0:len(shape)] = shape
else:
d = shape
return np.mgrid[0:d[0], 0:d[1], 0:d[2]]
if __name__ == '__main__':
import dipy.core.qball as qball
from dipy.io.bvectxt import read_bvec_file
filename='/Users/bagrata/HARDI/E1322S8I1.nii.gz'
grad_table_filename='/Users/bagrata/HARDI/E1322S8I1.bvec'
from nipy import load_image, save_image
grad_table, b_values = read_bvec_file(grad_table_filename)
img = load_image(filename)
print 'input dimensions: '
print img.ndim
print 'image size: '
print img.shape
print 'image affine: '
print img.affine
print 'images has pixels with size: '
print np.dot(img.affine, np.eye(img.ndim+1)).diagonal()[0:3]
data = np.asarray(img)
theta, phi = np.mgrid[0:2*np.pi:64*1j, 0:np.pi:32*1j]
odf_i = qball.ODF(data[188:192,188:192,22:24,:],4,grad_table,b_values)
disp_odf(odf_i[0:1,0:2,0:2])
| 1,789
| 0
| 46
|
50d614b501cdda6de87d46c5e94382c1d58a0243
| 513
|
py
|
Python
|
word_edit/spellcheck_utils.py
|
binhetech/PIE-GEC
|
99b46f38d11f8c10c3e0c0758c68633729e70ce7
|
[
"MIT"
] | 1
|
2021-07-14T07:08:48.000Z
|
2021-07-14T07:08:48.000Z
|
word_edit/spellcheck_utils.py
|
binhetech/PIE-GEC
|
99b46f38d11f8c10c3e0c0758c68633729e70ce7
|
[
"MIT"
] | null | null | null |
word_edit/spellcheck_utils.py
|
binhetech/PIE-GEC
|
99b46f38d11f8c10c3e0c0758c68633729e70ce7
|
[
"MIT"
] | null | null | null |
import re
reg_ex = re.compile(r"^[a-z][a-z]*[a-z]$")
no_reg_ex = re.compile(r".*[0-9].*")
mc_reg_ex = re.compile(r".*[A-Z].*[A-Z].*")
def containsNumber(text):
"""包含数字."""
return no_reg_ex.match(text)
def containsMultiCapital(text):
"""包含多个大写字母."""
return mc_reg_ex.match(text)
def can_spellcheck(w: str):
"""检查是否需要进行拼写检查."""
# return not ((not reg_ex.match(w)) or containsMultiCapital(w) or containsNumber
if reg_ex.match(w):
return True
else:
return False
| 20.52
| 84
| 0.617934
|
import re
reg_ex = re.compile(r"^[a-z][a-z]*[a-z]$")
no_reg_ex = re.compile(r".*[0-9].*")
mc_reg_ex = re.compile(r".*[A-Z].*[A-Z].*")
def containsNumber(text):
"""包含数字."""
return no_reg_ex.match(text)
def containsMultiCapital(text):
"""包含多个大写字母."""
return mc_reg_ex.match(text)
def can_spellcheck(w: str):
"""检查是否需要进行拼写检查."""
# return not ((not reg_ex.match(w)) or containsMultiCapital(w) or containsNumber
if reg_ex.match(w):
return True
else:
return False
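# Illustrative behaviour (hypothetical inputs): can_spellcheck("hello") is True
# because the word is all lowercase letters, while can_spellcheck("NASA") and
# can_spellcheck("a") are False since they do not match ^[a-z][a-z]*[a-z]$.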
| 0
| 0
| 0
|
9ec7e7ba75545a14f295f33d86c287dc2df77f62
| 1,951
|
py
|
Python
|
tests/test_community.py
|
Biomathsys/ncmw
|
e3a128f15a4183a67f32f9874519c18a319accc8
|
[
"MIT"
] | 5
|
2022-01-19T12:55:35.000Z
|
2022-03-10T13:17:22.000Z
|
tests/test_community.py
|
Biomathsys/ncmw
|
e3a128f15a4183a67f32f9874519c18a319accc8
|
[
"MIT"
] | 5
|
2022-02-18T08:17:04.000Z
|
2022-02-23T13:50:15.000Z
|
tests/test_community.py
|
Biomathsys/ncmw
|
e3a128f15a4183a67f32f9874519c18a319accc8
|
[
"MIT"
] | 1
|
2021-12-02T12:23:59.000Z
|
2021-12-02T12:23:59.000Z
|
from pandas.core.algorithms import isin
import pytest
from ncmw.utils import get_models
import numpy as np
from pandas import DataFrame
from ncmw.community.community_models import (
BagOfReactionsModel,
ShuttleCommunityModel,
create_stoichiometry_matrix,
)
MODELS = get_models("models")
COMMUNITY_MODELS = [BagOfReactionsModel, ShuttleCommunityModel]
| 32.516667
| 85
| 0.6694
|
from pandas.core.algorithms import isin
import pytest
from ncmw.utils import get_models
import numpy as np
from pandas import DataFrame
from ncmw.community.community_models import (
BagOfReactionsModel,
ShuttleCommunityModel,
create_stoichiometry_matrix,
)
MODELS = get_models("models")
COMMUNITY_MODELS = [BagOfReactionsModel, ShuttleCommunityModel]
@pytest.mark.slow
@pytest.mark.parametrize("community", COMMUNITY_MODELS)
def test_community_models(community):
model = community(MODELS)
N = len(model.models)
growth = model.slim_optimize()
assert growth > 0
growth2, single_growths, sol = model.optimize()
assert abs(growth2 - growth) < 1e-3
# Test necessary functionality
try:
for i in range(len(MODELS)):
growth_single = model.single_optimize(i)
growth_single_ref = MODELS[i].slim_optimize()
assert growth_single is not None
assert growth_single_ref is not None
assert abs(sum(single_growths) - growth2) < 1e-3
except:
assert False
try:
if isinstance(model, ShuttleCommunityModel):
summary = model.summary()
assert isinstance(summary, DataFrame)
coopm = model.compute_COOPM(0.1)
assert isinstance(coopm, dict)
coopm = model.compute_COOPM(growth, enforce_survival=False)
assert isinstance(coopm, dict)
# summary = model.compute_convex_combination(np.ones(N) / N, maxbiomass=0.1)
# assert np.isclose(growth, 0.1)
# assert isinstance(summary, DataFrame)
except:
assert False
@pytest.mark.parametrize("model", MODELS)
def test_create_stoichiometry_matrix(model):
S, met, rec = create_stoichiometry_matrix(model)
shape = S.shape
# assert len(met) == len(model.metabolites)
assert len(rec) == len(model.reactions)
assert shape[1] == len(rec)
| 1,395
| 0
| 46
|
3bbd10b60a915bd390b2704473ef45e5842576df
| 1,499
|
py
|
Python
|
server/assets/ImageRotationCorrection.py
|
Trafalcon/Parsr
|
d5aab6d1b4da6c37a30b25062fcaff682daa0a83
|
[
"Apache-2.0"
] | 1
|
2020-01-15T03:49:04.000Z
|
2020-01-15T03:49:04.000Z
|
server/assets/ImageRotationCorrection.py
|
Trafalcon/Parsr
|
d5aab6d1b4da6c37a30b25062fcaff682daa0a83
|
[
"Apache-2.0"
] | null | null | null |
server/assets/ImageRotationCorrection.py
|
Trafalcon/Parsr
|
d5aab6d1b4da6c37a30b25062fcaff682daa0a83
|
[
"Apache-2.0"
] | 1
|
2020-01-25T19:35:34.000Z
|
2020-01-25T19:35:34.000Z
|
# -*- coding: utf-8 -*-
"""
Automatically detect rotation and line spacing of an image of text using
Radon transform
If image is rotated by the inverse of the output, the lines will be
horizontal (though they may be upside-down depending on the original image)
It doesn't work with black borders
Courtesy: https://gist.github.com/endolith/334196bac1cac45a4893#
"""
from __future__ import division, print_function
import warnings
warnings.filterwarnings("ignore")
import sys
from PIL import Image
from skimage.transform import radon
from numpy import asarray, mean, array, sqrt
try:
# More accurate peak finding from
# https://gist.github.com/endolith/255291#file-parabolic-py
from parabolic import parabolic
except ImportError:
from numpy import argmax
if __name__ == "__main__":
main()
| 27.759259
| 75
| 0.691127
|
# -*- coding: utf-8 -*-
"""
Automatically detect rotation and line spacing of an image of text using
Radon transform
If image is rotated by the inverse of the output, the lines will be
horizontal (though they may be upside-down depending on the original image)
It doesn't work with black borders
Courtesy: https://gist.github.com/endolith/334196bac1cac45a4893#
"""
from __future__ import division, print_function
import warnings
warnings.filterwarnings("ignore")
import sys
from PIL import Image
from skimage.transform import radon
from numpy import asarray, mean, array, sqrt
try:
# More accurate peak finding from
# https://gist.github.com/endolith/255291#file-parabolic-py
from parabolic import parabolic
def argmax(x):
import numpy
return parabolic(x, numpy.argmax(x))[0]
except ImportError:
from numpy import argmax
def rms_flat(y):
return sqrt(mean(y**2))
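# compute_rotation_in_degrees: load the image as greyscale, subtract its mean,
# take the Radon transform, and return the projection angle (in degrees) whose
# sinogram column has the largest RMS energy; per the module docstring, rotating
# the image by the inverse of this value makes the text lines horizontal.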
def compute_rotation_in_degrees(filename):
I = asarray(Image.open(filename).convert('L'))
J = I - mean(I)
sinogram = radon(J)
r = array([rms_flat(line) for line in sinogram.transpose()])
rotation = argmax(r)
return rotation
def main():
try:
filename = str(sys.argv[1])
rotation = compute_rotation_in_degrees(filename)
print('{:.2f}'.format(rotation))
sys.stdout.flush()
sys.exit(0)
except Exception as e:
print('error')
sys.stdout.flush()
sys.exit(1)
if __name__ == "__main__":
main()
| 588
| 0
| 96
|
015174dcc56a54eb2c9777530571307d3a152bea
| 5,016
|
py
|
Python
|
requestor-python/engine.py
|
Edhendil/golem-transcoding
|
69ef784e7c7b36b45b1e5f2e292dacd9b9dec3bd
|
[
"MIT"
] | 1
|
2021-10-14T07:48:43.000Z
|
2021-10-14T07:48:43.000Z
|
requestor-python/engine.py
|
Edhendil/golem-transcoding
|
69ef784e7c7b36b45b1e5f2e292dacd9b9dec3bd
|
[
"MIT"
] | null | null | null |
requestor-python/engine.py
|
Edhendil/golem-transcoding
|
69ef784e7c7b36b45b1e5f2e292dacd9b9dec3bd
|
[
"MIT"
] | null | null | null |
from yapapi.runner import Engine, Task, vm
from yapapi.runner.ctx import WorkContext
from yapapi.log import log_summary, log_event_repr
from datetime import timedelta
import json
import uuid
from core import TranscodingData, TranscodingTask, GolemParameters, SubtaskFinishedEvent
# image hash of the geomandel docker image uploaded to golem
# _IMAGE_LINK = "896909125d8dc19918dc73fe7540ca45cfe87f434ed37f51edb20a4e"
# geomandel image
# _IMAGE_LINK = "47cd0f045333d837304d61f74266a1bcd49ad3cb0690a10f08d37bf4"
# ubuntu ffmpeg
_IMAGE_LINK = "febcd478b3e00b3d40a6d2a69a4932eedcc4440a1fe7658fbb626264"
class YagnaContext:
"""Holds information about the docker image and constraints for all the tasks to be executed in this context."""
def __create_engine(self):
"""Creates yagna engine"""
return Engine(
package=self.package,
max_workers=self.max_workers,
budget=self.budget,
timeout=timedelta(minutes=25),
subnet_tag=self.subnet_tag,
# By passing `event_emitter=log_summary()` we enable summary logging.
# See the documentation of the `yapapi.log` module on how to set
# the level of detail and format of the logged information.
event_emitter=log_summary(log_event_repr),
)
async def execute(self, tasks: [Task], worker_function, on_task_complete):
"""Executes a set of tasks on a preconfigured docker image.
Parameters
----------
tasks : [Task]
Yagna tasks
worker_function : (ctx: WorkContext, tasks) -> [Work]
Function returning a sequence of instructions for each of the provided tasks.
on_task_complete : (task: Task) -> None
Callback executed when a task has been processed.
"""
async with self.__create_engine() as engine:
async for task in engine.map(worker_function, tasks):
on_task_complete(task)
# docker image path to JSON file with task parameters
_TASK_INPUT_REMOTE_PATH = "/golem/work/input"
# minimal provider node memory constraint, not configurable
_MINIMAL_MEMORY = 0.5
# minimal provider node storage constraint, not configurable
_MINIMAL_STORAGE = 2.0
class TranscodingEngine:
"""Converts geomandel subtasks to yagna subtasks and sends them to Yagna Engine"""
@staticmethod
async def instance(golem_parameters: GolemParameters):
"""Creates an instance of TranscodingEngine. Static factory."""
repository = ImageRepository()
# retrieve the image link to ffmpeg docker image together with constraints
package = await repository.get_image(_MINIMAL_MEMORY, _MINIMAL_STORAGE)
# prepares the yagna engine
yagna = YagnaContext(package, golem_parameters.max_workers, golem_parameters.budget, golem_parameters.subnet_tag)
# wraps it in transcoding layer
return TranscodingEngine(yagna)
async def execute(self, tasks: [TranscodingData]):
"""Translates subtasks into Yagna format and executes them."""
wrapped_tasks = self.__wrap_in_yagna_task(tasks)
await self.yagna.execute(wrapped_tasks, self.__transcode_remote, self.__log_completion)
async def __transcode_remote(self, ctx: WorkContext, tasks: [TranscodingTask]):
"""Creates a set of instructions for each subtask"""
async for task in tasks:
remote_output_path: str = f"/golem/work/output.{task.data.extension}"
# Send input video to remote node
ctx.send_file(task.data.input, _TASK_INPUT_REMOTE_PATH)
# Execute ffmpeg command.
ctx.run("/usr/bin/ffmpeg", "-i", _TASK_INPUT_REMOTE_PATH, remote_output_path)
# Download the output file.
ctx.download_file(remote_output_path, task.data.output)
# Return a sequence of commands to be executed when remote node agrees to process a task.
yield ctx.commit()
task.accept_task()
def __wrap_in_yagna_task(self, data: []):
"""Converts any task data sequence to Yagna wrapper"""
for item in data:
yield Task(data=item)
| 42.151261
| 121
| 0.691786
|
from yapapi.runner import Engine, Task, vm
from yapapi.runner.ctx import WorkContext
from yapapi.log import log_summary, log_event_repr
from datetime import timedelta
import json
import uuid
from core import TranscodingData, TranscodingTask, GolemParameters, SubtaskFinishedEvent
# image hash of the geomandel docker image uploaded to golem
# _IMAGE_LINK = "896909125d8dc19918dc73fe7540ca45cfe87f434ed37f51edb20a4e"
# geomandel image
# _IMAGE_LINK = "47cd0f045333d837304d61f74266a1bcd49ad3cb0690a10f08d37bf4"
# ubuntu ffmpeg
_IMAGE_LINK = "febcd478b3e00b3d40a6d2a69a4932eedcc4440a1fe7658fbb626264"
class ImageRepository:
async def get_image(self, minimal_memory: float, minimal_storage: float):
"""Retrieve a link to the ffmpeg docker image together with constraints"""
return await vm.repo(
image_hash=_IMAGE_LINK,
min_mem_gib=minimal_memory,
min_storage_gib=minimal_storage,
)
class YagnaContext:
"""Holds information about the docker image and constraints for all the tasks to be executed in this context."""
def __init__(self, package, max_workers: int, budget: float, subnet_tag: str):
self.package = package
self.max_workers = max_workers
self.budget = budget
self.subnet_tag = subnet_tag
def __create_engine(self):
"""Creates yagna engine"""
return Engine(
package=self.package,
max_workers=self.max_workers,
budget=self.budget,
timeout=timedelta(minutes=25),
subnet_tag=self.subnet_tag,
# By passing `event_emitter=log_summary()` we enable summary logging.
# See the documentation of the `yapapi.log` module on how to set
# the level of detail and format of the logged information.
event_emitter=log_summary(log_event_repr),
)
async def execute(self, tasks: [Task], worker_function, on_task_complete):
"""Executes a set of tasks on a preconfigured docker image.
Parameters
----------
tasks : [Task]
Yagna tasks
worker_function : (ctx: WorkContext, tasks) -> [Work]
Function returning a sequence of instructions for each of the provided tasks.
on_task_complete : (task: Task) -> None
Callback executed when a task has been processed.
"""
async with self.__create_engine() as engine:
async for task in engine.map(worker_function, tasks):
on_task_complete(task)
# docker image path to JSON file with task parameters
_TASK_INPUT_REMOTE_PATH = "/golem/work/input"
# minimal provider node memory constraint, not configurable
_MINIMAL_MEMORY = 0.5
# minimal provider node storage constraint, not configurable
_MINIMAL_STORAGE = 2.0
class TranscodingEngine:
"""Converts geomandel subtasks to yagna subtasks and sends them to Yagna Engine"""
def __init__(self, yagna):
self.yagna = yagna
@staticmethod
async def instance(golem_parameters: GolemParameters):
"""Creates an instance of TranscodingEngine. Static factory."""
repository = ImageRepository()
# retrieve the image link to ffmpeg docker image together with constraints
package = await repository.get_image(_MINIMAL_MEMORY, _MINIMAL_STORAGE)
# prepares the yagna engine
yagna = YagnaContext(package, golem_parameters.max_workers, golem_parameters.budget, golem_parameters.subnet_tag)
# wraps it in transcoding layer
return TranscodingEngine(yagna)
async def execute(self, tasks: [TranscodingData]):
"""Translates subtasks into Yagna format and executes them."""
wrapped_tasks = self.__wrap_in_yagna_task(tasks)
await self.yagna.execute(wrapped_tasks, self.__transcode_remote, self.__log_completion)
async def __transcode_remote(self, ctx: WorkContext, tasks: [TranscodingTask]):
"""Creates a set of instructions for each subtask"""
async for task in tasks:
remote_output_path: str = f"/golem/work/output.{task.data.extension}"
# Send input video to remote node
ctx.send_file(task.data.input, _TASK_INPUT_REMOTE_PATH)
# Execute ffmpeg command.
ctx.run("/usr/bin/ffmpeg", "-i", _TASK_INPUT_REMOTE_PATH, remote_output_path)
# Download the output file.
ctx.download_file(remote_output_path, task.data.output)
# Return a sequence of commands to be executed when remote node agrees to process a task.
yield ctx.commit()
task.accept_task()
def __log_completion(self, task: TranscodingTask):
event = SubtaskFinishedEvent(str(uuid.uuid4()), task.data.transcoding_id, task.data.extension)
print(json.dumps(event.__dict__))
def __wrap_in_yagna_task(self, data: []):
"""Converts any task data sequence to Yagna wrapper"""
for item in data:
yield Task(data=item)
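# Rough usage sketch (not part of the original file; GolemParameters and
# TranscodingData constructors are assumed from the attributes read above):
#
#   params = GolemParameters(...)   # must expose max_workers, budget, subnet_tag
#   engine = await TranscodingEngine.instance(params)
#   await engine.execute(transcoding_data_list)   # items need input, output, extension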
| 399
| 323
| 102
|
dee92484cdddf1c5220a5e6fbd6d1fac63977161
| 4,711
|
py
|
Python
|
utils/ofb/brioche.py
|
luis-guilherme/mitra
|
18bd935b11dc8fcf594255a96809c05abc324e87
|
[
"MIT"
] | 864
|
2020-09-22T18:52:27.000Z
|
2022-03-28T19:57:25.000Z
|
utils/ofb/brioche.py
|
luis-guilherme/mitra
|
18bd935b11dc8fcf594255a96809c05abc324e87
|
[
"MIT"
] | 13
|
2020-09-24T10:42:21.000Z
|
2021-12-20T14:44:36.000Z
|
utils/ofb/brioche.py
|
luis-guilherme/mitra
|
18bd935b11dc8fcf594255a96809c05abc324e87
|
[
"MIT"
] | 55
|
2020-09-22T19:01:19.000Z
|
2022-03-20T09:15:45.000Z
|
#!/usr/bin/env python3
"""
Take a polyglot from mitra,
generate a OFB ciphertext which decrypts correctly under two different keys.
iv = dec(c0 ^ p0)
"""
import binascii
import os
import argparse
import re
from Crypto.Cipher import AES
from Crypto.Util.number import long_to_bytes,bytes_to_long
from Crypto.Util.number import long_to_bytes as l2b
from Crypto.Util.number import bytes_to_long as b2l
BLOCKLEN = 16
pad16 = lambda s: s + b"\0" * (16-len(s))
b2a = lambda b: repr(b)[2:-1]
dir_path = os.path.dirname(os.path.realpath(__file__))
ivsfn = os.path.join(dir_path, "ivs.txt")
with open(ivsfn, "r") as f:
iv_data = f.readlines()
IVS = {}
for l in iv_data:
if l.count("#") > 0:
l = l[:l.find("#")]
l = l.strip()
if l == "":
continue
l = re.split(r'\s+', l)
if len(l) != 6:
continue
iv,types,header1, header2, key1, key2 = l
if len(header1) != len(header2):
continue
if len(key1) != len(key2):
continue
header1 = binascii.unhexlify(header1)
header2 = binascii.unhexlify(header2)
key1 = binascii.unhexlify(key1)
key2 = binascii.unhexlify(key2)
iv = binascii.unhexlify(iv)
xor_hdr = xor(header1, header2)
IVS[(xor_hdr, key1, key2)] = iv
if __name__=='__main__':
parser = argparse.ArgumentParser(description="Turn a non-overlapping polyglot into a dual AES-OFB ciphertext.")
parser.add_argument('polyglot',
help="input polyglot - requires special naming like 'P(10-5c).png.rar'.")
parser.add_argument('output',
help="generated file.")
parser.add_argument('-i', '--iv', default=b"0",
help="nonce - default: 0.")
parser.add_argument('-k', '--keys', nargs=2, default=[b"Now?", b"L4t3r!!!"],
help="encryption keys - default: Now? / L4t3r!!!.")
args = parser.parse_args()
fnmix = args.polyglot
fnpoc = args.output
key1, key2 = args.keys
iv = args.iv
iv = pad16(unhextry(iv))
key1 = pad16(unhextry(key1))
key2 = pad16(unhextry(key2))
with open(fnmix, "rb") as file:
dIn = file.read()
dIn = pad(dIn, BLOCKLEN) # the padding will break with formats not supporting appended data
assert not key1 == key2
# fnmix should come from Mitra and
# has a naming convention like "P(14-89)-ID3v2[Zip].4d01e2fb.mp3.zip"
swaps = [int(i, 16) for i in fnmix[fnmix.find("(") + 1:fnmix.find(")")].split("-")]
exts = fnmix[-9:].split(".")[-2:]
if fnmix.startswith("O") and \
"{" in fnmix and \
"}" in fnmix:
print("Overlap file found")
iv = BruteIv(fnmix)
print("IV: %s" % b2a(binascii.hexlify(iv)))
assert len(dIn) % 16 == 0
bCount = len(dIn) // 16
ks1 = getKS(key1, iv, bCount)
ks2 = getKS(key2, iv, bCount)
dCrypt1 = xor(dIn, ks1[:len(dIn)])
dCrypt2 = xor(dIn, ks2[:len(dIn)])
dOut = mix(dCrypt1, dCrypt2, swaps)
print("key 1:", b2a(key1.strip(b"\0")))
print("key 2:", b2a(key2.strip(b"\0")))
ctxt = dOut
output = "\n".join([
"key1: %s" % b2a(binascii.hexlify(key1)),
"key2: %s" % b2a(binascii.hexlify(key2)),
"iv: %s" % b2a(binascii.hexlify(iv)),
"ciphertext: %s" % b2a(binascii.hexlify(ctxt)),
"exts: %s" % " ".join(exts),
"origin: %s" % fnmix,
])
with open(fnpoc, "wb") as fpoc:
fpoc.write(output.encode())
fpoc.close()
| 23.673367
| 112
| 0.645511
|
#!/usr/bin/env python3
"""
Take a polyglot from mitra,
generate a OFB ciphertext which decrypts correctly under two different keys.
iv = dec(c0 ^ p0)
"""
import binascii
import os
import argparse
import re
from Crypto.Cipher import AES
from Crypto.Util.number import long_to_bytes,bytes_to_long
from Crypto.Util.number import long_to_bytes as l2b
from Crypto.Util.number import bytes_to_long as b2l
BLOCKLEN = 16
pad16 = lambda s: s + b"\0" * (16-len(s))
b2a = lambda b: repr(b)[2:-1]
def xor(_a1, _a2):
assert len(_a1) == len(_a2)
return bytes([(_a1[i] ^ _a2[i]) for i in range(len(_a1))])
def pad(_d, _alig):
d_l = len(_d)
_d = _d if 0 == d_l % _alig else _d + b'\x00' * (_alig - d_l % _alig)
assert len(_d) % _alig == 0
return _d
def unhextry(_d):
try:
_d = binascii.unhexlify(_d)
except Exception:
pass # TypeError: Non-hexadecimal digit found
return _d
def getKS(key, iv, bCount):
aesECB = AES.new(key, AES.MODE_ECB)
curBlock = iv
stream = b""
for _ in range(bCount + 1):
curBlock = aesECB.encrypt(curBlock)
stream += curBlock
assert len(stream) == 16*(bCount+1)
return stream
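# OFB in brief: getKS chains AES-ECB encryptions of the IV into a keystream
# (block i+1 = E_K(block i)), and encryption is plaintext XOR keystream. Two
# keys give two keystreams; BruteIv below looks for an IV whose first keystream
# blocks differ by exactly header1 XOR header2, which is what lets one
# ciphertext carry a valid header under both keys.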
def mix(d1, d2, l):
assert len(d1) == len(d2)
mix = b""
start = 0
in1 = True
for end in l:
mix += d1[start:end] if in1 else d2[start:end]
in1 = not in1
start = end
mix += d1[start:] if in1 else d2[start:]
return mix
dir_path = os.path.dirname(os.path.realpath(__file__))
ivsfn = os.path.join(dir_path, "ivs.txt")
with open(ivsfn, "r") as f:
iv_data = f.readlines()
IVS = {}
for l in iv_data:
if l.count("#") > 0:
l = l[:l.find("#")]
l = l.strip()
if l == "":
continue
l = re.split(r'\s+', l)
if len(l) != 6:
continue
iv,types,header1, header2, key1, key2 = l
if len(header1) != len(header2):
continue
if len(key1) != len(key2):
continue
header1 = binascii.unhexlify(header1)
header2 = binascii.unhexlify(header2)
key1 = binascii.unhexlify(key1)
key2 = binascii.unhexlify(key2)
iv = binascii.unhexlify(iv)
xor_hdr = xor(header1, header2)
IVS[(xor_hdr, key1, key2)] = iv
if __name__=='__main__':
parser = argparse.ArgumentParser(description="Turn a non-overlapping polyglot into a dual AES-OFB ciphertext.")
parser.add_argument('polyglot',
help="input polyglot - requires special naming like 'P(10-5c).png.rar'.")
parser.add_argument('output',
help="generated file.")
parser.add_argument('-i', '--iv', default=b"0",
help="nonce - default: 0.")
parser.add_argument('-k', '--keys', nargs=2, default=[b"Now?", b"L4t3r!!!"],
help="encryption keys - default: Now? / L4t3r!!!.")
args = parser.parse_args()
fnmix = args.polyglot
fnpoc = args.output
key1, key2 = args.keys
iv = args.iv
iv = pad16(unhextry(iv))
key1 = pad16(unhextry(key1))
key2 = pad16(unhextry(key2))
with open(fnmix, "rb") as file:
dIn = file.read()
dIn = pad(dIn, BLOCKLEN) # the padding will break with formats not supporting appended data
assert not key1 == key2
# fnmix should come from Mitra and
# has a naming convention like "P(14-89)-ID3v2[Zip].4d01e2fb.mp3.zip"
swaps = [int(i, 16) for i in fnmix[fnmix.find("(") + 1:fnmix.find(")")].split("-")]
exts = fnmix[-9:].split(".")[-2:]
def BruteIv(fn):
hdr1 = fn[fn.find("{")+1:]
hdr1 = hdr1[:hdr1.find("}")]
hdr1 = binascii.unhexlify(hdr1)
hdr2 = dIn[:len(hdr1)]
hdr_xor = xor(hdr1,hdr2)
t = (hdr_xor, key1, key2)
if t in IVS:
iv = IVS[t]
print("IV already computed")
return iv
hdr_xor_l = len(hdr_xor)
aes1 = AES.new(key1, AES.MODE_ECB)
aes2 = AES.new(key2, AES.MODE_ECB)
i = 0
for i in range(2**64):
iv_s = long_to_bytes(i, 16)
block1 = aes1.encrypt(iv_s)
block2 = aes2.encrypt(iv_s)
if xor(block1[:hdr_xor_l], block2[:hdr_xor_l]) == hdr_xor:
print("Bruteforce results:")
print(" ".join("%s" % b2a(binascii.hexlify(i)) for i in [iv_s, hdr1, hdr2, key1, key2]))
return iv_s
return None
if fnmix.startswith("O") and \
"{" in fnmix and \
"}" in fnmix:
print("Overlap file found")
iv = BruteIv(fnmix)
print("IV: %s" % b2a(binascii.hexlify(iv)))
assert len(dIn) % 16 == 0
bCount = len(dIn) // 16
ks1 = getKS(key1, iv, bCount)
ks2 = getKS(key2, iv, bCount)
dCrypt1 = xor(dIn, ks1[:len(dIn)])
dCrypt2 = xor(dIn, ks2[:len(dIn)])
dOut = mix(dCrypt1, dCrypt2, swaps)
print("key 1:", b2a(key1.strip(b"\0")))
print("key 2:", b2a(key2.strip(b"\0")))
ctxt = dOut
output = "\n".join([
"key1: %s" % b2a(binascii.hexlify(key1)),
"key2: %s" % b2a(binascii.hexlify(key2)),
"iv: %s" % b2a(binascii.hexlify(iv)),
"ciphertext: %s" % b2a(binascii.hexlify(ctxt)),
"exts: %s" % " ".join(exts),
"origin: %s" % fnmix,
])
with open(fnpoc, "wb") as fpoc:
fpoc.write(output.encode())
fpoc.close()
| 1,434
| 0
| 139
|
b74f94271b1a62635133976cb4d4c8f2577f2456
| 3,763
|
py
|
Python
|
robotcode/utils/event.py
|
mardukbp/robotcode
|
0b34cf6e7931423117036fcf70f74e27da0cfb0f
|
[
"Apache-2.0"
] | 21
|
2021-03-11T19:37:31.000Z
|
2022-02-28T05:26:33.000Z
|
robotcode/utils/event.py
|
mardukbp/robotcode
|
0b34cf6e7931423117036fcf70f74e27da0cfb0f
|
[
"Apache-2.0"
] | 46
|
2021-03-11T10:58:38.000Z
|
2022-03-31T10:15:54.000Z
|
robotcode/utils/event.py
|
mardukbp/robotcode
|
0b34cf6e7931423117036fcf70f74e27da0cfb0f
|
[
"Apache-2.0"
] | 1
|
2021-11-08T10:32:57.000Z
|
2021-11-08T10:32:57.000Z
|
from __future__ import annotations
import inspect
import threading
import weakref
from types import MethodType
from typing import (
Any,
Callable,
Generic,
Iterator,
List,
MutableSet,
Optional,
Type,
TypeVar,
cast,
)
__all__ = ["EventIterator", "Event"]
_TResult = TypeVar("_TResult")
_TCallable = TypeVar("_TCallable", bound=Callable[..., Any])
_TEvent = TypeVar("_TEvent")
| 31.358333
| 107
| 0.632208
|
from __future__ import annotations
import inspect
import threading
import weakref
from types import MethodType
from typing import (
Any,
Callable,
Generic,
Iterator,
List,
MutableSet,
Optional,
Type,
TypeVar,
cast,
)
__all__ = ["EventIterator", "Event"]
_TResult = TypeVar("_TResult")
_TCallable = TypeVar("_TCallable", bound=Callable[..., Any])
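# Listener registry note: callbacks are stored as weak references
# (weakref.WeakMethod for bound methods, weakref.ref otherwise), so registering
# a listener never keeps its owner alive, and dead entries remove themselves
# through the weakref finalizer callback.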
class EventResultIteratorBase(Generic[_TCallable, _TResult]):
def __init__(self) -> None:
self._lock = threading.RLock()
self._listeners: MutableSet[weakref.ref[Any]] = set()
def add(self, callback: _TCallable) -> None:
def remove_listener(ref: Any) -> None:
with self._lock:
self._listeners.remove(ref)
with self._lock:
if inspect.ismethod(callback):
self._listeners.add(weakref.WeakMethod(cast(MethodType, callback), remove_listener))
else:
self._listeners.add(weakref.ref(callback, remove_listener))
def remove(self, callback: _TCallable) -> None:
with self._lock:
try:
if inspect.ismethod(callback):
self._listeners.remove(weakref.WeakMethod(cast(MethodType, callback)))
else:
self._listeners.remove(weakref.ref(callback))
except KeyError:
pass
def __contains__(self, obj: Any) -> bool:
if inspect.ismethod(obj):
return weakref.WeakMethod(cast(MethodType, obj)) in self._listeners
else:
return weakref.ref(obj) in self._listeners
def __len__(self) -> int:
return len(self._listeners)
def __iter__(self) -> Iterator[_TCallable]:
for r in self._listeners:
c = r()
if c is not None:
yield c
def _notify(self, *args: Any, **kwargs: Any) -> Iterator[_TResult]:
for method in set(self):
yield method(*args, **kwargs)
class EventIterator(EventResultIteratorBase[_TCallable, _TResult]):
def __call__(self, *args: Any, **kwargs: Any) -> Iterator[_TResult]:
return self._notify(*args, **kwargs)
class Event(EventResultIteratorBase[_TCallable, _TResult]):
def __call__(self, *args: Any, **kwargs: Any) -> List[_TResult]:
return [a for a in self._notify(*args, **kwargs)]
_TEvent = TypeVar("_TEvent")
class EventDescriptorBase(Generic[_TCallable, _TResult, _TEvent]):
def __init__(
self, _func: _TCallable, factory: Callable[..., _TEvent], *factory_args: Any, **factory_kwargs: Any
) -> None:
self._func = _func
self.__factory = factory
self.__factory_args = factory_args
self.__factory_kwargs = factory_kwargs
self._owner: Optional[Any] = None
self._owner_name: Optional[str] = None
def __set_name__(self, owner: Any, name: str) -> None:
self._owner = owner
self._owner_name = name
def __get__(self, obj: Any, objtype: Type[Any]) -> _TEvent:
if obj is None:
return self # type: ignore
name = f"__event_{self._func.__name__}__"
if not hasattr(obj, name):
setattr(obj, name, self.__factory(*self.__factory_args, **self.__factory_kwargs))
return cast("_TEvent", getattr(obj, name))
class event_iterator(EventDescriptorBase[_TCallable, Any, EventIterator[_TCallable, Any]]): # noqa: N801
def __init__(self, _func: _TCallable) -> None:
super().__init__(_func, EventIterator[_TCallable, _TResult])
class event(EventDescriptorBase[_TCallable, Any, Event[_TCallable, Any]]): # noqa: N801
def __init__(self, _func: _TCallable) -> None:
super().__init__(_func, Event[_TCallable, _TResult])
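# Minimal usage sketch (illustrative only, not from the original module):
#
#   class Downloader:
#       @event
#       def on_progress(self, done: int, total: int) -> None:
#           ...
#
#   def report(done: int, total: int) -> None:
#       print(done, total)
#
#   d = Downloader()
#   d.on_progress.add(report)   # keep a strong reference; listeners are weakrefs
#   d.on_progress(3, 10)        # notifies every live listener and collects results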
| 2,507
| 320
| 510
|
f7f42f0ea5da15bfbe7816400c336946ad55b119
| 3,153
|
py
|
Python
|
validation/new/bare_model.py
|
FoVNull/MFDSL
|
8c6fc99260c1c02f4f45cfb14a111028d2a96ded
|
[
"MIT"
] | 2
|
2021-12-08T05:45:19.000Z
|
2022-03-04T01:00:32.000Z
|
validation/new/bare_model.py
|
FoVNull/MFDSL
|
8c6fc99260c1c02f4f45cfb14a111028d2a96ded
|
[
"MIT"
] | null | null | null |
validation/new/bare_model.py
|
FoVNull/MFDSL
|
8c6fc99260c1c02f4f45cfb14a111028d2a96ded
|
[
"MIT"
] | null | null | null |
from typing import Dict, Any
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from kashgari_local.abc_feature_model import ABCClassificationModel
from kashgari.layers import L
| 33.903226
| 100
| 0.607358
|
from typing import Dict, Any
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from kashgari_local.abc_feature_model import ABCClassificationModel
from kashgari.layers import L
class Bare_Model(ABCClassificationModel):
def __init__(self, embedding, **params):
super().__init__(embedding)
self.feature_D = params["feature_D"]
@classmethod
def default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
activation_function list:
{softmax, elu, selu, softplus, softsign, swish,
relu, gelu, tanh, sigmoid, exponential,
hard_sigmoid, linear, serialize, deserialize, get}
"""
return {
'layer_bilstm1': {
'units': 128,
'return_sequences': True
},
'layer_time_distributed': {},
'conv_layer1': {
'filters': 128,
'kernel_size': 4,
'padding': 'valid',
'activation': 'relu'
},
'layer_output1': {
'activation': 'softmax'
},
}
def build_model_arc(self):
"""
build model architectural
BiLSTM + Convolution + Attention
"""
features = tf.keras.Input(shape=(None, self.feature_D), name="features")
l1_reg = tf.keras.regularizers.l1(0.01)
l2_reg = tf.keras.regularizers.L2(0.01)
output_dim = self.label_processor.vocab_size
config = self.hyper_parameters
embed_model = self.embedding.embed_model
# Define layers for BiLSTM
layer_stack = [
L.Bidirectional(L.LSTM(**config['layer_bilstm1'])),
L.Dropout(rate=0.2),
]
# tensor flow in Layers {tensor:=layer(tensor)}
tensor = embed_model.output
for layer in layer_stack:
tensor = layer(tensor)
# extend features
features_tensor = L.Dense(64, kernel_regularizer=l1_reg)(features)
# tensor = L.Concatenate(axis=-1)([features_tensor, tensor])
query = L.Concatenate(axis=-1)([tensor, features_tensor])
key = L.Concatenate(axis=-1)([features_tensor, tensor])
query_value_attention_seq = L.Attention()([query, key])
# query_value_attention_seq = L.MultiHeadAttention(
# num_heads=4, key_dim=2, dropout=0.5
# )(tensor, tensor)
query_encoding = L.GlobalMaxPool1D()(query)
query_value_attention = L.GlobalMaxPool1D()(query_value_attention_seq)
input_tensor = L.Concatenate(axis=1)([query_encoding, query_value_attention])
# output tensor
input_tensor = L.Dropout(rate=0.1)(input_tensor)
output_tensor = L.Dense(
output_dim, activation='sigmoid', name="output0",
kernel_regularizer=l2_reg
)(input_tensor)
self.tf_model = tf.keras.Model(inputs=[embed_model.inputs, features], outputs=output_tensor)
# plot_model(self.tf_model, to_file="D:/PycProject/TripleC/reference/model.png")
| 100
| 2,831
| 23
|
2060a376f3709a1461c8c0665bd025049e2615a2
| 7,773
|
py
|
Python
|
stiny/__init__.py
|
stevearc/stiny
|
9577433a167beb548ce1db19d46f163a6624c334
|
[
"MIT"
] | null | null | null |
stiny/__init__.py
|
stevearc/stiny
|
9577433a167beb548ce1db19d46f163a6624c334
|
[
"MIT"
] | null | null | null |
stiny/__init__.py
|
stevearc/stiny
|
9577433a167beb548ce1db19d46f163a6624c334
|
[
"MIT"
] | null | null | null |
""" Stiny - A home automation assistant """
import os
import posixpath
import calendar
import datetime
import json
import logging
import requests
from collections import defaultdict
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator
from pyramid.httpexceptions import exception_response
from pyramid.renderers import JSON, render, render_to_response
from pyramid.settings import asbool, aslist
from pyramid_beaker import session_factory_from_settings
from twilio.util import RequestValidator
from .gutil import Calendar, normalize_email
LOG = logging.getLogger(__name__)
def to_json(value):
""" A json filter for jinja2 """
return render('json', value)
json_renderer = JSON() # pylint: disable=C0103
json_renderer.add_adapter(datetime.datetime, lambda obj, r:
1000 * calendar.timegm(obj.utctimetuple()))
json_renderer.add_adapter(datetime.date,
lambda obj, _: obj.isoformat())
json_renderer.add_adapter(defaultdict,
lambda obj, _: dict(obj))
json_renderer.add_adapter(Exception,
lambda e, _: str(e))
def _error(request, error, message='Unknown error', status_code=500):
"""
Construct an error response
Parameters
----------
error : str
Identifying error key
message : str, optional
Human-readable error message
status_code : int, optional
HTTP return code (default 500)
"""
data = {
'error': error,
'msg': message,
}
LOG.error("%s: %s", error, message)
request.response.status_code = status_code
return render_to_response('json', data, request, response=request.response)
def _raise_error(request, error, message='Unknown error', status_code=500):
"""
Raise an error response.
Use this when you need to return an error to the client from inside of
nested function calls.
Parameters
----------
error : str
Identifying error key
message : str, optional
Human-readable error message
status_code : int, optional
HTTP return code (default 500)
"""
err = exception_response(status_code, detail=message)
err.error = error
raise err
def _auth_callback(userid, request):
""" Get permissions for a user with an email. """
n_userid = normalize_email(userid)
perms = []
# If permissions are declared in the config.ini file, just use those.
setting = request.registry.settings.get('auth.' + n_userid)
if setting is not None:
principals = aslist(setting)
else:
principals = []
if request.cal.is_guest(n_userid):
principals.append('unlock')
perms.extend(principals)
return perms
def includeme(config):
""" Set up and configure the app """
settings = config.get_settings()
config.include('pyramid_beaker')
config.include('pyramid_duh')
config.include('pyramid_webpack')
config.include('stiny.route')
config.add_renderer('json', json_renderer)
# Jinja2 configuration
settings['jinja2.filters'] = {
'static_url': 'pyramid_jinja2.filters:static_url_filter',
'json': to_json,
}
settings['jinja2.directories'] = ['stiny:templates']
settings['jinja2.extensions'] = ['pyramid_webpack.jinja2ext:WebpackExtension']
config.include('pyramid_jinja2')
config.commit()
# Beaker configuration
settings.setdefault('session.type', 'cookie')
settings.setdefault('session.httponly', 'true')
config.set_session_factory(session_factory_from_settings(settings))
config.set_default_csrf_options(require_csrf=True, token=None)
# Set admins from environment variable for local development
if 'STINY_ADMINS' in os.environ:
for email in aslist(os.environ['STINY_ADMINS']):
email = normalize_email(email)
settings['auth.' + email] = 'admin'
# Set guests from environment variable for local development
if 'STINY_GUESTS' in os.environ:
for email in aslist(os.environ['STINY_GUESTS']):
email = normalize_email(email)
settings['auth.' + email] = 'unlock'
# Special request methods
config.add_request_method(_error, name='error')
config.add_request_method(_raise_error, name='raise_error')
config.add_request_method(lambda r, *a, **k: r.route_url('root', *a, **k),
name='rooturl')
config.add_request_method(lambda r, u: _auth_callback(u, r),
name='user_principals')
config.add_request_method(lambda r: r.registry.settings.get('google.client_id'),
name='google_client_id', reify=True)
config.registry.phone_access = aslist(settings.get('phone_access', []))
config.add_static_view(name='static', path='stiny:static',
cache_max_age=10 * 365 * 24 * 60 * 60)
# Auth
config.set_authorization_policy(ACLAuthorizationPolicy())
config.set_authentication_policy(AuthTktAuthenticationPolicy(
secret=settings['authtkt.secret'],
cookie_name=settings.get('auth.cookie_name', 'auth_tkt'),
secure=asbool(settings.get('auth.secure', False)),
timeout=int(settings.get('auth.timeout', 60 * 60 * 24 * 30)),
reissue_time=int(settings.get('auth.reissue_time', 60 * 60 * 24 * 15)),
max_age=int(settings.get('auth.max_age', 60 * 60 * 24 * 30)),
http_only=asbool(settings.get('auth.http_only', True)),
hashalg='sha512',
callback=_auth_callback,
))
config.set_default_permission('default')
# Calendar
config.registry.GOOGLE_WEB_CLIENT_ID = settings.setdefault(
'google.client_id',
os.environ.get('STINY_DEV_CLIENT_GOOGLE_CLIENT_ID'))
server_client_id = settings.get('google.server_client_id')
if server_client_id is None:
server_client_id = os.environ['STINY_SERVER_GOOGLE_CLIENT_ID']
config.registry.GOOGLE_CLIENT_ID = server_client_id
client_secret = settings.get('google.server_client_secret')
if client_secret is None:
client_secret = os.environ['STINY_SERVER_GOOGLE_CLIENT_SECRET']
cal_id = settings.get('google.calendar_id')
if cal_id is None:
cal_id = os.environ['STINY_CAL_ID']
cal = Calendar(server_client_id, client_secret, calendar_id=cal_id)
config.registry.calendar = cal
config.add_request_method(lambda r: r.registry.calendar, 'cal', reify=True)
twilio_token = settings.get('twilio.auth_token')
if twilio_token is None:
twilio_token = os.environ['STINY_TWILIO_AUTH_TOKEN']
config.registry.twilio_validator = RequestValidator(twilio_token)
config.add_request_method(_validate_twilio, name='validate_twilio')
config.add_request_method(_call_worker, name='call_worker')
config.scan()
def main(config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
config.include('stiny')
return config.make_wsgi_app()
| 34.856502
| 84
| 0.684034
|
""" Stiny - A home automation assistant """
import os
import posixpath
import calendar
import datetime
import json
import logging
import requests
from collections import defaultdict
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator
from pyramid.httpexceptions import exception_response
from pyramid.renderers import JSON, render, render_to_response
from pyramid.settings import asbool, aslist
from pyramid_beaker import session_factory_from_settings
from twilio.util import RequestValidator
from .gutil import Calendar, normalize_email
LOG = logging.getLogger(__name__)
def to_json(value):
""" A json filter for jinja2 """
return render('json', value)
json_renderer = JSON() # pylint: disable=C0103
json_renderer.add_adapter(datetime.datetime, lambda obj, r:
1000 * calendar.timegm(obj.utctimetuple()))
json_renderer.add_adapter(datetime.date,
lambda obj, _: obj.isoformat())
json_renderer.add_adapter(defaultdict,
lambda obj, _: dict(obj))
json_renderer.add_adapter(Exception,
lambda e, _: str(e))
def _error(request, error, message='Unknown error', status_code=500):
"""
Construct an error response
Parameters
----------
error : str
Identifying error key
message : str, optional
Human-readable error message
status_code : int, optional
HTTP return code (default 500)
"""
data = {
'error': error,
'msg': message,
}
LOG.error("%s: %s", error, message)
request.response.status_code = status_code
return render_to_response('json', data, request, response=request.response)
def _raise_error(request, error, message='Unknown error', status_code=500):
"""
Raise an error response.
Use this when you need to return an error to the client from inside of
nested function calls.
Parameters
----------
error : str
Identifying error key
message : str, optional
Human-readable error message
status_code : int, optional
HTTP return code (default 500)
"""
err = exception_response(status_code, detail=message)
err.error = error
raise err
def _auth_callback(userid, request):
""" Get permissions for a user with an email. """
n_userid = normalize_email(userid)
perms = []
# If permissions are declared in the config.ini file, just use those.
setting = request.registry.settings.get('auth.' + n_userid)
if setting is not None:
principals = aslist(setting)
else:
principals = []
if request.cal.is_guest(n_userid):
principals.append('unlock')
perms.extend(principals)
return perms
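# For example, a hypothetical settings entry "auth.someone@example.com = admin unlock"
# grants that user the "admin" and "unlock" principals straight from the config,
# while calendar guests get "unlock" from the is_guest() check.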
def _validate_twilio(request):
signature = request.headers.get('X-Twilio-Signature')
if signature is None:
return False
validator = request.registry.twilio_validator
return validator.validate(request.url, {}, signature)
def _call_worker(request, worker, command, **kwargs):
host = request.registry.settings['worker.' + worker]
fullpath = host + '/do/' + command
headers = {'content-type': 'application/json'}
response = requests.post(fullpath, data=json.dumps(kwargs),
headers=headers)
response.raise_for_status()
return response.json()
def includeme(config):
""" Set up and configure the app """
settings = config.get_settings()
config.include('pyramid_beaker')
config.include('pyramid_duh')
config.include('pyramid_webpack')
config.include('stiny.route')
config.add_renderer('json', json_renderer)
# Jinja2 configuration
settings['jinja2.filters'] = {
'static_url': 'pyramid_jinja2.filters:static_url_filter',
'json': to_json,
}
settings['jinja2.directories'] = ['stiny:templates']
settings['jinja2.extensions'] = ['pyramid_webpack.jinja2ext:WebpackExtension']
config.include('pyramid_jinja2')
config.commit()
# Beaker configuration
settings.setdefault('session.type', 'cookie')
settings.setdefault('session.httponly', 'true')
config.set_session_factory(session_factory_from_settings(settings))
config.set_default_csrf_options(require_csrf=True, token=None)
# Set admins from environment variable for local development
if 'STINY_ADMINS' in os.environ:
for email in aslist(os.environ['STINY_ADMINS']):
email = normalize_email(email)
settings['auth.' + email] = 'admin'
# Set guests from environment variable for local development
if 'STINY_GUESTS' in os.environ:
for email in aslist(os.environ['STINY_GUESTS']):
email = normalize_email(email)
settings['auth.' + email] = 'unlock'
# Special request methods
config.add_request_method(_error, name='error')
config.add_request_method(_raise_error, name='raise_error')
config.add_request_method(lambda r, *a, **k: r.route_url('root', *a, **k),
name='rooturl')
config.add_request_method(lambda r, u: _auth_callback(u, r),
name='user_principals')
config.add_request_method(lambda r: r.registry.settings.get('google.client_id'),
name='google_client_id', reify=True)
config.registry.phone_access = aslist(settings.get('phone_access', []))
config.add_static_view(name='static', path='stiny:static',
cache_max_age=10 * 365 * 24 * 60 * 60)
# Auth
config.set_authorization_policy(ACLAuthorizationPolicy())
config.set_authentication_policy(AuthTktAuthenticationPolicy(
secret=settings['authtkt.secret'],
cookie_name=settings.get('auth.cookie_name', 'auth_tkt'),
secure=asbool(settings.get('auth.secure', False)),
timeout=int(settings.get('auth.timeout', 60 * 60 * 24 * 30)),
reissue_time=int(settings.get('auth.reissue_time', 60 * 60 * 24 * 15)),
max_age=int(settings.get('auth.max_age', 60 * 60 * 24 * 30)),
http_only=asbool(settings.get('auth.http_only', True)),
hashalg='sha512',
callback=_auth_callback,
))
config.set_default_permission('default')
# Calendar
config.registry.GOOGLE_WEB_CLIENT_ID = settings.setdefault(
'google.client_id',
os.environ.get('STINY_DEV_CLIENT_GOOGLE_CLIENT_ID'))
server_client_id = settings.get('google.server_client_id')
if server_client_id is None:
server_client_id = os.environ['STINY_SERVER_GOOGLE_CLIENT_ID']
config.registry.GOOGLE_CLIENT_ID = server_client_id
client_secret = settings.get('google.server_client_secret')
if client_secret is None:
client_secret = os.environ['STINY_SERVER_GOOGLE_CLIENT_SECRET']
cal_id = settings.get('google.calendar_id')
if cal_id is None:
cal_id = os.environ['STINY_CAL_ID']
cal = Calendar(server_client_id, client_secret, calendar_id=cal_id)
config.registry.calendar = cal
config.add_request_method(lambda r: r.registry.calendar, 'cal', reify=True)
twilio_token = settings.get('twilio.auth_token')
if twilio_token is None:
twilio_token = os.environ['STINY_TWILIO_AUTH_TOKEN']
config.registry.twilio_validator = RequestValidator(twilio_token)
config.add_request_method(_validate_twilio, name='validate_twilio')
config.add_request_method(_call_worker, name='call_worker')
config.scan()
def main(config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
config.include('stiny')
return config.make_wsgi_app()
| 570
| 0
| 46
|
fcfe2ea3b1145664ae4996cace5cb3b01ec0ed4b
| 776
|
py
|
Python
|
utils/get_params.py
|
sk1t0n/tg-bot-parser-hhru
|
abb59ea39ce0fb51fcc14f8b0195d2e9ba8d435e
|
[
"MIT"
] | null | null | null |
utils/get_params.py
|
sk1t0n/tg-bot-parser-hhru
|
abb59ea39ce0fb51fcc14f8b0195d2e9ba8d435e
|
[
"MIT"
] | null | null | null |
utils/get_params.py
|
sk1t0n/tg-bot-parser-hhru
|
abb59ea39ce0fb51fcc14f8b0195d2e9ba8d435e
|
[
"MIT"
] | null | null | null |
"""
Creates a URL to search jobs on specific filters.
"""
from typing import Dict, Union
import config
experience = {
'No': 'noExperience',
'1-3': 'between1And3',
'3-6': 'between3And6',
'More 6': 'moreThan6'
}
areas = {
'Moscow': '1',
'StPetersburg': '2',
'Krasnodar': '53'
}
| 21.555556
| 59
| 0.596649
|
"""
Creates a URL to search jobs on specific filters.
"""
from typing import Dict, Union
import config
experience = {
'No': 'noExperience',
'1-3': 'between1And3',
'3-6': 'between3And6',
'More 6': 'moreThan6'
}
areas = {
'Moscow': '1',
'StPetersburg': '2',
'Krasnodar': '53'
}
def create_url(filters: Dict[str, Union[str, int]]):
url = '{}?{}={}&{}={}&{}={}&{}={}'.format(
config.URL_JOB_SEARCH_HHRU,
config.PARAM_HHRU_QUERY,
filters[config.PARAM_HHRU_QUERY].replace(' ', '+'),
config.PARAM_HHRU_EXP,
filters[config.PARAM_HHRU_EXP],
config.PARAM_HHRU_AREA,
filters[config.PARAM_HHRU_AREA],
config.PARAM_HHRU_PAGE,
filters[config.PARAM_HHRU_PAGE]
)
return url
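# Shape of the result (hypothetical values; assumes the config constants map to
# hh.ru's text/experience/area/page parameters):
#   create_url({"text": "python dev", "experience": "between1And3", "area": "1", "page": 0})
#   -> "<URL_JOB_SEARCH_HHRU>?text=python+dev&experience=between1And3&area=1&page=0"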
| 444
| 0
| 23
|
f4115829a9a942fe7b9671300eb6856d3abd38c8
| 2,998
|
py
|
Python
|
Houdini/Handlers/Play/Item.py
|
amrmashriqi/Houdini
|
f91687a305ad88e2043057fd001620639bb5af0a
|
[
"MIT"
] | null | null | null |
Houdini/Handlers/Play/Item.py
|
amrmashriqi/Houdini
|
f91687a305ad88e2043057fd001620639bb5af0a
|
[
"MIT"
] | null | null | null |
Houdini/Handlers/Play/Item.py
|
amrmashriqi/Houdini
|
f91687a305ad88e2043057fd001620639bb5af0a
|
[
"MIT"
] | null | null | null |
from beaker.cache import cache_region as Cache, region_invalidate as Invalidate
from Houdini.Handlers import Handlers, XT
from Houdini.Handlers.Play.Moderation import cheatBan
from Houdini.Data.Penguin import Inventory
cardStarterDeckId = 821
fireBoosterDeckId = 8006
waterBoosterDeckId = 8010
boosterDecks = {
cardStarterDeckId: [1, 6, 9, 14, 17, 20, 22, 23, 26, 73, 89, 81],
fireBoosterDeckId: [3, 18, 216, 222, 229, 303, 304, 314, 319, 250, 352],
waterBoosterDeckId: [202, 204, 305, 15, 13, 312, 218, 220, 29, 90]
}
| 34.45977
| 93
| 0.704803
|
from beaker.cache import cache_region as Cache, region_invalidate as Invalidate
from Houdini.Handlers import Handlers, XT
from Houdini.Handlers.Play.Moderation import cheatBan
from Houdini.Data.Penguin import Inventory
cardStarterDeckId = 821
fireBoosterDeckId = 8006
waterBoosterDeckId = 8010
boosterDecks = {
cardStarterDeckId: [1, 6, 9, 14, 17, 20, 22, 23, 26, 73, 89, 81],
fireBoosterDeckId: [3, 18, 216, 222, 229, 303, 304, 314, 319, 250, 352],
waterBoosterDeckId: [202, 204, 305, 15, 13, 312, 218, 220, 29, 90]
}
@Handlers.Handle(XT.BuyInventory)
def handleBuyInventory(self, data):
if data.ItemId not in self.server.items:
return self.sendError(402)
elif data.ItemId in self.inventory:
return self.sendError(400)
if self.server.items.isBait(data.ItemId):
return cheatBan(self, self.user.ID, comment="Added bait item")
if self.server.items.isTourGuide(data.ItemId):
self.receiveSystemPostcard(126)
if data.ItemId in boosterDecks:
self.addCards(*boosterDecks[data.ItemId])
itemCost = self.server.items.getCost(data.ItemId)
if self.user.Coins < itemCost:
return self.sendError(401)
self.addItem(data.ItemId, itemCost)
Invalidate(getPinString, 'houdini', 'pins', self.user.ID)
Invalidate(getAwardsString, 'houdini', 'awards', self.user.ID)
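    # Purchases invalidate this penguin's cached pin/award strings so the next
    # qpp/qpa query rebuilds them from the updated inventory.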
@Handlers.Handle(XT.GetInventory)
@Handlers.Throttle(-1)
def handleGetInventory(self, data):
self.sendXt("gi", "%".join(map(str, self.inventory)))
@Cache('houdini', 'pins')
def getPinString(self, penguinId):
def getString(pinId):
isMember = int(self.server.items[pinId].Member)
timestamp = self.server.pins.getUnixTimestamp(pinId)
return "|".join(map(str, [pinId, timestamp, isMember]))
if penguinId in self.server.players:
pinsArray = [getString(itemId) for itemId in self.server.players[penguinId].inventory
if self.server.items.isItemPin(itemId)]
else:
pinsArray = [getString(itemId) for itemId, in self.session.query(Inventory.ItemID)
.filter_by(PenguinID=penguinId) if self.server.items.isItemPin(itemId)]
return "%".join(pinsArray)
@Cache('houdini', 'awards')
def getAwardsString(self, penguinId):
if penguinId in self.server.players:
awardsArray = [str(itemId) for itemId in self.server.players[penguinId].inventory
if self.server.items.isItemAward(itemId)]
else:
awardsArray = [str(itemId) for itemId, in self.session.query(Inventory.ItemID)
.filter_by(PenguinID=penguinId) if self.server.items.isItemAward(itemId)]
return "|".join(awardsArray)
@Handlers.Handle(XT.GetPlayerPins)
@Handlers.Throttle()
def handleGetPlayerPins(self, data):
self.sendXt("qpp", getPinString(self, data.PlayerId))
@Handlers.Handle(XT.GetPlayerAwards)
@Handlers.Throttle()
def handleGetPlayerAwards(self, data):
self.sendXt("qpa", data.PlayerId, getAwardsString(self, data.PlayerId))
| 2,064
| 0
| 132
|
2ec159a3ea2dc9bd376faa5ae61d365d7adf7567
| 20,985
|
py
|
Python
|
main.py
|
YOULOF2/Galactika
|
de1964ae0643590b614d7d8ee721d69926bb1075
|
[
"MIT"
] | 1
|
2021-03-25T10:43:55.000Z
|
2021-03-25T10:43:55.000Z
|
main.py
|
YOULOF2/Galactika
|
de1964ae0643590b614d7d8ee721d69926bb1075
|
[
"MIT"
] | null | null | null |
main.py
|
YOULOF2/Galactika
|
de1964ae0643590b614d7d8ee721d69926bb1075
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, render_template, redirect, url_for, flash, abort, send_from_directory
from flask_bootstrap import Bootstrap
from flask_ckeditor import CKEditor
from datetime import date
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
from flask_login import UserMixin, login_user, LoginManager, current_user, logout_user
from forms import SettingsForm, CreatePostForm, RegisterForm, LoginForm, CommentForm
from flask_gravatar import Gravatar
from functools import wraps
import os
import requests
from errors import *
from wallpapers import WALLPAPERS
from dotenv import load_dotenv
from PyPDF2 import PdfFileMerger, PdfFileReader
from random import choice
import json
from flask_weasyprint import HTML, render_pdf, CSS
from time import sleep
load_dotenv()
# ==================================================================================================================== #
HASHING_METHOD = "pbkdf2:sha256"
SALT_TIMES = 8
APP_SECRET_KEY = os.environ.get("APP_SECRET_KEY")
DATABASE_URL = os.environ.get("DATABASE_URL", "sqlite:///blog.db")
NEWS_API_KEY = os.environ.get("NEWS_API_KEY")
ENDPOINT = "http://newsapi.org/v2/top-headlines"
DEFAULT_BG = "https://images.unsplash.com/photo-1464802686167-b939a6910659?crop=entropy&cs=srgb&fm=jpg&ixid" \
"=MnwyMTQyMTB8MHwxfHNlYXJjaHwxfHxzcGFjZXxlbnwwfDB8fHwxNjE1ODQzNjk2&ixlib=rb-1.2.1&q=85"
wallpapers = [wallpaper["urls"]["regular"] for wallpaper in WALLPAPERS[:50]]
# ==================================================================================================================== #
app = Flask(__name__)
app.config['SECRET_KEY'] = APP_SECRET_KEY
ckeditor = CKEditor(app)
Bootstrap(app)
# ==================================================================================================================== #
# CONNECT TO DB
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URL
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# ==================================================================================================================== #
gravatar = Gravatar(app,
size=100,
rating='g',
default='retro',
force_default=False,
force_lower=False,
use_ssl=False,
base_url=None)
# ==================================================================================================================== #
login_manager = LoginManager()
login_manager.init_app(app)
# ==================================================================================================================== #
# Functions
# ==================================================================================================================== #
# CONFIGURE TABLES
# ==================================================================================================================== #
@login_manager.user_loader
# db.create_all()
# ==================================================================================================================== #
@app.route('/register', methods=["GET", "POST"])
@app.route('/login', methods=["GET", "POST"])
@app.route('/logout')
@app.route("/delete_user/<user_id>", methods=["POST", "GET"])
@app.route("/user-settings/<int:user_id>", methods=["POST", "GET"])
@app.route("/transfer_to_settings")
@app.route("/setwallpaper/<int:wallpaper_number>")
@app.route("/magazine", methods=["GET", "POST"])
# ==================================================================================================================== #
# Home page
featured_posts = get_top_news()
@app.route("/")
@app.route("/refresh-news")
@app.route("/flash-news")
# ==================================================================================================================== #
@app.route('/blog')
@app.route("/blog/post/<int:post_id>", methods=["GET", "POST"])
@app.route("/blog/new-post", methods=["POST", "GET"])
@admin_only
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# Admin Panel #
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
@app.route("/admin_panel")
@admin_only
@app.route("/acess/<acess_type>/<action>/<user_id>")
@admin_only
@app.route("/edit-post/<int:post_id>", methods=["GET", "POST"])
@admin_only
@app.route("/delete/<int:post_id>")
@admin_only
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# Not found pages
@app.errorhandler(404)
@app.errorhandler(403)
@app.errorhandler(500)
# ==================================================================================================================== #
if __name__ == "__main__":
app.run(debug=True)
| 39.744318
| 121
| 0.530474
|
from flask import Flask, request, render_template, redirect, url_for, flash, abort, send_from_directory
from flask_bootstrap import Bootstrap
from flask_ckeditor import CKEditor
from datetime import date
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
from flask_login import UserMixin, login_user, LoginManager, current_user, logout_user
from forms import SettingsForm, CreatePostForm, RegisterForm, LoginForm, CommentForm
from flask_gravatar import Gravatar
from functools import wraps
import os
import requests
from errors import *
from wallpapers import WALLPAPERS
from dotenv import load_dotenv
from PyPDF2 import PdfFileMerger, PdfFileReader
import os
import requests
from random import choice
import json
from flask_weasyprint import HTML, render_pdf, CSS
from time import sleep
load_dotenv()
# ==================================================================================================================== #
HASHING_METHOD = "pbkdf2:sha256"
SALT_TIMES = 8
APP_SECRET_KEY = os.environ.get("APP_SECRET_KEY")
DATABASE_URL = os.environ.get("DATABASE_URL", "sqlite:///blog.db")
NEWS_API_KEY = os.environ.get("NEWS_API_KEY")
ENDPOINT = "http://newsapi.org/v2/top-headlines"
DEFAULT_BG = "https://images.unsplash.com/photo-1464802686167-b939a6910659?crop=entropy&cs=srgb&fm=jpg&ixid" \
"=MnwyMTQyMTB8MHwxfHNlYXJjaHwxfHxzcGFjZXxlbnwwfDB8fHwxNjE1ODQzNjk2&ixlib=rb-1.2.1&q=85"
wallpapers = [wallpaper["urls"]["regular"] for wallpaper in WALLPAPERS[:50]]
# ==================================================================================================================== #
app = Flask(__name__)
app.config['SECRET_KEY'] = APP_SECRET_KEY
ckeditor = CKEditor(app)
Bootstrap(app)
# ==================================================================================================================== #
# CONNECT TO DB
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URL
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# ==================================================================================================================== #
gravatar = Gravatar(app,
size=100,
rating='g',
default='retro',
force_default=False,
force_lower=False,
use_ssl=False,
base_url=None)
# ==================================================================================================================== #
login_manager = LoginManager()
login_manager.init_app(app)
# ==================================================================================================================== #
# Functions
def admin_only(function):
@wraps(function)
def wrapper_function(*args, **kwargs):
if len(User.query.all()) == 1:
return function(*args, **kwargs)
else:
try:
user_id = int(current_user.get_id())
except TypeError:
return abort(403)
else:
user = User.query.get(user_id)
if user.admin_acess:
return function(*args, **kwargs)
else:
return abort(403)
return wrapper_function
def is_writer():
try:
user_id = int(current_user.get_id())
except TypeError:
return False
else:
user = User.query.get(user_id)
if user.writer_acess:
return True
return False
def get_top_news():
parameters = {
"country": "us",
"category": "technology",
"apiKey": NEWS_API_KEY
}
response = requests.get(url=ENDPOINT, params=parameters)
response.raise_for_status()
try:
return response.json()["articles"]
except KeyError:
return abort(500)
def get_favourite_wallpaper():
user_id = current_user.get_id()
if user_id is not None:
user = User.query.get(user_id)
return user.favourite_bg
else:
return DEFAULT_BG
# ==================================================================================================================== #
# CONFIGURE TABLES
class User(db.Model, UserMixin):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
# ********** Add Children Relationship ********** #
posts = relationship("BlogPost", back_populates="author")
comments = relationship("Comment", back_populates="comment_author")
# *********************************************** #
email = db.Column(db.String(250), nullable=False, unique=True)
password = db.Column(db.String(250), nullable=False)
name = db.Column(db.String(250), nullable=False)
admin_acess = db.Column(db.Boolean, nullable=False)
writer_acess = db.Column(db.Boolean, nullable=False)
favourite_bg = db.Column(db.String(250), nullable=False)
class BlogPost(db.Model):
__tablename__ = "blogposts"
id = db.Column(db.Integer, primary_key=True)
# ********** Add Parent Relationship ********** #
    # Create the foreign key "users.id"; "users" refers to the __tablename__ of the User model.
author_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    # Create a reference to the User object; "posts" refers to the posts property in the User class.
author = relationship("User", back_populates="posts")
# ********************************************* #
# ********** Add Children Relationship ********** #
comments = relationship("Comment", back_populates="parent_post")
# ********************************************* #
title = db.Column(db.String(250), unique=True, nullable=False)
subtitle = db.Column(db.String(250), nullable=False)
date = db.Column(db.String(250), nullable=False)
body = db.Column(db.Text, nullable=False)
img_url = db.Column(db.String(250), nullable=False)
class Comment(db.Model):
__tablename__ = "comments"
id = db.Column(db.Integer, primary_key=True)
# ********** Add Parent Relationship ********** #
author_id = db.Column(db.Integer, db.ForeignKey("users.id"))
comment_author = relationship("User", back_populates="comments")
parent_post = relationship("BlogPost", back_populates="comments")
post_id = db.Column(db.Integer, db.ForeignKey("blogposts.id"))
# ********************************************* #
text = db.Column(db.Text, nullable=False)
class NewsLetterMaker:
def __init__(self):
self.merger = PdfFileMerger()
self.issue_location = "static/newsletter/pdfs/final_issue.pdf"
self.issue_pages = ["static/newsletter/pdfs/cover_page.pdf", "page1.pdf", "page2.pdf", "page3.pdf", "page4.pdf"]
trivia_questions = requests.get(url="https://opentdb.com/api.php?amount=5&type=boolean").json()["results"]
with open("quotes.json") as file:
file_data = json.load(file)["quotes"]
self.random_quotes = []
for i in range(6):
random_quote = choice(file_data)
self.random_quotes.append(random_quote)
self.all_data = {
"trivia": trivia_questions,
"quotes": self.random_quotes
}
def make_magic(self):
"""
        This method takes the relevant information and automatically produces a PDF.
        The pages are generated according to the self.issue_pages list.
The output location can be changed by changing the self.issue_location string value.
"""
try:
os.remove(self.issue_location)
for file in self.issue_pages[1:]:
os.remove(file)
except FileNotFoundError:
pass
# for i in range(1, len(self.issue_pages)):
html = render_template(f"newsletter/page{3}.html", all_data=self.all_data, encoding="UTF-8")
        # clean_html = html.replace("&quot;", '"')
# with open("test_file.html", "w") as file:
# file.write(clean_html)
file = HTML(string=html)
file.write_pdf(f"page{3}.pdf")
# # sleep(5)
# for file in self.issue_pages:
# self.merger.append(PdfFileReader(open(file, 'rb')), import_bookmarks=False)
# self.merger.write(self.issue_location)
# ==================================================================================================================== #
@login_manager.user_loader
def load_user(user_id):
return User.query.get(user_id)
# db.create_all()
# ==================================================================================================================== #
@app.route('/register', methods=["GET", "POST"])
def register():
form = RegisterForm()
if form.validate_on_submit():
if User.query.filter_by(email=form.email.data).first() is None:
hashed_password = generate_password_hash(form.password.data, method=HASHING_METHOD, salt_length=SALT_TIMES)
new_user = User(
email=form.email.data,
password=hashed_password,
name=form.user_name.data,
admin_acess=False,
writer_acess=False,
favourite_bg=DEFAULT_BG
)
db.session.add(new_user)
db.session.commit()
login_user(new_user)
return redirect(url_for("home"))
else:
flash(ALREADY_LOGGED_IN_ERROR)
return redirect(url_for("login"))
return render_template("register.html", form=form, task="Register", favourite_bg=get_favourite_wallpaper())
@app.route('/login', methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None:
user_hashed_pass = user.password
correct_password = check_password_hash(user_hashed_pass, form.password.data)
if correct_password:
login_user(user)
return redirect(url_for("home"))
else:
flash(PASSWORD_ERROR)
return render_template("login.html", form=form, task="Login", favourite_bg=get_favourite_wallpaper())
else:
flash(EMAIL_ERROR)
return render_template("login.html", form=form, task="Login", favourite_bg=get_favourite_wallpaper())
return render_template("login.html", form=form, task="Login", favourite_bg=get_favourite_wallpaper())
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('home'))
@app.route("/delete_user/<user_id>", methods=["POST", "GET"])
def delete_user(user_id):
requested_user = User.query.get(user_id)
db.session.delete(requested_user)
db.session.commit()
return redirect(url_for("admin_dashboard"))
@app.route("/user-settings/<int:user_id>", methods=["POST", "GET"])
def settings(user_id):
user = User.query.get(user_id)
form = SettingsForm(
name=user.name,
email=user.email,
)
if request.method == "POST":
if form.validate_on_submit():
new_email = form.email.data
new_name = form.name.data
user.email = new_email
user.name = new_name
db.session.commit()
return redirect(url_for("home"))
print(current_user.get_id())
return render_template("settings.html", form=form, user_logged_in=current_user.is_authenticated,
task="User Settings", favourite_bg=get_favourite_wallpaper(), all_wallpapers=wallpapers,
wallpaper_num=len(wallpapers))
@app.route("/transfer_to_settings")
def go_to_settings():
return redirect(url_for("settings", user_id=current_user.get_id()))
@app.route("/setwallpaper/<int:wallpaper_number>")
def set_wallapper(wallpaper_number):
chosen_wallpaper = wallpapers[wallpaper_number]
user = User.query.get(current_user.get_id())
user.favourite_bg = chosen_wallpaper
db.session.commit()
return redirect(url_for("home"))
@app.route("/magazine", methods=["GET", "POST"])
def magazine():
if request.method == "POST":
maker = NewsLetterMaker()
maker.make_magic()
return send_from_directory(directory="static/newsletter/pdfs", filename="final_issue.pdf")
return render_template("get-mag.html", favourite_bg=get_favourite_wallpaper(), task="Magazine Download",
user_logged_in=current_user.is_authenticated)
# ==================================================================================================================== #
# Home page
featured_posts = get_top_news()
@app.route("/")
def home():
posts = BlogPost.query.all()
posts.reverse()
if len(posts) != 0:
return render_template("home/index.html", featured_posts=featured_posts,
user_logged_in=current_user.is_authenticated, task="Home",
favourite_bg=get_favourite_wallpaper(), show_posts=True, latest_post=posts[0])
else:
return render_template("home/index.html", featured_posts=featured_posts,
user_logged_in=current_user.is_authenticated, task="Home",
favourite_bg=get_favourite_wallpaper(), show_posts=False)
@app.route("/refresh-news")
def refresh():
global featured_posts
featured_posts = get_top_news()
return redirect(url_for("flash_news"))
@app.route("/flash-news")
def flash_news():
if current_user.is_authenticated:
user = User.query.get(current_user.get_id())
return render_template("home/flash_news.html", featured_posts=featured_posts, bg_image=user.favourite_bg,
task="Flash News", favourite_bg=get_favourite_wallpaper())
else:
return render_template("home/flash_news.html", featured_posts=featured_posts, bg_image=DEFAULT_BG,
task="Flash News", favourite_bg=get_favourite_wallpaper())
# ==================================================================================================================== #
@app.route('/blog')
def get_all_posts():
posts = BlogPost.query.all()
if not is_writer():
return render_template("galactic blog/index.html", all_posts=posts,
user_logged_in=current_user.is_authenticated, task="Blog",
favourite_bg=get_favourite_wallpaper())
else:
return render_template("galactic blog/index.html", all_posts=posts, user_logged_in=True, admin_access=True,
task="Blog", favourite_bg=get_favourite_wallpaper())
@app.route("/blog/post/<int:post_id>", methods=["GET", "POST"])
def show_post(post_id):
requested_post = BlogPost.query.get(post_id)
form = CommentForm()
if form.validate_on_submit():
if current_user.is_authenticated:
new_comment = Comment(
text=form.body.data,
comment_author=current_user,
parent_post=requested_post
)
db.session.add(new_comment)
db.session.commit()
print(requested_post.comments)
else:
flash(COMMENT_LOGIN_ERROR)
return redirect(url_for("login"))
return render_template("galactic blog/post.html", post=requested_post, is_writer=is_writer(), post_id=post_id,
form=form,
user_logged_in=current_user.is_authenticated, task="Blog Post",
favourite_bg=get_favourite_wallpaper(), hide_bg=True)
@app.route("/blog/new-post", methods=["POST", "GET"])
@admin_only
def add_new_post():
form = CreatePostForm()
if form.validate_on_submit():
new_post = BlogPost(
title=form.title.data,
subtitle=form.subtitle.data,
body=form.body.data,
img_url=form.img_url.data,
author=current_user,
date=date.today().strftime("%B %d, %Y")
)
db.session.add(new_post)
db.session.commit()
return redirect(url_for("get_all_posts"))
return render_template("galactic blog/make-post.html", form=form, task="New Blog Post",
favourite_bg=get_favourite_wallpaper())
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# Admin Panel #
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
@app.route("/admin_panel")
@admin_only
def admin_dashboard():
all_users = User.query.all()
return render_template("admin panel/index.html", user_data=all_users)
@app.route("/acess/<acess_type>/<action>/<user_id>")
@admin_only
def acess(acess_type, user_id, action):
requested_user = User.query.get(user_id)
if action == "give":
if acess_type == "admin":
requested_user.admin_acess = True
elif acess_type == "writer":
requested_user.writer_acess = True
else:
if acess_type == "admin":
requested_user.admin_acess = False
elif acess_type == "writer":
requested_user.writer_acess = False
db.session.commit()
return redirect(url_for("admin_dashboard"))
@app.route("/edit-post/<int:post_id>", methods=["GET", "POST"])
@admin_only
def edit_post(post_id):
post = BlogPost.query.get(post_id)
edit_form = CreatePostForm(
title=post.title,
subtitle=post.subtitle,
img_url=post.img_url,
author=post.author,
body=post.body
)
if edit_form.validate_on_submit():
post.title = edit_form.title.data
post.subtitle = edit_form.subtitle.data
post.img_url = edit_form.img_url.data
post.author = post.author
post.body = edit_form.body.data
db.session.commit()
return redirect(url_for("show_post", post_id=post.id))
return render_template("galactic blog/make-post.html", form=edit_form)
@app.route("/delete/<int:post_id>")
@admin_only
def delete_post(post_id):
post_to_delete = BlogPost.query.get(post_id)
db.session.delete(post_to_delete)
db.session.commit()
return redirect(url_for('get_all_posts'))
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# ==================================================================================================================== #
# Not found pages
@app.errorhandler(404)
def page_not_found(e):
error_data = ERROR_CODES["404"]
return render_template("galactic blog/error.html", error=error_data)
@app.errorhandler(403)
def page_not_found(e):
error_data = ERROR_CODES["403"]
return render_template("galactic blog/error.html", error=error_data)
@app.errorhandler(500)
def page_not_found(e):
error_data = ERROR_CODES["500"]
return render_template("galactic blog/error.html", error=error_data)
# ==================================================================================================================== #
if __name__ == "__main__":
app.run(debug=True)
| 10,443
| 3,309
| 701
|
ac50c9f36dcbea9928111afa8816ae2d5946914f
| 3,013
|
py
|
Python
|
SynthQuickstartTemplate.py
|
aws-quickstart/quickstart-aws-biotech-blueprint-cdk
|
3500bf5ec455a034a7d2d1074b68073d13ad76cd
|
[
"Apache-2.0",
"MIT-0"
] | 8
|
2021-02-18T16:06:56.000Z
|
2022-03-11T16:24:42.000Z
|
SynthQuickstartTemplate.py
|
paulu-aws/quickstart-aws-biotech-blueprint-cdk
|
93cbe2d71dc72530fac99014773fde64bad56495
|
[
"Apache-2.0",
"MIT-0"
] | 7
|
2021-04-02T18:05:33.000Z
|
2022-02-15T22:48:22.000Z
|
SynthQuickstartTemplate.py
|
paulu-aws/quickstart-aws-biotech-blueprint-cdk
|
93cbe2d71dc72530fac99014773fde64bad56495
|
[
"Apache-2.0",
"MIT-0"
] | 9
|
2021-02-18T16:14:25.000Z
|
2022-03-30T20:17:04.000Z
|
import os
import yaml
folders = []
files = []
for entry in os.scandir('./lambda_functions/source/'):
if entry.is_dir():
if "asset." not in entry.path:
print("WARN: Skipping path...")
else:
folders.append(entry.path)
templateStream = open('./templates/AwsBiotechBlueprint.template.yml', 'r')
templateData = yaml.safe_load(templateStream)
taskcatConfigStream = open('./.taskcat.yml', 'r')
taskcatConfig = yaml.safe_load(taskcatConfigStream)
for assetFolder in folders:
assetFolderComponents = assetFolder.split('asset.')
assetId = assetFolderComponents[1]
for parameter in templateData['Parameters']:
if assetId in parameter:
if 'S3Bucket' in parameter:
templateData['Parameters'][parameter]['Default'] = "aws-quickstart"
taskcatConfig['tests']['default']['parameters'][parameter] = '$[taskcat_autobucket]'
templateData['Conditions'][f'UsingDefaultQuickstartBucket{assetId}'] = {
"Fn::Equals" : [{"Ref" : parameter}, "aws-quickstart"]
}
if 'VersionKey' in parameter:
templateData['Parameters'][parameter]['Default'] = f"quickstart-aws-biotech-blueprint-cdk/lambda_functions/packages/asset{assetId}/||lambda.zip"
taskcatConfig['tests']['default']['parameters'][parameter] = f"quickstart-aws-biotech-blueprint-cdk/lambda_functions/packages/asset{assetId}/||lambda.zip"
if 'ArtifactHash' in parameter:
templateData['Parameters'][parameter]['Default'] = assetId
taskcatConfig['tests']['default']['parameters'][parameter] = assetId
for resource in templateData['Resources']:
resourceType = templateData['Resources'][resource]['Type']
if resourceType == 'AWS::Lambda::Function':
if "S3Bucket" in templateData['Resources'][resource]['Properties']['Code']:
if assetId in templateData['Resources'][resource]['Properties']['Code']['S3Bucket']['Ref']:
bucketParamName = templateData['Resources'][resource]['Properties']['Code']['S3Bucket']['Ref']
templateData['Resources'][resource]['Properties']['Code']['S3Bucket'] = {
"Fn::If": [f'UsingDefaultQuickstartBucket{assetId}', { "Fn::Join" : ['-', [ {"Ref": bucketParamName} , {"Ref": 'AWS::Region'} ] ] } , {"Ref": bucketParamName}]
}
os.replace(assetFolder, f"./lambda_functions/source/asset{assetId}")
with open('./templates/AwsBiotechBlueprint.template.quickstart.yml', 'w') as yaml_file:
yaml_file.write( yaml.dump(templateData, default_flow_style=False))
with open('./.taskcat.yml', 'w') as yaml_file:
yaml_file.write( yaml.dump(taskcatConfig, default_flow_style=False))
| 41.847222
| 183
| 0.596747
|
import os
import yaml
folders = []
files = []
for entry in os.scandir('./lambda_functions/source/'):
if entry.is_dir():
if "asset." not in entry.path:
print("WARN: Skipping path...")
else:
folders.append(entry.path)
templateStream = open('./templates/AwsBiotechBlueprint.template.yml', 'r')
templateData = yaml.safe_load(templateStream)
taskcatConfigStream = open('./.taskcat.yml', 'r')
taskcatConfig = yaml.safe_load(taskcatConfigStream)
for assetFolder in folders:
assetFolderComponents = assetFolder.split('asset.')
assetId = assetFolderComponents[1]
for parameter in templateData['Parameters']:
if assetId in parameter:
if 'S3Bucket' in parameter:
templateData['Parameters'][parameter]['Default'] = "aws-quickstart"
taskcatConfig['tests']['default']['parameters'][parameter] = '$[taskcat_autobucket]'
templateData['Conditions'][f'UsingDefaultQuickstartBucket{assetId}'] = {
"Fn::Equals" : [{"Ref" : parameter}, "aws-quickstart"]
}
if 'VersionKey' in parameter:
templateData['Parameters'][parameter]['Default'] = f"quickstart-aws-biotech-blueprint-cdk/lambda_functions/packages/asset{assetId}/||lambda.zip"
taskcatConfig['tests']['default']['parameters'][parameter] = f"quickstart-aws-biotech-blueprint-cdk/lambda_functions/packages/asset{assetId}/||lambda.zip"
if 'ArtifactHash' in parameter:
templateData['Parameters'][parameter]['Default'] = assetId
taskcatConfig['tests']['default']['parameters'][parameter] = assetId
for resource in templateData['Resources']:
resourceType = templateData['Resources'][resource]['Type']
if resourceType == 'AWS::Lambda::Function':
if "S3Bucket" in templateData['Resources'][resource]['Properties']['Code']:
if assetId in templateData['Resources'][resource]['Properties']['Code']['S3Bucket']['Ref']:
bucketParamName = templateData['Resources'][resource]['Properties']['Code']['S3Bucket']['Ref']
templateData['Resources'][resource]['Properties']['Code']['S3Bucket'] = {
"Fn::If": [f'UsingDefaultQuickstartBucket{assetId}', { "Fn::Join" : ['-', [ {"Ref": bucketParamName} , {"Ref": 'AWS::Region'} ] ] } , {"Ref": bucketParamName}]
}
os.replace(assetFolder, f"./lambda_functions/source/asset{assetId}")
with open('./templates/AwsBiotechBlueprint.template.quickstart.yml', 'w') as yaml_file:
yaml_file.write( yaml.dump(templateData, default_flow_style=False))
with open('./.taskcat.yml', 'w') as yaml_file:
yaml_file.write( yaml.dump(taskcatConfig, default_flow_style=False))
| 0
| 0
| 0
|
fa03a84ad8d150cd10cb63d26cdedd3bf4f92191
| 1,970
|
py
|
Python
|
benchmarks/wrappers/azure/python/storage.py
|
mcopik/serverless-benchmarks
|
3b57688873853a1ea74e10b02a9d89f3a4d679ac
|
[
"BSD-3-Clause"
] | 35
|
2020-12-30T19:31:30.000Z
|
2022-03-28T11:10:00.000Z
|
benchmarks/wrappers/azure/python/storage.py
|
mcopik/serverless-benchmarks
|
3b57688873853a1ea74e10b02a9d89f3a4d679ac
|
[
"BSD-3-Clause"
] | 24
|
2021-01-04T15:37:05.000Z
|
2022-03-14T00:45:20.000Z
|
benchmarks/wrappers/azure/python/storage.py
|
mcopik/serverless-benchmarks
|
3b57688873853a1ea74e10b02a9d89f3a4d679ac
|
[
"BSD-3-Clause"
] | 10
|
2021-06-13T13:13:39.000Z
|
2021-12-20T22:05:50.000Z
|
import os
import uuid
from azure.storage.blob import BlobServiceClient
| 33.389831
| 78
| 0.62132
|
import os
import uuid
from azure.storage.blob import BlobServiceClient
class storage:
instance = None
client = None
def __init__(self):
self.client = BlobServiceClient.from_connection_string(
os.getenv('STORAGE_CONNECTION_STRING')
)
@staticmethod
def unique_name(name):
        name, extension = os.path.splitext(name)  # split the passed-in name, not the literal '.'
return '{name}.{random}.{extension}'.format(
name=name,
extension=extension,
random=str(uuid.uuid4()).split('-')[0]
)
def upload(self, container, file, filepath):
with open(filepath, 'rb') as data:
return self.upload_stream(container, file, data)
def download(self, container, file, filepath):
with open(filepath, 'wb') as download_file:
download_file.write( self.download_stream(container, file) )
def download_directory(self, container, prefix, path):
client = self.client.get_container_client(container=container)
objects = client.list_blobs(name_starts_with=prefix)
for obj in objects:
file_name = obj.name
path_to_file = os.path.dirname(file_name)
os.makedirs(os.path.join(path, path_to_file), exist_ok=True)
self.download(container, file_name, os.path.join(path, file_name))
def upload_stream(self, container, file, data):
key_name = storage.unique_name(file)
client = self.client.get_blob_client(
container=container,
blob=key_name
)
client.upload_blob(data)
return key_name
def download_stream(self, container, file):
client = self.client.get_blob_client(container=container, blob=file)
return client.download_blob().readall()
def get_instance():
if storage.instance is None:
storage.instance = storage()
return storage.instance
| 1,597
| 277
| 23
|
d40aefb01dffe34973a1f115edb7b5c079b5d477
| 57
|
py
|
Python
|
caiman_napari/__init__.py
|
kushalkolar/caiman-napari-prototype
|
e9434d513f0454fd84c1dc0987d4c0658a2dfda4
|
[
"Apache-2.0"
] | null | null | null |
caiman_napari/__init__.py
|
kushalkolar/caiman-napari-prototype
|
e9434d513f0454fd84c1dc0987d4c0658a2dfda4
|
[
"Apache-2.0"
] | null | null | null |
caiman_napari/__init__.py
|
kushalkolar/caiman-napari-prototype
|
e9434d513f0454fd84c1dc0987d4c0658a2dfda4
|
[
"Apache-2.0"
] | 1
|
2021-12-03T21:22:08.000Z
|
2021-12-03T21:22:08.000Z
|
from .cnmf import napari_experimental_provide_dock_widget
| 57
| 57
| 0.929825
|
from .cnmf import napari_experimental_provide_dock_widget
| 0
| 0
| 0
|
f19ed7865020b380c9590a1b3e1cccd8d9dbbe49
| 1,425
|
py
|
Python
|
LeetCode_OneEditDistance.py
|
amukher3/Problem_solutions
|
8fa6014a91f295d08cafb989024caa91d99211d9
|
[
"Apache-2.0"
] | 1
|
2021-12-28T08:58:51.000Z
|
2021-12-28T08:58:51.000Z
|
LeetCode_OneEditDistance.py
|
amukher3/Coding
|
a330cb04b5dd5cc1c3cf69249417a71586441bc7
|
[
"Apache-2.0"
] | null | null | null |
LeetCode_OneEditDistance.py
|
amukher3/Coding
|
a330cb04b5dd5cc1c3cf69249417a71586441bc7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 08:39:36 2020
@author: abhi0
"""
| 24.152542
| 57
| 0.412632
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 08:39:36 2020
@author: abhi0
"""
class Solution:
def isOneEditDistance(self, s: str, t: str) -> bool:
if s==t:
return False
if len(s)-len(t)>1:
return False
if len(t)-len(s)>1:
return False
temp=[]
tempPrime=[]
temp.extend(s)
tempPrime.extend(t)
#Delete a character
for i in range(len(temp)):
temp.pop(i)
if temp==tempPrime:
return True
temp=[]
temp.extend(s)
temp=[]
tempPrime=[]
temp.extend(s)
tempPrime.extend(t)
#Insert a character:
for i in range(len(tempPrime)):
temp.insert(i,tempPrime[i])
if temp==tempPrime:
return True
temp=[]
temp.extend(s)
temp=[]
tempPrime=[]
temp.extend(s)
tempPrime.extend(t)
#Replace a character:
for i in range(len(temp)):
temp.pop(i)
if i<len(tempPrime):
temp.insert(i,tempPrime[i])
if temp==tempPrime:
return True
temp=[]
temp.extend(s)
return False
| 1,291
| -6
| 51
|
f6a2109f5c8ab12cff7af75ada06b22aa5d208dd
| 445
|
py
|
Python
|
web_app/__init__.py
|
jasimrashid/airbnb
|
c78e69050a824d681794459d44a5915710e50153
|
[
"MIT"
] | null | null | null |
web_app/__init__.py
|
jasimrashid/airbnb
|
c78e69050a824d681794459d44a5915710e50153
|
[
"MIT"
] | null | null | null |
web_app/__init__.py
|
jasimrashid/airbnb
|
c78e69050a824d681794459d44a5915710e50153
|
[
"MIT"
] | null | null | null |
# web_app/__init__.py
from flask import Flask
import os
from dotenv import load_dotenv
from web_app.routes.home_routes import home_routes
from web_app.routes.stats_routes import stats_routes
if __name__ == "__main__":
my_app = create_app()
my_app.run(debug=True)
| 18.541667
| 52
| 0.750562
|
# web_app/__init__.py
from flask import Flask
import os
from dotenv import load_dotenv
from web_app.routes.home_routes import home_routes
from web_app.routes.stats_routes import stats_routes
def create_app():
app = Flask(__name__)
app.register_blueprint(home_routes)
app.register_blueprint(stats_routes)
# app.run(debug=True)
return app
if __name__ == "__main__":
my_app = create_app()
my_app.run(debug=True)
| 147
| 0
| 23
|
f9f1195387846bdb0a095420fbab31e4f29f1620
| 1,203
|
py
|
Python
|
model/backbone.py
|
aditya0212jain/fast_face_tf
|
511e94ef61932ff32d9bfea9ce5d5e45e3e44ac3
|
[
"MIT"
] | 2
|
2020-02-28T11:05:25.000Z
|
2020-09-16T02:50:34.000Z
|
model/backbone.py
|
aditya0212jain/fast_face_tf
|
511e94ef61932ff32d9bfea9ce5d5e45e3e44ac3
|
[
"MIT"
] | null | null | null |
model/backbone.py
|
aditya0212jain/fast_face_tf
|
511e94ef61932ff32d9bfea9ce5d5e45e3e44ac3
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from PIL import Image
import glob
import os
# import tensorflow.contrib.slim as slim
import tensorflow.keras as keras
def get_feature_extracting_model(input_tensor=None,input_shape=(480,640,3),model_name='resnet50',layer_index=[6,38,80,142,174]):
"""
input_shape : the input size of the image
model_name : which backbone model to be used for feature extraction
layer_names : the names of the layer from which the outputs are to be returned
Return: keras model with outputs of the given layers for the given model
**Note** : Currently only works for resnet, and layer_names provided should be valid, for resnet50 the
results from the last layer of each block are returned
"""
if model_name=='resnet50':
model_i = keras.applications.ResNet50(include_top=False,weights='imagenet',input_tensor=input_tensor,input_shape=input_shape,pooling=None)
else:
print("Currently only support for resnet50")
return
C = []
for i in layer_index:
C.append(model_i.get_layer(model_i.layers[i].name).output)
# model = keras.models.Model(inputs = model_i.input,outputs=C)
return C
| 44.555556
| 146
| 0.729842
|
import numpy as np
import tensorflow as tf
from PIL import Image
import glob
import os
# import tensorflow.contrib.slim as slim
import tensorflow.keras as keras
def get_feature_extracting_model(input_tensor=None,input_shape=(480,640,3),model_name='resnet50',layer_index=[6,38,80,142,174]):
"""
input_shape : the input size of the image
model_name : which backbone model to be used for feature extraction
layer_names : the names of the layer from which the outputs are to be returned
Return: keras model with outputs of the given layers for the given model
**Note** : Currently only works for resnet, and layer_names provided should be valid, for resnet50 the
results from the last layer of each block are returned
"""
if model_name=='resnet50':
model_i = keras.applications.ResNet50(include_top=False,weights='imagenet',input_tensor=input_tensor,input_shape=input_shape,pooling=None)
else:
print("Currently only support for resnet50")
return
C = []
for i in layer_index:
C.append(model_i.get_layer(model_i.layers[i].name).output)
# model = keras.models.Model(inputs = model_i.input,outputs=C)
return C
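# --- Editor's note: a minimal usage sketch of get_feature_extracting_model above;
# it is not part of the original file. It assumes a TensorFlow 2 / Keras environment
# with the ImageNet ResNet50 weights available; the names `inp` and `backbone` are
# illustrative only.
if __name__ == "__main__":
    inp = keras.Input(shape=(480, 640, 3))
    # The helper returns one output tensor per requested ResNet block (layer_index).
    feature_maps = get_feature_extracting_model(input_tensor=inp, input_shape=(480, 640, 3))
    # Wrap the block outputs in a Model to run a forward pass over a batch of images.
    backbone = keras.models.Model(inputs=inp, outputs=feature_maps)
    backbone.summary()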
| 0
| 0
| 0
|
ff7e2e3511ce6d7046cc747e90209397c73bb365
| 16,892
|
py
|
Python
|
streambox/test/test.py
|
chenzongxiong/streambox
|
76f95780d1bf6c02731e39d8ac73937cea352b95
|
[
"Unlicense"
] | 3
|
2019-07-03T14:03:31.000Z
|
2021-12-19T10:18:49.000Z
|
streambox/test/test.py
|
chenzongxiong/streambox
|
76f95780d1bf6c02731e39d8ac73937cea352b95
|
[
"Unlicense"
] | 6
|
2020-02-17T12:01:30.000Z
|
2021-12-09T22:02:33.000Z
|
streambox/test/test.py
|
chenzongxiong/streambox
|
76f95780d1bf6c02731e39d8ac73937cea352b95
|
[
"Unlicense"
] | 2
|
2020-12-03T04:41:18.000Z
|
2021-01-11T21:44:42.000Z
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Android system-wide tracing utility.
This is a tool for capturing a trace that includes data from both userland and
the kernel. It creates an HTML file for visualizing the trace.
"""
import errno, optparse, os, select, subprocess, sys, time, zlib, signal
import re
import tempfile
import math
import json
from regression import *
#from simple import *
#from simple1 import * # for testing this tester per se
"""
test configuration
"""
config_tput_min = 100
config_tput_max = 50000
config_tput_resolution = 100
config_output_timeout_sec = 10 # can be floating pt
config_max_runtime_sec = 60 # give up
"""
default args
"""
#config_default_cores = [4, 12, 32, 56]
config_default_records = 1000 * 1000 # records per epoch
'''
global vars
'''
the_output_dir = ""
"""
test cases
"""
"""
{
"name" : "grep",
"exec" : "./test-grep.bin",
#"cores" : [4, 12, 32, 56],
"cores" : [56],
"records" : 1000,
"record_size" : 2000,
"target_ms" : 1000,
"input_file" : "/ssd/1g.txt",
# --- optional --- #
"tput_hint" : 4000,
},
"""
"""
app_list = [
"test-grep",
"test-wc",
"test-wingrep",
"test-join",
"test-join-2",
"networklatency",
"test-distinct",
"test-tweet"
]
"""
"""
sample line:
dump markers: >>>>>>>>>>>>>>>>>>>>>>>>>>>>total 7 ms
# return: delay in ms
"""
"""
sample line (for backward compatibility; future sources should have same name):
unbounded-inmem 19.07 19.07 19.53 19.53
[unbounded] 20.35 20.35 2604.17 2604.17
[netsource] 31.79 31.79 813.80 813.80
return: tput in krec/s (floating)
"""
# stateless
# @delays: a list of all historical delays
# return:
DECIDE_FAIL = 1 # failed. abort.
DECIDE_CONT = 2 # should continue
DECIDE_OK = 3 # target_delay is met
#DECIDE_DUNNO = 4 # can't decide yet
DECIDE_EXCEPTION = 5 # what happened?
decide_descs = ["", "fail", "cont", "ok", "dunno", "exception"]
# XXX catch c-c signal to ensure all test programs killed XXX
is_stop = False
'''
return (status, tput)
tput is floating pt. <0 if none achieved
'''
# @core == -1 if unspecified on cmdline
# return: actual_tput
# XXX only support one core now. but that's fine
if __name__ == '__main__':
signal.signal(signal.SIGINT, stop_test_handler)
the_output_dir = tempfile.mkdtemp()
# results will be inserted in place into @all_tests
''' check & print all test info '''
test_names = {}
# detect duplicate test names
for test in all_tests:
if test_names.has_key(test["name"]):
print >> sys.stderr, "abort: duplicate test names:", test["name"];
sys.exit(1)
test_names[test["name"]] = 1 # remember to check duplicates
if test.has_key("softdelay_maxbad_ms") and test["softdelay_maxbad_ms"] < test["target_ms"]:
print >> sys.stderr, "abort: config err: [%s] softdelay maxbad ms < target ms" %test["name"]
sys.exit(-1)
''' print menu '''
print "========================================"
print "select tests to run (enter to run all)"
for i, test in enumerate(all_tests):
print i, test["name"];
try:
choice = int(raw_input('Enter your input:'))
print "Okay ... Will run test", all_tests[choice]["name"]
all_tests = [all_tests[choice]]
except ValueError:
print "Okay ... Will run all tests."
for test in all_tests:
atput = launch_one_test(test)
if atput < 0:
print "%s exception: can't get the tput." %test["name"]
test["actual_tput"] = -1 # is this okay?
else:
test["actual_tput"] = atput
print "%s completes: actual tput %d krec/s target_delay %d ms" %(test["name"], atput, test["target_ms"])
print "========================================"
print "%20s %10s %10s %10s %6s %15s" %("test", "target_ms", "tput/krecs", "base/krecs", "improve%", "elapsed/sec")
for test in all_tests:
tput_inc = -999.99
tput_inc_str = "--"
tput_baseline_str = "--"
if test.has_key("disable") and test["disable"]:
#if not test.has_key("elapsed_sec"): # test never executed?
print "%10s -- skipped -- " %(test["name"])
continue
if test.has_key("tput_baseline"):
tput_inc = 100.0 * (test["actual_tput"] - test["tput_baseline"]) / test["tput_baseline"]
tput_inc_str = "%.2f" %(tput_inc)
tput_baseline_str = "%d" %(test["tput_baseline"])
#print "baseline is", test["tput_baseline"]
print "%20s %10d %10d %10s %6s %15.2f" \
%(test["name"], test["target_ms"], test["actual_tput"], tput_baseline_str, tput_inc_str, test["elapsed_sec"])
print "========================================"
print "diff=-999 means no baseline provided"
print "all done. check result dir:\n ls ", the_output_dir
| 30.381295
| 118
| 0.645749
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Android system-wide tracing utility.
This is a tool for capturing a trace that includes data from both userland and
the kernel. It creates an HTML file for visualizing the trace.
"""
import errno, optparse, os, select, subprocess, sys, time, zlib, signal
import re
import tempfile
import math
import json
from regression import *
#from simple import *
#from simple1 import * # for testing this tester per se
"""
test configuration
"""
config_tput_min = 100
config_tput_max = 50000
config_tput_resolution = 100
config_output_timeout_sec = 10 # can be floating pt
config_max_runtime_sec = 60 # give up
"""
default args
"""
#config_default_cores = [4, 12, 32, 56]
config_default_records = 1000 * 1000 # records per epoch
'''
global vars
'''
the_output_dir = ""
"""
test cases
"""
"""
{
"name" : "grep",
"exec" : "./test-grep.bin",
#"cores" : [4, 12, 32, 56],
"cores" : [56],
"records" : 1000,
"record_size" : 2000,
"target_ms" : 1000,
"input_file" : "/ssd/1g.txt",
# --- optional --- #
"tput_hint" : 4000,
},
"""
"""
app_list = [
"test-grep",
"test-wc",
"test-wingrep",
"test-join",
"test-join-2",
"networklatency",
"test-distinct",
"test-tweet"
]
"""
"""
sample line:
dump markers: >>>>>>>>>>>>>>>>>>>>>>>>>>>>total 7 ms
# return: delay in ms
"""
def parse_line_delay(line):
delay_regex = r'''dump markers: >>>>>>>>>>>>>>>>>>>>>>>>>>>>total (\d+) ms'''
m = re.match(delay_regex, line)
if not m:
return None
else:
return (m.group(1))
"""
sample line (for backward compatibility; future sources should have same name):
unbounded-inmem 19.07 19.07 19.53 19.53
[unbounded] 20.35 20.35 2604.17 2604.17
[netsource] 31.79 31.79 813.80 813.80
return: tput in krec/s (floating)
"""
def parse_line_tput(line):
# the "?:" is for "non capturing" group.
regex = r'''\s+(?:unbounded-inmem|\[unbounded\]|\[netsource\])\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)'''
m = re.match(regex, line)
if not m:
return None
else:
recent_mbs = m.group(1)
avg_mbs = m.group(2)
recent_krecs = m.group(3)
avg_krecs = m.group(4)
return (avg_krecs)
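# --- Editor's note: illustrative calls to the two parsers above; not part of the
# original file. The sample strings follow the formats documented in the comments;
# the numeric values are examples only.
def _demo_parsers():
    # average tput is the 4th captured group, returned as a string of krec/s
    print parse_line_tput("  [unbounded]  20.35  20.35  2604.17  2604.17")   # -> 2604.17
    # per-epoch delay in ms; returns None if the '>' marker count does not match
    print parse_line_delay("dump markers: " + ">" * 28 + "total 7 ms")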
# stateless
# @delays: a list of all historical delays
# return:
DECIDE_FAIL = 1 # failed. abort.
DECIDE_CONT = 2 # should continue
DECIDE_OK = 3 # target_delay is met
#DECIDE_DUNNO = 4 # can't decide yet
DECIDE_EXCEPTION = 5 # what happened?
decide_descs = ["", "fail", "cont", "ok", "dunno", "exception"]
# XXX catch c-c signal to ensure all test programs killed XXX
def decide_delay(delays, target_delay, test):
n_init_samples = 10 # drop them all. must >= 1 since we do i-1 below
n_recent_samples = 15
n_total_samples = len(delays)
# must at least pass the init phase and have 1 window
if len(delays) < n_init_samples + n_recent_samples:
return DECIDE_CONT
# check most recent N samples
is_go_up = True
is_go_down = True
n_good = 0
n_bad = 0
n_anomaly = 0
# --- decide by trend --- #
# trend -- do we keep going up/down?
# compute mov avg windows
n_moving_avg_win = 3
assert(n_recent_samples - n_moving_avg_win >= 5)
if (n_recent_samples < n_moving_avg_win): # consider increasing n_recent_samples
return DECIDE_CONT
mov_delays = []
for i in range(n_total_samples - n_recent_samples, n_total_samples - n_moving_avg_win):
s = 0
for offset in range(n_moving_avg_win):
s += delays[i + offset]
s /= n_moving_avg_win
mov_delays.append(s)
if (len(mov_delays) <= 1):
return DECIDE_CONT
for i in range(len(mov_delays) - 1):
if mov_delays[i] >= mov_delays[i+1]: # since we're mv avg, robust to ==
is_go_up = False
if mov_delays[i] <= mov_delays[i+1]: # since we're mv avg, robust to ==
is_go_down = False
#if (is_go_up and n_good > n_recent_samples / 2):
if (is_go_up): # shall we also say how far it is from our target delay?
print "latency fail: rising delays", delays[n_total_samples - n_recent_samples:]
return DECIDE_FAIL
# --- decide by target delay --- #
# all good (or good enough for softdelay) -- okay
# all bad -- fail
# a mix of good & bad -- undecided
#elif (1.0 * n_good / n_recent_samples <= 0.05)
maxgood = target_delay
maxbad = target_delay # softdelay can't pass if any sample larger than this. but may not fail immediately
max_n_bad = 0 # pass , if #bads in a window smaller than this
if test.has_key("softdelay_maxbad_ratio"):
max_n_bad = math.ceil(n_recent_samples * test["softdelay_maxbad_ratio"])
maxbad = test["softdelay_maxbad_ms"]
max_sofar = -1
# go through individual samples
for i in range(n_total_samples - n_recent_samples, n_total_samples):
if delays[i] > maxgood:
#print "over target", i, delays[i]
n_bad += 1
if delays[i] > max_sofar:
            #print "latency fail. anomaly:", delays[i], "over limit", maxbad
#return DECIDE_FAIL
max_sofar = delays[i]
if (n_bad <= max_n_bad and max_sofar < maxbad): # good (enough)
print "latency okay! bad:", n_bad, delays[n_total_samples - n_recent_samples:]
return DECIDE_OK
if (n_bad == n_recent_samples): # all bad
print "latency fail: all over targets", delays[n_total_samples - n_recent_samples:]
return DECIDE_FAIL
# undecided -- a mix of good and bad
print "can't decide. n_bad", n_bad, "maxsofar", max_sofar, "maxbad", maxbad, \
"recent delays: ", delays[n_total_samples - n_recent_samples:]
return DECIDE_CONT
# XXX more logic? XXX
#elif (is_go_down)
# return DECIDE_CONT
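# --- Editor's note: a small illustrative call; not part of the original file. The
# empty dict stands in for a test config with no soft-delay settings. With thirty
# constant 5 ms samples against a 10 ms target, the recent window has no bad
# samples, so the decision is DECIDE_OK (values are examples only).
def _demo_decide_delay():
    print decide_descs[decide_delay([5] * 30, 10, {})]   # -> "ok"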
is_stop = False
def stop_test_handler(signal, frame):
print "Stopping the test.... (ctrl-c)"
is_stop = True
'''
return (status, tput)
tput is floating pt. <0 if none achieved
'''
def run_one_test(test, atrace_args):
trace_started = False
leftovers = ''
start_sec = time.time()
#html_filename = os.path.join(the_output_dir, "%s-tput%d.log" %(test["name"], test["target_tput"]))
# will rename this after test done
html_filename = os.path.join(the_output_dir, test["name"],
"%s-tput%d-ongoing.log" %(test["name"], test["target_tput"]))
html_file = open(html_filename, 'w')
delays = []
avg_tput = -1
test_status = DECIDE_EXCEPTION
# xzl -- run the actual command on target --
print "run cmd ===>", atrace_args
adb = subprocess.Popen(atrace_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE) # to feed stdin
# per Python doc, this may deadlock; but it is NON BLOCKING!
adb.stdin.write('asdb\r\nsdfsd\rdsfsdf\nsfds\r\ndsfsd\r\n')
# this will block read stdout/stderr... bad
#adb.communicate(input=b'asdb\r\nsdfsd\rdsfsdf\nsfds\r\ndsfsd\r\n')[0]
    # XXX catch exception to kill process
while True:
# we may interrupt the blocking call.
# http://stackoverflow.com/questions/5633067/signal-handling-in-pylons
try:
ready = select.select([adb.stdout, adb.stderr], [], [adb.stdout, adb.stderr],
config_output_timeout_sec)
except select.error, v:
if v[0] != errno.EINTR:
print "don't know how to handle"
raise
else:
print "intr!"
if adb.stderr in ready[0]:
err = os.read(adb.stderr.fileno(), 4096)
sys.stderr.write(err)
sys.stderr.flush()
if adb.stdout in ready[0]:
#print "got one std line"
out = leftovers + os.read(adb.stdout.fileno(), 4096)
#if options.from_file is None:
if True: # xzl
out = out.replace('\r\n', '\n')
if out.endswith('\r'):
out = out[:-1]
leftovers = '\r'
else:
leftovers = ''
# XXX toggle this
#sys.stdout.write(out)
#sys.stdout.flush()
html_file.write(out)
html_file.flush()
lines = out.splitlines(True)
out = ''
for i, line in enumerate(lines):
tput = parse_line_tput(line)
if tput != None:
avg_tput = tput
#print "XXXXXXXXXX Got tput", avg_tput
#sys.exit(-1)
# ---- xzl: parse an output line ---- #
#sys.stdout.write("collecting trace...")
#sys.stdout.flush()
delay = parse_line_delay(line)
if delay != None:
delays += [int(delay)]
decision = decide_delay(delays, int(test["target_ms"]), test)
if (decision == DECIDE_OK and avg_tput > 0):
# target delay met, we're done
print "test-wrapper: okay to meet latency; stop"
adb.kill()
test_status = DECIDE_OK
break; # will go check status
elif (decision == DECIDE_FAIL and avg_tput > 0):
print "test-wrapper: fail to meet latency; stop"
adb.kill()
test_status = DECIDE_FAIL
break; # will go check status
#out = ''.join(lines[i:]) #xzl: don't skip any line
#html_file = open(html_filename, 'w')
#html_file.write("# " + "%s" %atrace_args) #xzl: save our command for dbg
trace_started = True
#sys.stdout.write(out)
#sys.stdout.flush()
#html_out = out.replace('\n', '\\n\\\n')
#html_out = out
#if len(html_out) > 0:
# html_file.write(html_out)
# xzl -- done reading a wave of output from target --
if (time.time() - start_sec > config_max_runtime_sec):
print >> sys.stderr, "test timeout. after %.2f sec still can't decide tput" %(time.time() - start_sec)
adb.kill()
test_status = DECIDE_FAIL # it's not an expcetion: maybe the latency just can't stablize.
time.sleep(1)
# check prog status
result = adb.poll()
if result is not None:
break
# result == None means child not end yet
if result != 0:
#print >> sys.stderr, 'program returned error code %d' % result
#print >> sys.stderr, result
print >> sys.stderr, 'program killed'
pass
elif trace_started: # xzl: program ends okay and we've collected some trace
html_out = dec.flush().replace('\n', '\\n\\\n').replace('\r', '')
if len(html_out) > 0:
html_file.write(html_out)
#html_file.write(html_suffix)
html_file.close()
#print " done\n\n wrote file://%s/%s\n" % (os.getcwd(), options.output_file)
print " done\n\n wrote %s\n" % (options.output_file)
else:
print >> sys.stderr, ('An error occured while capturing the trace. Output ' +
'file was not written.')
html_file.close()
# rename based on results
if (test_status == DECIDE_FAIL):
nname = os.path.join(the_output_dir, test["name"],
"%s-fail-target_tput%d.log" %(test["name"], test["target_tput"]))
elif (test_status == DECIDE_EXCEPTION):
nname = os.path.join(the_output_dir, test["name"],
"%s-exception-target_tput%d.log" %(test["name"], test["target_tput"]))
elif (test_status == DECIDE_OK):
nname = os.path.join(the_output_dir, test["name"],
"%s-ok-target_tput%d-actual%d.log" %(test["name"], test["target_tput"], int(float(avg_tput))))
else:
assert(False)
os.rename(html_filename, nname)
return test_status, float(avg_tput)
def save_test_config(test):
config_fname = os.path.join(the_output_dir, test["name"], "config.json")
cf = file(config_fname, "w")
# pretty, see https://docs.python.org/2/library/json.html
json.dump(test, cf, indent=4, separators=(',', ': '))
cf.close()
# @core == -1 if unspecified on cmdline
def print_test_cmd(test, core, tput_best):
print "---------------------"
print "you can repeat the best experiment by:"
print os.path.abspath(test["exec"]), "\\ \n", "--target_tput", tput_best, \
"--records", test["records"], "\\ \n", \
"--input_file", test["input_file"], \
"--record_size", test["record_size"], # add more?
if (core != -1):
print "--cores", core
else:
print
print "---------------------"
# return: actual_tput
# XXX only support one core now. but that's fine
def launch_one_test(test):
tput = 1000 # the target_tput passed to cm
tput_best = -1 # the target_tput corresponding to the best actual tput
tput_low = config_tput_min
tput_high = config_tput_max
if test.has_key("disable") and test["disable"]:
print >> sys.stderr, "skip disabled test: ", test["name"]
return
if test.has_key("tput_hint"):
tput = test["tput_hint"]
os.mkdir(os.path.join(the_output_dir, test["name"]))
save_test_config(test)
start_sec = time.time()
core = -1
if test.has_key("cores"):
core = test["cores"]
actual_tput = -1
while True: # execute the test with different tput
# xzl: the actual command. all args except tput
print "---------> run with tput %d (low %d high %d) target %d ms" %(tput, tput_low, tput_high, test["target_ms"])
args = [test["exec"],
#'--cores=%s' %core,
'--target_tput=%d' %tput,
'--records=%s' %test["records"],
'--input_file=%s' %test["input_file"],
'--record_size=%s' %test["record_size"],
# todo: add more #
]
if (core != -1):
args.append('--cores=%d' %core)
test["target_tput"] = tput # the runner can gen filename based on this
status, t = run_one_test(test, args)
if (status == DECIDE_EXCEPTION):
# save test exec time
test["elapsed_sec"] = time.time() - start_sec
return -1
if (status == DECIDE_OK):
if (t > 0): # update actual tput
print "actual_tput prev:", actual_tput, "new:", t
#assert(actual_tput < 0 or t >= actual_tput + config_tput_resolution) # this may happen...?
#assert(actual_tput < 0 or 1.05 * t >= actual_tput) # this may happen...?
if not (actual_tput < 0 or 1.05 * t >= actual_tput):
# after lowering target tput, we achieve target lat,
# but the actual tput is actually lower than prev. we're done.
test["elapsed_sec"] = time.time() - start_sec
print_test_cmd(test, core, tput_best)
return actual_tput
else:
actual_tput = t
tput_best = tput
# --- is range small enough? --- #
if (tput_low + config_tput_resolution > tput_high): # range small enough, done.
print_test_cmd(test, core, tput_best)
# save test exec time
test["elapsed_sec"] = time.time() - start_sec
return actual_tput
else:
# we set tput based on our prior tput setting, not the actual @actual_tput
tput_low = tput
tput = (tput_low + tput_high) / 2
continue
elif (status == DECIDE_FAIL):
if (tput_low + config_tput_resolution > tput_high):
# save test exec time
test["elapsed_sec"] = time.time() - start_sec
return actual_tput # == -1 if we never seen one
else:
tput_high = tput
tput = (tput_low + tput_high) / 2
continue
else:
print "err?"
sys.exit(1)
if __name__ == '__main__':
signal.signal(signal.SIGINT, stop_test_handler)
the_output_dir = tempfile.mkdtemp()
# results will be inserted in place into @all_tests
''' check & print all test info '''
test_names = {}
# detect duplicate test names
for test in all_tests:
if test_names.has_key(test["name"]):
print >> sys.stderr, "abort: duplicate test names:", test["name"];
sys.exit(1)
test_names[test["name"]] = 1 # remember to check duplicates
if test.has_key("softdelay_maxbad_ms") and test["softdelay_maxbad_ms"] < test["target_ms"]:
print >> sys.stderr, "abort: config err: [%s] softdelay maxbad ms < target ms" %test["name"]
sys.exit(-1)
''' print menu '''
print "========================================"
print "select tests to run (enter to run all)"
for i, test in enumerate(all_tests):
print i, test["name"];
try:
choice = int(raw_input('Enter your input:'))
print "Okay ... Will run test", all_tests[choice]["name"]
all_tests = [all_tests[choice]]
except ValueError:
print "Okay ... Will run all tests."
for test in all_tests:
atput = launch_one_test(test)
if atput < 0:
print "%s exception: can't get the tput." %test["name"]
test["actual_tput"] = -1 # is this okay?
else:
test["actual_tput"] = atput
print "%s completes: actual tput %d krec/s target_delay %d ms" %(test["name"], atput, test["target_ms"])
print "========================================"
print "%20s %10s %10s %10s %6s %15s" %("test", "target_ms", "tput/krecs", "base/krecs", "improve%", "elapsed/sec")
for test in all_tests:
tput_inc = -999.99
tput_inc_str = "--"
tput_baseline_str = "--"
if test.has_key("disable") and test["disable"]:
#if not test.has_key("elapsed_sec"): # test never executed?
print "%10s -- skipped -- " %(test["name"])
continue
if test.has_key("tput_baseline"):
tput_inc = 100.0 * (test["actual_tput"] - test["tput_baseline"]) / test["tput_baseline"]
tput_inc_str = "%.2f" %(tput_inc)
tput_baseline_str = "%d" %(test["tput_baseline"])
#print "baseline is", test["tput_baseline"]
print "%20s %10d %10d %10s %6s %15.2f" \
%(test["name"], test["target_ms"], test["actual_tput"], tput_baseline_str, tput_inc_str, test["elapsed_sec"])
print "========================================"
print "diff=-999 means no baseline provided"
print "all done. check result dir:\n ls ", the_output_dir
| 11,868
| 0
| 179
|
7fb19bf16eeaacf9764b0e90d00fecc4871476b5
| 10,182
|
py
|
Python
|
others/ewm_group_mean.py
|
rahasayantan/Work-For-Reference
|
e052da538df84034ec5a0fe3b19c4287de307286
|
[
"MIT"
] | null | null | null |
others/ewm_group_mean.py
|
rahasayantan/Work-For-Reference
|
e052da538df84034ec5a0fe3b19c4287de307286
|
[
"MIT"
] | null | null | null |
others/ewm_group_mean.py
|
rahasayantan/Work-For-Reference
|
e052da538df84034ec5a0fe3b19c4287de307286
|
[
"MIT"
] | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
import gc
train_df = pd.read_csv('../input/train.csv', parse_dates=["activation_date"])
test_df = pd.read_csv('../input/test.csv', parse_dates=["activation_date"])
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import random
#x = train_df.copy()#[['price', 'deal_probability', 'image_top_1']]
train_df = genFeatures(train_df)
test_df = genFeatures(test_df)
groupCols = ['region', 'city', 'parent_category_name',
'category_name', 'user_type']
X = train_df[groupCols + ['deal_probability']].groupby(groupCols, as_index=False).agg([len,np.mean])
X.columns = ['_'.join(col).strip() for col in X.columns.values]
X['Group_weight1'] = (X.deal_probability_mean + 1e-6) * np.log1p(X.deal_probability_len)
X.drop(['deal_probability_mean', 'deal_probability_len'], axis = 1, inplace = True)
X.reset_index(inplace = True)
train_df = train_df.merge(X, on = groupCols, how = 'left')
test_df = test_df.merge(X, on = groupCols, how = 'left')
catCols = ['region', 'city', 'parent_category_name',
'category_name', 'param_1', 'param_2', 'param_3', 'user_type']
dftrainnum = train_df[list(set(train_df.columns)-set(catCols+['user_id']))]
dftestnum = test_df[list(set(test_df.columns)-set(catCols+['user_id']))]
train, test = catEncode(train_df[catCols].copy(), test_df[catCols].copy(), train_df.deal_probability.values, nbag = 10, nfold = 20, minCount = 1)
train_df = pd.concat((dftrainnum, train), axis =1)
test_df = pd.concat((dftestnum, test), axis =1)
del(dftrainnum, train); gc.collect()
del(dftestnum, test); gc.collect()
| 50.91
| 147
| 0.585445
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_selection, metrics
import lightgbm as lgb
import gc
train_df = pd.read_csv('../input/train.csv', parse_dates=["activation_date"])
test_df = pd.read_csv('../input/test.csv', parse_dates=["activation_date"])
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import random
#x = train_df.copy()#[['price', 'deal_probability', 'image_top_1']]
def genFeatures(x):
x["activation_weekday"] = x["activation_date"].dt.weekday
x["monthday"] = x["activation_date"].dt.day
##################Added in set 1 - 0.01 Improvement
x['price_new'] = np.log1p(x.price) # log transform improves co-relation with deal_price
x['count_null_in_row'] = x.isnull().sum(axis=1)# works
x['has_description'] = x.description.isnull().astype(int)
x['has_image'] = x.image.isnull().astype(int)
x['has_image_top'] = x.image_top_1.isnull().astype(int)
x['has_param1'] = x.param_1.isnull().astype(int)
x['has_param2'] = x.param_2.isnull().astype(int)
x['has_param3'] = x.param_3.isnull().astype(int)
x['has_price'] = x.price.isnull().astype(int)
#################Added in set 2 - 0.00x Improvement
x["description"].fillna("NA", inplace=True)
x["desc_nwords"] = x["description"].apply(lambda x: len(x.split()))
x['len_description'] = x['description'].apply(lambda x: len(x))
x["title_nwords"] = x["title"].apply(lambda x: len(x.split()))
x['len_title'] = x['title'].apply(lambda x: len(x))
x['params'] = x['param_1'].fillna('') + ' ' + x['param_2'].fillna('') + ' ' + x['param_3'].fillna('')
x['params'] = x['params'].str.strip()
x['len_params'] = x['params'].apply(lambda x: len(x))
x['words_params'] = x['params'].apply(lambda x: len(x.split()))
x['symbol1_count'] = x['description'].str.count('↓')
x['symbol2_count'] = x['description'].str.count('\*')
x['symbol3_count'] = x['description'].str.count('✔')
x['symbol4_count'] = x['description'].str.count('❀')
x['symbol5_count'] = x['description'].str.count('➚')
x['symbol6_count'] = x['description'].str.count('ஜ')
x['symbol7_count'] = x['description'].str.count('.')
x['symbol8_count'] = x['description'].str.count('!')
x['symbol9_count'] = x['description'].str.count('\?')
x['symbol10_count'] = x['description'].str.count(' ')
x['symbol11_count'] = x['description'].str.count('-')
x['symbol12_count'] = x['description'].str.count(',')
####################
return x
train_df = genFeatures(train_df)
test_df = genFeatures(test_df)
groupCols = ['region', 'city', 'parent_category_name',
'category_name', 'user_type']
X = train_df[groupCols + ['deal_probability']].groupby(groupCols, as_index=False).agg([len,np.mean])
X.columns = ['_'.join(col).strip() for col in X.columns.values]
X['Group_weight1'] = (X.deal_probability_mean + 1e-6) * np.log1p(X.deal_probability_len)
X.drop(['deal_probability_mean', 'deal_probability_len'], axis = 1, inplace = True)
X.reset_index(inplace = True)
train_df = train_df.merge(X, on = groupCols, how = 'left')
test_df = test_df.merge(X, on = groupCols, how = 'left')
def catEncode(train_char, test_char, y, colLst = [], nbag = 10, nfold = 20, minCount = 3, val = False, val_char = None):
train_df = train_char.copy()
test_df = test_char.copy()
if val == True:
val_df = val_char.copy()
if not colLst:
print("Empty ColLst")
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],3))
enc_mat_test = np.zeros((test_char.shape[0],3))
if val ==True:
enc_mat_val = np.zeros((val_char.shape[0],3))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby([c]).agg([len,np.mean,np.std])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
ind = c
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len': ('y_len_' + ind)}, inplace = True)
# datax[c+'_medshftenc'] = datax['y_median']-med_y
# datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(-99)
datatst = datatst.join(datax,on=[c], how='left').fillna(-99)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
if val ==True:
valTst = val_char[[c]].copy()
valTst = valTst.join(datax,on=[c], how='left').fillna(-99)
enc_mat_val += valTst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns = list(set(datax.columns)-set([c]))
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
if val == True:
enc_mat_val /= (nfold * nbag)
enc_mat_val = pd.DataFrame(enc_mat_val)
enc_mat_val.columns=enc_mat.columns
val_df = pd.concat([enc_mat_val.reset_index(drop = True),val_df.reset_index(drop = True)],axis=1)
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
else:
print("Not Empty ColLst")
data = train_char[colLst].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],3))
enc_mat_test = np.zeros((test_char.shape[0],3))
if val ==True:
enc_mat_val = np.zeros((val_char.shape[0],3))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(colLst).agg([len,np.mean,np.std])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
ind = '_'.join(colLst)
datax.rename(columns = {'y_mean': ('y_mean_' + ind), 'y_std': ('y_std_' + ind),
'y_len': ('y_len_' + ind)}, inplace = True)
datatst = test_char[colLst].copy()
val_X = val_X.join(datax,on=colLst, how='left').fillna(-99)
datatst = datatst.join(datax,on=colLst, how='left').fillna(-99)
print(val_X[list(set(datax.columns)-set(colLst))].columns)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set(colLst))]
enc_mat_test += datatst[list(set(datax.columns)-set(colLst))]
if val ==True:
valTst = val_char[[c]].copy()
valTst = valTst.join(datax,on=[c], how='left').fillna(-99)
enc_mat_val += valTst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=list(set(datax.columns)-set([c]))
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=enc_mat.columns
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
if val == True:
enc_mat_val /= (nfold * nbag)
enc_mat_val = pd.DataFrame(enc_mat_val)
enc_mat_val.columns=enc_mat.columns
val_df = pd.concat([enc_mat_val.reset_index(drop = True),val_df.reset_index(drop = True)],axis=1)
print(train_df.columns)
print(test_df.columns)
if val == True:
print(val_df.columns)
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
if val == True:
for c in train_df.columns:
if train_df[c].dtype == 'float64':
train_df[c] = train_df[c].astype('float32')
test_df[c] = test_df[c].astype('float32')
val_df[c] = val_df[c].astype('float32')
return train_df, test_df, val_df
else:
return train_df, test_df
catCols = ['region', 'city', 'parent_category_name',
'category_name', 'param_1', 'param_2', 'param_3', 'user_type']
dftrainnum = train_df[list(set(train_df.columns)-set(catCols+['user_id']))]
dftestnum = test_df[list(set(test_df.columns)-set(catCols+['user_id']))]
train, test = catEncode(train_df[catCols].copy(), test_df[catCols].copy(), train_df.deal_probability.values, nbag = 10, nfold = 20, minCount = 1)
train_df = pd.concat((dftrainnum, train), axis =1)
test_df = pd.concat((dftestnum, test), axis =1)
del(dftrainnum, train); gc.collect()
del(dftestnum, test); gc.collect()
| 8,205
| 0
| 46
|
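catEncode above builds out-of-fold target statistics (count, mean, std) per category, bagged over several K-fold seeds, so that a row is never encoded with its own label. A stripped-down sketch of the core out-of-fold mean-encoding step, with no bagging and mean only, under illustrative column names:
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
def oof_target_mean(df, cat_col, target_col, n_splits=5, seed=2017):
    """Encode cat_col with fold-out target means so rows never see their own label."""
    enc = pd.Series(np.nan, index=df.index)
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
    for dev_idx, val_idx in kf.split(df):
        # Means come from the dev fold only, then are mapped onto the val rows.
        means = df.iloc[dev_idx].groupby(cat_col)[target_col].mean()
        enc.iloc[val_idx] = df.iloc[val_idx][cat_col].map(means).to_numpy()
    return enc.fillna(df[target_col].mean())  # unseen categories fall back to the global mean
if __name__ == "__main__":
    toy = pd.DataFrame({"region": list("aabbccdd"),
                        "deal_probability": [0, 1, 1, 1, 0, 0, 1, 0]})
    toy["region_enc"] = oof_target_mean(toy, "region", "deal_probability", n_splits=4)
    print(toy)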
b26f47923d5ad384c704a9abb1733c15ca92f5f3
| 433
|
py
|
Python
|
mobilebdd/webdriver_processor.py
|
PhoenixWright/MobileBDDCore
|
b133061cf2c8b8d95ea9164734b726ee8a3c03a0
|
[
"Apache-2.0"
] | null | null | null |
mobilebdd/webdriver_processor.py
|
PhoenixWright/MobileBDDCore
|
b133061cf2c8b8d95ea9164734b726ee8a3c03a0
|
[
"Apache-2.0"
] | null | null | null |
mobilebdd/webdriver_processor.py
|
PhoenixWright/MobileBDDCore
|
b133061cf2c8b8d95ea9164734b726ee8a3c03a0
|
[
"Apache-2.0"
] | null | null | null |
# spec for webdriver processors
class WebDriverProcessor(object):
"""Allows outside users to have the final say on things like capabilities
that are used to instantiate WebDriver.
"""
def process_capabilities(self, capabilities):
"""Process capabilities passed in and return the final dict.
:type capabilities: dict
:rtype: dict
"""
pass
| 27.0625
| 77
| 0.660508
|
# spec for webdriver processors
class WebDriverProcessor(object):
"""Allows outside users to have the final say on things like capabilities
that are used to instantiate WebDriver.
"""
def __init__(self):
pass
def process_capabilities(self, capabilities):
"""Process capabilities passed in and return the final dict.
:type capabilities: dict
:rtype: dict
"""
pass
| 11
| 0
| 26
|
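WebDriverProcessor above only specifies the hook; a hypothetical implementation sketch showing how a caller might adjust capabilities before WebDriver is created (the capability key and default used here are illustrative, not part of the spec):
class DefaultTimeoutProcessor(object):
    """Example only: fills in a capability if the caller did not set one."""
    def process_capabilities(self, capabilities):
        final = dict(capabilities or {})
        final.setdefault('newCommandTimeout', 120)  # illustrative key/value
        return final
if __name__ == '__main__':
    print(DefaultTimeoutProcessor().process_capabilities({'platformName': 'Android'}))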
df4ddd8d21c48df995e80724c52440730be17034
| 61
|
py
|
Python
|
example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/A/atomic unit of electric field.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/A/atomic unit of electric field.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/A/atomic unit of electric field.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-04T04:51:48.000Z
|
2021-02-04T04:51:48.000Z
|
constants.physical_constants["atomic unit of electric field"]
| 61
| 61
| 0.852459
|
constants.physical_constants["atomic unit of electric field"]
| 0
| 0
| 0
|
0a86e9bd2a90888f6e8f5c43dd81e475a2ed2359
| 7,496
|
py
|
Python
|
freechess/stats.py
|
captnswing/freechess
|
c043bc7ba4216be72182762ac2b6c2117f11105d
|
[
"Apache-2.0"
] | null | null | null |
freechess/stats.py
|
captnswing/freechess
|
c043bc7ba4216be72182762ac2b6c2117f11105d
|
[
"Apache-2.0"
] | null | null | null |
freechess/stats.py
|
captnswing/freechess
|
c043bc7ba4216be72182762ac2b6c2117f11105d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.http import HttpResponseServerError
from django.template import RequestContext
from freechess.models import ChessGame
from dateutil.relativedelta import relativedelta
import datetime
| 48.051282
| 125
| 0.655416
|
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.http import HttpResponseServerError
from django.template import RequestContext
from freechess.models import ChessGame
from dateutil.relativedelta import relativedelta
import datetime
def chessStats(request):
# some general variables
player = 'captnswing'
allgames = ChessGame.objects.all()
if not allgames:
return HttpResponseServerError("No data")
firstgame = allgames[0]
lastgame = allgames.latest()
number_of_games = ChessGame.objects.count()
daterange = (lastgame.date - firstgame.date)
today = datetime.date.today()
y, m = today.timetuple()[:2]
# some basic stats
elotrend = ChessGame.objects.elo_trend()
all_elos = list(allgames.values_list('self_elo'))
stats = {
"startdate": ChessGame.objects.latest().date + relativedelta(months=-9),
"enddate": lastgame.date,
"firstdate": firstgame.date,
"perday": number_of_games / float(daterange.days),
"total": number_of_games,
"currentelo": lastgame.self_elo,
"alltime_maxelo": max(all_elos)[0],
"alltime_maxelo_date": elotrend[all_elos.index(max(all_elos))][1],
"alltime_minelo": min(all_elos)[0],
"alltime_minelo_date": elotrend[all_elos.index(min(all_elos))][1],
}
comments = ChessGame.objects.values_list('comment')
flattenedcomments = ''.join([item for sublist in comments for item in sublist])
if flattenedcomments:
# result tallies
# retrieve a list of all results interactively using
# sorted(list(set([ ' '.join(elem.values()[0].split()[1:]) for elem in ChessGame.objects.all().values('comment') ])))
win_comments = [('resigns', 'opponent resigns'),
('forfeits on time', 'opponent forfeits on time'),
('checkmated', 'opponent checkmated'),
('forfeits by disconnection', 'opponent forfeits by disconnection'), ]
lost_comments = [(player + ' resigns', player + ' resigns'),
(player + ' forfeits on time', player + ' forfeits on time'),
(player + ' checkmated', player + ' checkmated'),
(player + ' forfeits by disconnection', player + ' forfeits by disconnection'), ]
draw_comments = [('player has mating material', 'neither player has mating material'),
('drawn by repetition', 'game drawn by repetition'),
('ran out of time and %s has no material to mate' % player,
'opponent ran out of time and %s can\'t mate' % player),
('%s ran out of time and' % player, '%s ran out of time and opponent can\'t mate' % player),
('drawn because both players ran out of time',
'game drawn because both players ran out of time'),
('drawn by stalemate', 'game drawn by stalemate'),
('drawn by mutual agreement', 'game drawn by mutual agreement'), ]
won_tally = []
for filterstring, cleartext in win_comments:
won_tally.append((cleartext, ChessGame.objects.won_games().filter(comment__contains=filterstring).count()))
drawn_tally = []
for filterstring, cleartext in draw_comments:
drawn_tally.append(
(cleartext, ChessGame.objects.drawn_games().filter(comment__contains=filterstring).count()))
lost_tally = []
for filterstring, cleartext in lost_comments:
lost_tally.append(
(cleartext, ChessGame.objects.lost_games().filter(comment__contains=filterstring).count()))
else:
won_tally = lost_tally = drawn_tally = []
# stats over last three months
three_months_ago = today + relativedelta(months=-3)
three_months_games = allgames.filter(date__range=(three_months_ago, today))
if not three_months_games:
# data contains only older games
latest_date = allgames.latest().date
three_months_ago = latest_date + relativedelta(months=-3)
three_months_games = allgames.filter(date__range=(three_months_ago, today))
# three_months_games.aggregate(Max('self_elo'), Min('self_elo'))
three_months_elotrend = three_months_games.values_list('game_nr', 'date', 'self_elo')
three_months_elos = list(three_months_games.values_list('self_elo'))
stats["three_months_maxelo"] = max(three_months_elos)[0]
stats["three_months_maxelo_date"] = three_months_elotrend[three_months_elos.index(max(three_months_elos))][1]
stats["three_months_minelo"] = min(three_months_elos)[0]
stats["three_months_minelo_date"] = three_months_elotrend[three_months_elos.index(min(three_months_elos))][1]
# stats over color
allgames_played_as_white = allgames.filter(self_white=True)
allgames_played_as_black = allgames.filter(self_white=False)
stats["won_as_white"] = allgames_played_as_white.filter(result="1-0").count()
stats["drawn_as_white"] = allgames_played_as_white.filter(result__contains="1/2").count()
stats["lost_as_white"] = allgames_played_as_white.filter(result="0-1").count()
stats["won_as_black"] = allgames_played_as_black.filter(result="0-1").count()
stats["drawn_as_black"] = allgames_played_as_black.filter(result__contains="1/2").count()
stats["lost_as_black"] = allgames_played_as_black.filter(result="1-0").count()
# stats over opponents
opponent_elosum = 0
stronger = 0
weaker = 0
last3months = allgames.filter(date__gt=three_months_ago)
numberofgames_last3months = last3months.count()
for game in last3months:
opponent_elosum += game.opponent_elo
if game.self_elo < game.opponent_elo:
stronger += 1
else:
weaker += 1
stats["opponentaverage"] = float(opponent_elosum) / numberofgames_last3months
stats["stronger"] = float(stronger) / numberofgames_last3months * 100
stats["weaker"] = float(weaker) / numberofgames_last3months * 100
most_frequent_opponents = {}
for game in allgames:
score = most_frequent_opponents.get(game.opponent_name, [0, 0, 0, 0])
if game.self_white:
myresult = game.result.split("-")[0]
else:
myresult = game.result.split("-")[1]
score[0] += 1
if myresult == '1':
score[1] += 1
if myresult == '1/2':
score[2] += 1
if myresult == '0':
score[3] += 1
most_frequent_opponents[game.opponent_name] = score
most_frequent_opponents = [(v, k) for k, v in most_frequent_opponents.items()]
most_frequent_opponents = sorted(most_frequent_opponents, reverse=True)
strongest_opponents_won = ChessGame.objects.won_games().values_list('opponent_elo', 'opponent_name', 'date')
strongest_opponents_won = sorted(strongest_opponents_won, reverse=True)
# return the response
return render_to_response('stats.html', {
'player': player,
'stats': stats,
'won_tally': won_tally,
'drawn_tally': drawn_tally,
'lost_tally': lost_tally,
'most_frequent_opponents': most_frequent_opponents[:15],
'strongest_opponents_won': strongest_opponents_won[:15],
'last100games': allgames.reverse()[number_of_games - 100:number_of_games]
}, context_instance=RequestContext(request))
| 7,205
| 0
| 23
|
c61e98f14c9a76d67981dbcf26cd371477e7bf9b
| 982
|
py
|
Python
|
tools/perf/cli_tools/flakiness_cli/cached_api.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
tools/perf/cli_tools/flakiness_cli/cached_api.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
tools/perf/cli_tools/flakiness_cli/cached_api.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import hashlib
from cli_tools.flakiness_cli import api
from cli_tools.flakiness_cli import frames
def GetBuilders():
"""Get the builders data frame and keep a cached copy."""
return frames.GetWithCache(
'builders.pkl', make_frame, expires_after=datetime.timedelta(hours=12))
def GetTestResults(master, builder, test_type):
"""Get a test results data frame and keep a cached copy."""
basename = hashlib.md5('/'.join([master, builder, test_type])).hexdigest()
return frames.GetWithCache(
basename + '.pkl', make_frame, expires_after=datetime.timedelta(hours=3))
| 31.677419
| 79
| 0.746436
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import hashlib
from cli_tools.flakiness_cli import api
from cli_tools.flakiness_cli import frames
def GetBuilders():
"""Get the builders data frame and keep a cached copy."""
def make_frame():
data = api.GetBuilders()
return frames.BuildersDataFrame(data)
return frames.GetWithCache(
'builders.pkl', make_frame, expires_after=datetime.timedelta(hours=12))
def GetTestResults(master, builder, test_type):
"""Get a test results data frame and keep a cached copy."""
def make_frame():
data = api.GetTestResults(master, builder, test_type)
return frames.TestResultsDataFrame(data)
basename = hashlib.md5('/'.join([master, builder, test_type])).hexdigest()
return frames.GetWithCache(
basename + '.pkl', make_frame, expires_after=datetime.timedelta(hours=3))
| 166
| 0
| 48
|
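GetWithCache above is called with a cache basename, a frame-builder callable, and an expiry, but its body lives in the frames module and is not shown here. A rough sketch, under the assumption that it is a pickle-backed cache keyed on file age; the paths and names below are illustrative, not the actual Chromium implementation:
import datetime
import os
import pickle
import tempfile
CACHE_DIR = os.path.join(tempfile.gettempdir(), 'flakiness_cache')  # assumed location
def get_with_cache(basename, make_value, expires_after):
    """Return a cached value if it is fresh enough, else rebuild and store it."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    path = os.path.join(CACHE_DIR, basename)
    if os.path.exists(path):
        mtime = datetime.datetime.fromtimestamp(os.path.getmtime(path))
        if datetime.datetime.now() - mtime < expires_after:
            with open(path, 'rb') as f:
                return pickle.load(f)
    value = make_value()  # cache miss or stale entry: rebuild
    with open(path, 'wb') as f:
        pickle.dump(value, f)
    return value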
04140bf8dad7ca4dcbe83e3c3ccd08b76ceba45a
| 1,373
|
py
|
Python
|
alipay/aop/api/response/AlipayPcreditHuabeiRpacrawlerQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/response/AlipayPcreditHuabeiRpacrawlerQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/response/AlipayPcreditHuabeiRpacrawlerQueryResponse.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.RpaCrawlerTaskVO import RpaCrawlerTaskVO
| 31.930233
| 115
| 0.671522
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.RpaCrawlerTaskVO import RpaCrawlerTaskVO
class AlipayPcreditHuabeiRpacrawlerQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayPcreditHuabeiRpacrawlerQueryResponse, self).__init__()
self._task_list = None
self._total_count = None
@property
def task_list(self):
return self._task_list
@task_list.setter
def task_list(self, value):
if isinstance(value, list):
self._task_list = list()
for i in value:
if isinstance(i, RpaCrawlerTaskVO):
self._task_list.append(i)
else:
self._task_list.append(RpaCrawlerTaskVO.from_alipay_dict(i))
@property
def total_count(self):
return self._total_count
@total_count.setter
def total_count(self, value):
self._total_count = value
def parse_response_content(self, response_content):
response = super(AlipayPcreditHuabeiRpacrawlerQueryResponse, self).parse_response_content(response_content)
if 'task_list' in response:
self.task_list = response['task_list']
if 'total_count' in response:
self.total_count = response['total_count']
| 877
| 279
| 23
|
f4fd19ffcd413cc3ea9cef920574e76c57546a52
| 1,323
|
py
|
Python
|
appengine/monorail/api/prpc/grpc/beta/implementations.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | 1
|
2018-01-02T05:47:07.000Z
|
2018-01-02T05:47:07.000Z
|
appengine/monorail/api/prpc/grpc/beta/implementations.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/monorail/api/prpc/grpc/beta/implementations.py
|
mithro/chromium-infra
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""This file exists so that code in gRPC *_pb2.py files is importable.
The protoc compiler for gRPC .proto files produces Python code which contains
two separate code-paths. One codepath just requires importing grpc.py; the
other uses the beta interface. Since we are relying on the former codepath,
this file doesn't need to contain any actual implementation. It just needs
to contain the symbols that the _pb2.py file expects to find when it imports
the module.
"""
| 37.8
| 77
| 0.798942
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""This file exists so that code in gRPC *_pb2.py files is importable.
The protoc compiler for gRPC .proto files produces Python code which contains
two separate code-paths. One codepath just requires importing grpc.py; the
other uses the beta interface. Since we are relying on the former codepath,
this file doesn't need to contain any actual implementation. It just needs
to contain the symbols that the _pb2.py file expects to find when it imports
the module.
"""
def server_options(
_multi_method_implementation=None, _request_deserializers=None,
_response_serializers=None, _thread_pool=None, _thread_pool_size=None,
_default_timeout=None, _maximum_timeout=None):
raise NotImplementedError()
def server(_service_implementations, _options=None):
raise NotImplementedError()
def stub_options(
_host=None, _request_serializers=None, _response_deserializers=None,
_metadata_transformer=None, _thread_pool=None, _thread_pool_size=None):
raise NotImplementedError()
def dynamic_stub(_channel, _service, _cardinalities, _options=None):
raise NotImplementedError()
| 535
| 0
| 92
|
46b2d944f6aa3d20390e129f4e78bec6acad3cb7
| 5,064
|
py
|
Python
|
view.py
|
KungPaoChick/Github-View
|
1395fb6390a1cd9d0f789e4f2efd85953d538d1a
|
[
"MIT"
] | 1
|
2021-03-22T09:11:24.000Z
|
2021-03-22T09:11:24.000Z
|
view.py
|
KungPaoChick/Github-View
|
1395fb6390a1cd9d0f789e4f2efd85953d538d1a
|
[
"MIT"
] | null | null | null |
view.py
|
KungPaoChick/Github-View
|
1395fb6390a1cd9d0f789e4f2efd85953d538d1a
|
[
"MIT"
] | null | null | null |
import requests
import argparse
import colorama
import os
import csv
import pandas as pd
from bs4 import BeautifulSoup as soup
if __name__ == '__main__':
colorama.init()
url = 'https://github.com'
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
description="Views Public Repositories of Users")
parser.add_argument('-c', '--clone', nargs='+',
metavar='CLONE', action='store',
help="Clones and Views Public Repositories from the user/s. (e.g -c KungPaoChick uname2 uname3)")
parser.add_argument('-u', '--username',
nargs='+', metavar='USERNAMES',
action='store',
help="Views Public Repositories from the user/s. (e.g -u KungPaoChick uname2 uname3)")
args = parser.parse_args()
if args.clone or args.username:
for name in args.username or args.clone:
if not os.path.exists(name):
os.mkdir(name)
conn(name, url)
| 40.512
| 135
| 0.532188
|
import requests
import argparse
import colorama
import os
import csv
import pandas as pd
from bs4 import BeautifulSoup as soup
def repos(username, links):
with open(os.path.join(name, f'{username}.csv'), 'w', encoding='utf-8') as f:
headers = ['Link', 'Repository', 'Commits',
'Stars', 'Forks', 'Contributors']
writer = csv.writer(f, dialect='excel')
writer.writerow(headers)
print(colorama.Fore.YELLOW,
f'[!] {name} has {len(links)} Public Repositor{plural_ies(len(links))}\n',
colorama.Style.RESET_ALL)
for link in links:
try:
with requests.get(link) as rep_response:
rep_response.raise_for_status()
rep_soup = soup(rep_response.text, 'html.parser')
my_data = [link]
# gets repo name
for repo_name in rep_soup.findAll('a', {'data-pjax': '#js-repo-pjax-container'}):
print(colorama.Fore.GREEN,
f'[*] {repo_name.text}', colorama.Style.RESET_ALL)
my_data.append(repo_name.text)
if args.clone:
os.system(f'git clone {link}.git ~/Documents/{repo_name.text}')
# gets number of commits to the repository
my_data.append([x.text.split() for x in rep_soup.findAll(
'ul', {'class': 'list-style-none d-flex'})][0].pop(0))
# gets description of the repository
with open(os.path.join(name, f'{repo_name.text}.txt'), 'w') as repo_des:
abouts = {'None': [x.text for x in rep_soup.findAll('div', {'class': 'f4 mt-3 color-text-secondary text-italic'})],
'About': [x.text for x in rep_soup.findAll('p', {'class': 'f4 mt-3'})]}
if abouts['About'] == []:
os.remove(os.path.join(name, f'{repo_name.text}.txt'))
else:
for info in abouts['About']:
repo_des.write(info)
# gets star count
my_data.append([star.text.split() for star in rep_soup.findAll(
'a', {'href': f'{link.split(url)[1]}/stargazers'})].pop(0)[0])
# gets fork count
my_data.append([fork.text.split() for fork in rep_soup.findAll(
'a', {'href': f'{link.split(url)[1]}/network/members'})].pop(0)[0])
con = []
for contributor in [x['title'] for x in rep_soup.findAll('span', {'class': 'Counter'})]:
con.append(contributor)
if int(con[-1]) == 0:
my_data.append(1)
else:
my_data.append(con[-1])
writer.writerows([my_data])
except requests.HTTPError as err:
print(colorama.Fore.RED,
f'[!!] Something went wrong! {err}',
colorama.Style.RESET_ALL)
return read_data(os.path.join(os.getcwd(), name), f'{username}.csv')
def read_data(path, filename):
df = pd.read_csv(os.path.join(path, filename))
pd.set_option('display.max_rows', None)
df.drop(['Link'], axis=1, inplace=True)
print(f'"{filename}" Data Frame:\n\n{df}')
def plural_ies(v):
return 'ies' if not abs(v) == 1 else 'y'
def conn(uname, main_url):
try:
with requests.get(f'{main_url}/{uname}?tab=repositories') as response:
response.raise_for_status()
page_soup = soup(response.text, 'html.parser')
links = []
for repo in page_soup.findAll('a', {'itemprop': 'name codeRepository'}):
links.append(f"{main_url}{repo['href']}")
for name in page_soup.findAll('span', {'itemprop': 'name'}):
return repos(''.join(name.text.split()), links)
except requests.HTTPError as err:
print(colorama.Fore.RED,
f'[!!] Something went wrong! {err}',
colorama.Style.RESET_ALL)
if __name__ == '__main__':
colorama.init()
url = 'https://github.com'
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
description="Views Public Repositories of Users")
parser.add_argument('-c', '--clone', nargs='+',
metavar='CLONE', action='store',
help="Clones and Views Public Repositories from the user/s. (e.g -c KungPaoChick uname2 uname3)")
parser.add_argument('-u', '--username',
nargs='+', metavar='USERNAMES',
action='store',
help="Views Public Repositories from the user/s. (e.g -u KungPaoChick uname2 uname3)")
args = parser.parse_args()
if args.clone or args.username:
for name in args.username or args.clone:
if not os.path.exists(name):
os.mkdir(name)
conn(name, url)
| 3,888
| 0
| 92
|
9b3e086c5f8b68c1abcbb918ec7ee8071dbf97bd
| 970
|
py
|
Python
|
tests/rules/test_string_pairs_equal.py
|
chuxuantinh/vakt
|
30be4880e666a0f5dbe4dd1501870bae54822a5b
|
[
"Apache-2.0"
] | 132
|
2018-10-29T14:58:26.000Z
|
2022-03-04T10:43:26.000Z
|
tests/rules/test_string_pairs_equal.py
|
chuxuantinh/vakt
|
30be4880e666a0f5dbe4dd1501870bae54822a5b
|
[
"Apache-2.0"
] | 42
|
2018-08-31T10:41:43.000Z
|
2021-08-01T08:19:06.000Z
|
tests/rules/test_string_pairs_equal.py
|
chuxuantinh/vakt
|
30be4880e666a0f5dbe4dd1501870bae54822a5b
|
[
"Apache-2.0"
] | 22
|
2018-12-24T03:42:46.000Z
|
2022-03-16T04:42:25.000Z
|
import pytest
from vakt.rules.string import PairsEqual, StringPairsEqualRule
@pytest.mark.parametrize('against, result', [
([[]], False),
([], True),
("not-list", False),
([['a']], False),
([['a', 'a']], True),
([['й', 'й']], True),
([[1, '1']], False),
([['1', 1]], False),
([[1, 1]], False),
([[1.0, 1.0]], False),
([['a', 'b']], False),
([['a', 'b', 'c']], False),
([['a', 'a'], ['b', 'b']], True),
([['a', 'a'], ['b', 'c']], False),
])
| 30.3125
| 87
| 0.554639
|
import pytest
from vakt.rules.string import PairsEqual, StringPairsEqualRule
@pytest.mark.parametrize('against, result', [
([[]], False),
([], True),
("not-list", False),
([['a']], False),
([['a', 'a']], True),
([['й', 'й']], True),
([[1, '1']], False),
([['1', 1]], False),
([[1, 1]], False),
([[1.0, 1.0]], False),
([['a', 'b']], False),
([['a', 'b', 'c']], False),
([['a', 'a'], ['b', 'b']], True),
([['a', 'a'], ['b', 'c']], False),
])
def test_string_pairs_equal_satisfied(against, result):
c = PairsEqual()
assert result == c.satisfied(against)
# test after (de)serialization
assert result == PairsEqual.from_json(PairsEqual().to_json()).satisfied(against)
# test deprecated class
with pytest.deprecated_call():
c = StringPairsEqualRule()
assert result == c.satisfied(against)
assert result == StringPairsEqualRule.from_json(c.to_json()).satisfied(against)
| 449
| 0
| 22
|
efb0ebee4c7b1376a0afa2caeb0abdf143336a6d
| 1,469
|
py
|
Python
|
code_scanner/code_scanner/file_utils.py
|
stargrep/rmm-utils
|
55725cb117015bbe35653120779c53ff39a999bc
|
[
"MIT"
] | null | null | null |
code_scanner/code_scanner/file_utils.py
|
stargrep/rmm-utils
|
55725cb117015bbe35653120779c53ff39a999bc
|
[
"MIT"
] | null | null | null |
code_scanner/code_scanner/file_utils.py
|
stargrep/rmm-utils
|
55725cb117015bbe35653120779c53ff39a999bc
|
[
"MIT"
] | null | null | null |
from functools import reduce
from pathlib import Path
from code_scanner.enums import FileType
from code_scanner.file_info import FileInfo
from code_scanner.filter_utils import IFileFilter
| 32.644444
| 97
| 0.653506
|
from functools import reduce
from pathlib import Path
from code_scanner.enums import FileType
from code_scanner.file_info import FileInfo
from code_scanner.filter_utils import IFileFilter
def retrieve_all_folders(root: Path,
filters: [IFileFilter],
include_root: bool = True,
recursive: bool = True) -> [FileInfo]:
filtered = list_folders(root, filters)
if recursive:
to_visit = [i for i in filtered]
while len(to_visit) > 0:
collected = list_folders(to_visit.pop(), filters)
to_visit += collected
filtered += collected
filtered = list(map(convert, filtered))
if include_root:
filtered.append(convert(root))
return filtered
return filtered
def list_folders(root: Path, filters: [IFileFilter]) -> [Path]:
return list(reduce(lambda prev, f: f.filter(prev), filters, list(root.glob("*"))))
def retrieve_files(folders: [FileInfo], filters: [IFileFilter]) -> [FileInfo]:
all_files = reduce(lambda prev, folder: prev + list(folder.full_name.glob("*")), folders, [])
filtered = reduce(lambda prev, f: f.filter(prev), filters, all_files)
return list(map(convert, filtered))
def convert(path: Path) -> FileInfo:
if path.is_file():
return FileInfo(path, FileType.SOURCE_CODE)
elif path.is_dir():
return FileInfo(path, FileType.DIR_SOURCE)
return FileInfo(path)
| 1,185
| 0
| 92
|
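The folder and file helpers above thread every candidate Path through a chain of IFileFilter objects with reduce; the filter interface itself is not shown. A hypothetical filter and a small composition example, assuming only that each filter exposes .filter(list_of_paths):
from functools import reduce
from pathlib import Path
class SuffixFilter:
    """Hypothetical IFileFilter-style filter: keeps only paths with a given suffix."""
    def __init__(self, suffix):
        self.suffix = suffix
    def filter(self, paths):
        return [p for p in paths if p.suffix == self.suffix]
if __name__ == "__main__":
    candidates = [Path("a.py"), Path("b.txt"), Path("pkg/c.py")]
    filters = [SuffixFilter(".py")]
    print(reduce(lambda prev, f: f.filter(prev), filters, candidates))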
4533666d2d659e745b97da7e3fecdf5f628cc17e
| 1,212
|
py
|
Python
|
project/apps/accounts/migrations/0007_auto_20211111_1519.py
|
DiceNameIsMy/social-network
|
da02ea27c21705449c4ff3913d79e808d33e3de4
|
[
"MIT"
] | null | null | null |
project/apps/accounts/migrations/0007_auto_20211111_1519.py
|
DiceNameIsMy/social-network
|
da02ea27c21705449c4ff3913d79e808d33e3de4
|
[
"MIT"
] | 2
|
2021-11-01T04:17:19.000Z
|
2021-11-05T07:40:43.000Z
|
project/apps/accounts/migrations/0007_auto_20211111_1519.py
|
DiceNameIsMy/social-network
|
da02ea27c21705449c4ff3913d79e808d33e3de4
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-11-11 09:19
from django.db import migrations, models
import django.db.models.deletion
| 34.628571
| 271
| 0.607261
|
# Generated by Django 3.2.8 on 2021-11-11 09:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('accounts', '0006_alter_notification_content_type'),
]
operations = [
migrations.AlterModelOptions(
name='notification',
options={'ordering': ['-created_at']},
),
migrations.AddField(
model_name='notification',
name='extra',
field=models.JSONField(default=dict),
),
migrations.AlterField(
model_name='notification',
name='content_type',
field=models.ForeignKey(limit_choices_to=models.Q(models.Q(('app_label', 'accounts'), ('model', 'customuser')), models.Q(('app_label', 'chats'), ('model', 'chat')), _connector='OR'), on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype'),
),
migrations.AlterField(
model_name='notification',
name='type',
field=models.IntegerField(choices=[(1, 'Chat Message'), (2, 'Friend Request')]),
),
]
| 0
| 1,065
| 23
|
fa87d1dd7dae39daf67a20a481e554aa0260f9b0
| 479
|
py
|
Python
|
example.py
|
zietzm/dfs-sudoku-solver
|
396320c84823baa6f18d69fb9c315af08f580096
|
[
"MIT"
] | null | null | null |
example.py
|
zietzm/dfs-sudoku-solver
|
396320c84823baa6f18d69fb9c315af08f580096
|
[
"MIT"
] | null | null | null |
example.py
|
zietzm/dfs-sudoku-solver
|
396320c84823baa6f18d69fb9c315af08f580096
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
import solver
board = np.array([
[0,0,0,0,9,0,8,2,3],
[0,8,0,0,3,2,0,7,5],
[3,0,2,5,8,0,4,9,0],
[0,2,7,0,0,0,0,0,4],
[0,9,0,2,1,4,0,8,0],
[4,0,0,0,0,0,2,0,0],
[0,4,0,0,7,1,0,0,2],
[2,0,0,9,4,0,0,5,0],
[0,0,6,0,2,5,0,4,0]
])
b = solver.SudokuSolver(board)
t1 = time.time()
b.solve()
t2 = time.time() - t1
assert b.valid_board()
print(f"Time: {t2} seconds")
print(f"Steps: {b.num_steps}")
print(b.board)
| 14.515152
| 30
| 0.524008
|
import time
import numpy as np
import solver
board = np.array([
[0,0,0,0,9,0,8,2,3],
[0,8,0,0,3,2,0,7,5],
[3,0,2,5,8,0,4,9,0],
[0,2,7,0,0,0,0,0,4],
[0,9,0,2,1,4,0,8,0],
[4,0,0,0,0,0,2,0,0],
[0,4,0,0,7,1,0,0,2],
[2,0,0,9,4,0,0,5,0],
[0,0,6,0,2,5,0,4,0]
])
b = solver.SudokuSolver(board)
t1 = time.time()
b.solve()
t2 = time.time() - t1
assert b.valid_board()
print(f"Time: {t2} seconds")
print(f"Steps: {b.num_steps}")
print(b.board)
| 0
| 0
| 0
|
754f0eb1193c812774762375390c0ce0266b9d41
| 464
|
py
|
Python
|
config/configuration.py
|
TechhubLisbon/rpi_truck
|
5e024464f064ba7618a7e53b88a413d23ba20dce
|
[
"MIT"
] | 3
|
2019-12-15T04:08:22.000Z
|
2021-11-28T16:34:08.000Z
|
config/configuration.py
|
TechhubLisbon/rpi_truck
|
5e024464f064ba7618a7e53b88a413d23ba20dce
|
[
"MIT"
] | 1
|
2021-06-02T00:21:10.000Z
|
2021-06-02T00:21:10.000Z
|
config/configuration.py
|
TechhubLisbon/rpi_truck
|
5e024464f064ba7618a7e53b88a413d23ba20dce
|
[
"MIT"
] | 1
|
2019-07-05T13:33:38.000Z
|
2019-07-05T13:33:38.000Z
|
#!/usr/bin/env python
# Root topic
rootTopic = "truck1"
# Broker configuration
mqttBroker = "192.168.1.126"
mqttPort = "1883"
mqttUser = " "
mqttPasswd = " "
# Components configuration
componentDic = {
"imuClass": "Imu",
"proximityClass": "ProximitySensor",
"motorClass": "Motor",
"cameraClass": "Camera"}
componentsSamplingIntevalInSeconds = {
"imuClass": 0.1,
"proximityClass": 0.4,
"motorClass": 10.0,
"cameraClass": 100.0}
| 18.56
| 40
| 0.653017
|
#!/usr/bin/env python
# Root topic
rootTopic = "truck1"
# Broker configuration
mqttBroker = "192.168.1.126"
mqttPort = "1883"
mqttUser = " "
mqttPasswd = " "
# Components configuration
componentDic = {
"imuClass": "Imu",
"proximityClass": "ProximitySensor",
"motorClass": "Motor",
"cameraClass": "Camera"}
componentsSamplingIntevalInSeconds = {
"imuClass": 0.1,
"proximityClass": 0.4,
"motorClass": 10.0,
"cameraClass": 100.0}
| 0
| 0
| 0
|
1c7bbfdad3d0bd492d8072ec6d86909275b5cf2c
| 454
|
py
|
Python
|
src/exmample.py
|
liliangbin/twist-to-wheel-speed
|
5a25b08a50b5f828a783d7e8723c64955528c76d
|
[
"Apache-2.0"
] | 1
|
2020-06-13T23:48:27.000Z
|
2020-06-13T23:48:27.000Z
|
src/exmample.py
|
liliangbin/twist-to-wheel-speed
|
5a25b08a50b5f828a783d7e8723c64955528c76d
|
[
"Apache-2.0"
] | null | null | null |
src/exmample.py
|
liliangbin/twist-to-wheel-speed
|
5a25b08a50b5f828a783d7e8723c64955528c76d
|
[
"Apache-2.0"
] | null | null | null |
import time
import RPi.GPIO as gpio
pin = 33
pin_wheel = 35
while True:
print 'go straight '
gpio.setmode(gpio.BOARD)
gpio.setup(pin, gpio.OUT)
gpio.setup(pin_wheel, gpio.OUT)
gpio.output(pin_wheel, gpio.HIGH)
p = gpio.PWM(pin, 400)
p.start(0)
dc = 10
for i in range(40):
dc += 2
print 'dc:', dc
p.ChangeDutyCycle(dc)
time.sleep(0.3);
p.stop()
gpio.cleanup()
print 'done'
| 18.916667
| 37
| 0.581498
|
import time
import RPi.GPIO as gpio
pin = 33
pin_wheel = 35
while True:
print 'go straight '
gpio.setmode(gpio.BOARD)
gpio.setup(pin, gpio.OUT)
gpio.setup(pin_wheel, gpio.OUT)
gpio.output(pin_wheel, gpio.HIGH)
p = gpio.PWM(pin, 400)
p.start(0)
dc = 10
for i in range(40):
dc += 2
print 'dc:', dc
p.ChangeDutyCycle(dc)
time.sleep(0.3);
p.stop()
gpio.cleanup()
print 'done'
| 0
| 0
| 0
|
8cf5b17f3cc5a4e711432a819079968050d16523
| 655
|
py
|
Python
|
exercise/Bai1.py
|
nguyenquanghieu2000d/HocMayNangCao
|
2c6df4527f4661840c1d0bea2c42d5f18309e360
|
[
"Apache-2.0"
] | null | null | null |
exercise/Bai1.py
|
nguyenquanghieu2000d/HocMayNangCao
|
2c6df4527f4661840c1d0bea2c42d5f18309e360
|
[
"Apache-2.0"
] | null | null | null |
exercise/Bai1.py
|
nguyenquanghieu2000d/HocMayNangCao
|
2c6df4527f4661840c1d0bea2c42d5f18309e360
|
[
"Apache-2.0"
] | null | null | null |
# Exercise 2. Use the pandas library to read the Iris.csv file into the in-memory variable df.
# 2a. Display df.
# 2b. Convert the label column y (Species) into one-hot-encoded (OHE) data. Display the encoded data.
# 2c. Build the input vector columns x and the output label vector columns y from df. Display x and y.
# Solution:
import pandas as pd
# 2a
df = pd.read_csv("../data/Iris.csv")
print(df)
# 2b
one_hot_encoded_data = pd.get_dummies(df, columns=['Species'])
print(one_hot_encoded_data)
# 2c
x = df[['SepalWidthCm', 'SepalLengthCm', 'PetalLengthCm', 'PetalWidthCm']]
print(x)
y = one_hot_encoded_data[['Species_Iris-setosa', 'Species_Iris-versicolor', 'Species_Iris-virginica']]
print(y)
| 34.473684
| 102
| 0.719084
|
# Exercise 2. Use the pandas library to read the Iris.csv file into the in-memory variable df.
# 2a. Display df.
# 2b. Convert the label column y (Species) into one-hot-encoded (OHE) data. Display the encoded data.
# 2c. Build the input vector columns x and the output label vector columns y from df. Display x and y.
# Solution:
import pandas as pd
# 2a
df = pd.read_csv("../data/Iris.csv")
print(df)
# 2b
one_hot_encoded_data = pd.get_dummies(df, columns=['Species'])
print(one_hot_encoded_data)
# 2c
x = df[['SepalWidthCm', 'SepalLengthCm', 'PetalLengthCm', 'PetalWidthCm']]
print(x)
y = one_hot_encoded_data[['Species_Iris-setosa', 'Species_Iris-versicolor', 'Species_Iris-virginica']]
print(y)
| 0
| 0
| 0
|
73f55c453ed7f9744328b2e5877299831b238e6b
| 3,214
|
py
|
Python
|
entries/day16/main.py
|
ZeldaZach/AdventOfCode2021
|
3eefd3dcaeb331457c4fc58866705aa6c6580830
|
[
"MIT"
] | 1
|
2022-01-02T11:00:04.000Z
|
2022-01-02T11:00:04.000Z
|
entries/day16/main.py
|
ZeldaZach/AdventOfCode2021
|
3eefd3dcaeb331457c4fc58866705aa6c6580830
|
[
"MIT"
] | null | null | null |
entries/day16/main.py
|
ZeldaZach/AdventOfCode2021
|
3eefd3dcaeb331457c4fc58866705aa6c6580830
|
[
"MIT"
] | null | null | null |
import pathlib
from functools import reduce
from typing import List, Tuple
if __name__ == "__main__":
main()
| 28.954955
| 86
| 0.632234
|
import pathlib
from functools import reduce
from typing import List, Tuple
def read_inputs(input_file: str) -> str:
with pathlib.Path(input_file).open() as fp:
return str(fp.readline()).strip()
def part1() -> int:
# 35 minutes
line = read_inputs("input.txt")
total = ""
for char in line:
x = bin(int(char, 16))[2:].zfill(4)
total += x
return operate_on_data_recursively(total)[0]
def part2() -> int:
# 14 minutes
line = read_inputs("input.txt")
total = ""
for char in line:
x = bin(int(char, 16))[2:].zfill(4)
total += x
return operate_on_data_recursively(total)[2]
def operate_on_data_recursively(bin_data: str) -> Tuple[int, int, int]:
version = int(bin_data[0:3], 2)
type_id = int(bin_data[3:6], 2)
version_sum_found, offset_bits, inner_values = (
operate_on_inner_packets_recursively(bin_data) if type_id != 4 else (0, 0, 0)
)
version_sum = version + version_sum_found
if type_id == 0:
return version_sum, offset_bits, sum(x for x in inner_values)
if type_id == 1:
return version_sum, offset_bits, reduce(lambda x, y: x * y, inner_values)
if type_id == 2:
return version_sum, offset_bits, min(inner_values)
if type_id == 3:
return version_sum, offset_bits, max(inner_values)
if type_id == 4:
all_bits_in_number = ""
offset_bits = 6
while bin_data[offset_bits] == "1":
all_bits_in_number += bin_data[offset_bits + 1 : offset_bits + 5]
offset_bits += 5
all_bits_in_number += bin_data[offset_bits + 1 : offset_bits + 5]
value = int(all_bits_in_number, 2)
return version_sum, offset_bits + 5, value
if type_id == 5:
return version_sum, offset_bits, int(inner_values[0] > inner_values[1])
if type_id == 6:
return version_sum, offset_bits, int(inner_values[0] < inner_values[1])
if type_id == 7:
return version_sum, offset_bits, int(inner_values[0] == inner_values[1])
def operate_on_inner_packets_recursively(bin_data: str) -> Tuple[int, int, List[int]]:
inner_values = []
version_sum = 0
type_id_length = int(bin_data[6], 2)
if type_id_length:
offset_bits = 18
total_number_of_sub_packets = int(bin_data[7:18], 2)
for _ in range(total_number_of_sub_packets):
version_sum_found, bits_used, value = operate_on_data_recursively(
bin_data[offset_bits:]
)
offset_bits += bits_used
inner_values.append(value)
version_sum += version_sum_found
else:
offset_bits = 22
length_of_all_sub_packets = int(bin_data[7:22], 2) + offset_bits
while offset_bits < length_of_all_sub_packets:
version_sum_found, bits_used, value = operate_on_data_recursively(
bin_data[offset_bits:]
)
offset_bits += bits_used
inner_values.append(value)
version_sum += version_sum_found
return version_sum, offset_bits, inner_values
def main() -> None:
print(part1())
print(part2())
if __name__ == "__main__":
main()
| 2,955
| 0
| 138
|
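The decoder above expands the hex transmission into a bit string and then reads a 3-bit version, a 3-bit type id, and either a literal value (type 4) or nested sub-packets. A worked example of just the hex-to-bits step and the literal layout, using the small packet D2FE28:
hex_packet = "D2FE28"
bits = "".join(bin(int(ch, 16))[2:].zfill(4) for ch in hex_packet)
print(bits)               # 110100101111111000101000
print(int(bits[0:3], 2))  # version -> 6
print(int(bits[3:6], 2))  # type id -> 4, i.e. a literal value
value_bits, i = "", 6
while bits[i] == "1":     # a leading 1 means another 4-bit group follows
    value_bits += bits[i + 1:i + 5]
    i += 5
value_bits += bits[i + 1:i + 5]  # final group starts with 0
print(int(value_bits, 2))        # literal value -> 2021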
b70257688930fda0f16498d55384d7c0416d889c
| 4,179
|
py
|
Python
|
test3.py
|
fatman2021/project-manhattan
|
a24febefcdd63923c6ba7da78e4d9db95157c4bf
|
[
"Apache-2.0"
] | 2
|
2020-01-08T06:18:56.000Z
|
2021-07-24T03:05:10.000Z
|
test3.py
|
fatman2021/project-manhattan
|
a24febefcdd63923c6ba7da78e4d9db95157c4bf
|
[
"Apache-2.0"
] | null | null | null |
test3.py
|
fatman2021/project-manhattan
|
a24febefcdd63923c6ba7da78e4d9db95157c4bf
|
[
"Apache-2.0"
] | 1
|
2021-07-24T03:05:11.000Z
|
2021-07-24T03:05:11.000Z
|
# Start of file
import bpy
bpy.context.scene.render.engine = 'CYCLES'
bpy.context.scene.render.resolution_x = 320
bpy.context.scene.render.resolution_y = 208
bpy.context.scene.render.resolution_percentage = 100
bpy.context.scene.render.image_settings.file_format = 'BMP'
bpy.context.scene.render.tile_x = 16
bpy.context.scene.render.tile_y = 16
bpy.context.scene.render.use_persistent_data = True
bpy.context.scene.cycles.use_progressive_refine = True
bpy.context.scene.render.use_save_buffers = True
bpy.context.scene.render.use_border = True
bpy.context.scene.cycles.device = 'CPU'
bpy.context.scene.cycles.max_bounces = 2
bpy.context.scene.cycles.min_bounces = 0
bpy.context.scene.cycles.diffuse_bounces = 0
bpy.context.scene.cycles.glossy_bounces = 0
bpy.context.scene.cycles.transmission_bounces = 2
bpy.context.scene.cycles.transparent_max_bounces = 0
bpy.context.scene.cycles.transparent_min_bounces = 0
bpy.context.scene.cycles.caustics_reflective = False
bpy.context.scene.cycles.caustics_refractive = False
bpy.context.scene.cycles.use_square_samples = True
bpy.context.scene.cycles.samples = 4
bpy.context.scene.cycles.debug_use_spatial_splits = True
bpy.context.scene.world.cycles.max_bounces = 1
bpy.context.object.data.cycles.is_portal = True
bpy.context.scene.cycles.debug_use_hair_bvh = False
bpy.data.scenes['Scene'].render.filepath = './0.bmp'
bpy.ops.object.delete(use_global=False)
bpy.ops.mesh.primitive_monkey_add()
bpy.ops.transform.translate(value=(0.0,1.0,1.0))
bpy.ops.object.shade_smooth()
bpy.ops.mesh.primitive_plane_add()
bpy.ops.transform.resize(value=(8.0,8.0,8.0))
bpy.data.objects['Lamp'].select = True
bpy.context.scene.objects.active = bpy.data.objects['Lamp']
bpy.data.lamps['Lamp'].type = "SUN"
bpy.data.lamps['Lamp'].use_nodes = True
bpy.data.lamps['Lamp'].node_tree.nodes['Emission'].inputs['Strength'].default_value = 5
bpy.data.lamps['Lamp'].node_tree.nodes["Emission"].inputs["Color"].default_value = (1.0,0.80,0.50,1.0)
bpy.data.objects['Suzanne'].select = True
bpy.context.scene.objects.active = bpy.data.objects['Suzanne']
bpy.data.materials.new('Glass')
bpy.data.materials['Glass'].use_nodes = True
bpy.data.materials['Glass'].node_tree.nodes.new(type="ShaderNodeBsdfGlass")
inp = bpy.data.materials['Glass'].node_tree.nodes["Material Output"].inputs["Surface"]
outp = bpy.data.materials['Glass'].node_tree.nodes["Glass BSDF"].outputs["BSDF"]
bpy.data.materials['Glass'].node_tree.links.new(inp,outp)
bpy.data.objects['Suzanne'].active_material = bpy.data.materials['Glass']
bpy.data.materials['Glass'].node_tree.nodes["Glass BSDF"].inputs["Color"].default_value = (1.0,0.80,0.50,1.0)
bpy.ops.mesh.primitive_monkey_add()
bpy.ops.transform.translate(value=(3.0,1.0,1.0))
bpy.ops.object.shade_smooth()
bpy.data.materials.new('Glossy')
bpy.data.materials['Glossy'].use_nodes = True
bpy.data.materials['Glossy'].node_tree.nodes.new(type="ShaderNodeBsdfGlossy")
inp = bpy.data.materials['Glossy'].node_tree.nodes["Material Output"].inputs["Surface"]
outp = bpy.data.materials['Glossy'].node_tree.nodes["Glossy BSDF"].outputs["BSDF"]
bpy.data.materials['Glossy'].node_tree.links.new(inp,outp)
bpy.data.objects['Suzanne.001'].active_material = bpy.data.materials['Glossy']
bpy.data.objects['Plane'].active_material = bpy.data.materials['Glossy']
bpy.data.materials['Glossy'].node_tree.nodes["Glossy BSDF"].inputs["Color"].default_value = (1.0,0.80,0.50,1.0)
bpy.ops.mesh.primitive_monkey_add()
bpy.ops.transform.translate(value=(-3.0,1.0,1.0))
bpy.ops.object.shade_smooth()
bpy.data.materials.new('Deffuse')
bpy.data.materials['Deffuse'].use_nodes = True
bpy.data.materials['Deffuse'].node_tree.nodes.new(type="ShaderNodeBsdfDiffuse")
inp = bpy.data.materials['Deffuse'].node_tree.nodes["Material Output"].inputs["Surface"]
outp = bpy.data.materials['Deffuse'].node_tree.nodes["Diffuse BSDF"].outputs["BSDF"]
bpy.data.materials['Deffuse'].node_tree.links.new(inp,outp)
bpy.data.objects['Suzanne.002'].active_material = bpy.data.materials['Deffuse']
bpy.data.materials['Deffuse'].node_tree.nodes["Diffuse BSDF"].inputs["Color"].default_value = (1.0,0.80,0.50,1.0)
bpy.ops.render.render(use_viewport = True, write_still=True)
# End of file
| 52.898734
| 113
| 0.775305
|
# Start of file
import bpy
bpy.context.scene.render.engine = 'CYCLES'
bpy.context.scene.render.resolution_x = 320
bpy.context.scene.render.resolution_y = 208
bpy.context.scene.render.resolution_percentage = 100
bpy.context.scene.render.image_settings.file_format = 'BMP'
bpy.context.scene.render.tile_x = 16
bpy.context.scene.render.tile_y = 16
bpy.context.scene.render.use_persistent_data = True
bpy.context.scene.cycles.use_progressive_refine = True
bpy.context.scene.render.use_save_buffers = True
bpy.context.scene.render.use_border = True
bpy.context.scene.cycles.device = 'CPU'
bpy.context.scene.cycles.max_bounces = 2
bpy.context.scene.cycles.min_bounces = 0
bpy.context.scene.cycles.diffuse_bounces = 0
bpy.context.scene.cycles.glossy_bounces = 0
bpy.context.scene.cycles.transmission_bounces = 2
bpy.context.scene.cycles.transparent_max_bounces = 0
bpy.context.scene.cycles.transparent_min_bounces = 0
bpy.context.scene.cycles.caustics_reflective = False
bpy.context.scene.cycles.caustics_refractive = False
bpy.context.scene.cycles.use_square_samples = True
bpy.context.scene.cycles.samples = 4
bpy.context.scene.cycles.debug_use_spatial_splits = True
bpy.context.scene.world.cycles.max_bounces = 1
bpy.context.object.data.cycles.is_portal = True
bpy.context.scene.cycles.debug_use_hair_bvh = False
bpy.data.scenes['Scene'].render.filepath = './0.bmp'
bpy.ops.object.delete(use_global=False)
bpy.ops.mesh.primitive_monkey_add()
bpy.ops.transform.translate(value=(0.0,1.0,1.0))
bpy.ops.object.shade_smooth()
bpy.ops.mesh.primitive_plane_add()
bpy.ops.transform.resize(value=(8.0,8.0,8.0))
bpy.data.objects['Lamp'].select = True
bpy.context.scene.objects.active = bpy.data.objects['Lamp']
bpy.data.lamps['Lamp'].type = "SUN"
bpy.data.lamps['Lamp'].use_nodes = True
bpy.data.lamps['Lamp'].node_tree.nodes['Emission'].inputs['Strength'].default_value = 5
bpy.data.lamps['Lamp'].node_tree.nodes["Emission"].inputs["Color"].default_value = (1.0,0.80,0.50,1.0)
bpy.data.objects['Suzanne'].select = True
bpy.context.scene.objects.active = bpy.data.objects['Suzanne']
bpy.data.materials.new('Glass')
bpy.data.materials['Glass'].use_nodes = True
bpy.data.materials['Glass'].node_tree.nodes.new(type="ShaderNodeBsdfGlass")
inp = bpy.data.materials['Glass'].node_tree.nodes["Material Output"].inputs["Surface"]
outp = bpy.data.materials['Glass'].node_tree.nodes["Glass BSDF"].outputs["BSDF"]
bpy.data.materials['Glass'].node_tree.links.new(inp,outp)
bpy.data.objects['Suzanne'].active_material = bpy.data.materials['Glass']
bpy.data.materials['Glass'].node_tree.nodes["Glass BSDF"].inputs["Color"].default_value = (1.0,0.80,0.50,1.0)
bpy.ops.mesh.primitive_monkey_add()
bpy.ops.transform.translate(value=(3.0,1.0,1.0))
bpy.ops.object.shade_smooth()
bpy.data.materials.new('Glossy')
bpy.data.materials['Glossy'].use_nodes = True
bpy.data.materials['Glossy'].node_tree.nodes.new(type="ShaderNodeBsdfGlossy")
inp = bpy.data.materials['Glossy'].node_tree.nodes["Material Output"].inputs["Surface"]
outp = bpy.data.materials['Glossy'].node_tree.nodes["Glossy BSDF"].outputs["BSDF"]
bpy.data.materials['Glossy'].node_tree.links.new(inp,outp)
bpy.data.objects['Suzanne.001'].active_material = bpy.data.materials['Glossy']
bpy.data.objects['Plane'].active_material = bpy.data.materials['Glossy']
bpy.data.materials['Glossy'].node_tree.nodes["Glossy BSDF"].inputs["Color"].default_value = (1.0,0.80,0.50,1.0)
bpy.ops.mesh.primitive_monkey_add()
bpy.ops.transform.translate(value=(-3.0,1.0,1.0))
bpy.ops.object.shade_smooth()
bpy.data.materials.new('Deffuse')
bpy.data.materials['Deffuse'].use_nodes = True
bpy.data.materials['Deffuse'].node_tree.nodes.new(type="ShaderNodeBsdfDiffuse")
inp = bpy.data.materials['Deffuse'].node_tree.nodes["Material Output"].inputs["Surface"]
outp = bpy.data.materials['Deffuse'].node_tree.nodes["Diffuse BSDF"].outputs["BSDF"]
bpy.data.materials['Deffuse'].node_tree.links.new(inp,outp)
bpy.data.objects['Suzanne.002'].active_material = bpy.data.materials['Deffuse']
bpy.data.materials['Deffuse'].node_tree.nodes["Diffuse BSDF"].inputs["Color"].default_value = (1.0,0.80,0.50,1.0)
bpy.ops.render.render(use_viewport = True, write_still=True)
# End of file
| 0
| 0
| 0
|
a4953b714971962e388fc00a0817bc6ca26a5c39
| 1,395
|
py
|
Python
|
core/actor/visual/assets/__init__.py
|
pyfection/strategos
|
1039a069ec15b89e72eb06feca6c103cb2f2e0f5
|
[
"MIT"
] | 2
|
2018-01-07T18:56:56.000Z
|
2018-02-28T10:58:41.000Z
|
core/actor/visual/assets/__init__.py
|
pyfection/strategos
|
1039a069ec15b89e72eb06feca6c103cb2f2e0f5
|
[
"MIT"
] | 7
|
2017-12-29T00:17:11.000Z
|
2022-03-11T23:19:05.000Z
|
core/actor/visual/assets/__init__.py
|
pyfection/strategos
|
1039a069ec15b89e72eb06feca6c103cb2f2e0f5
|
[
"MIT"
] | 1
|
2019-12-13T14:42:33.000Z
|
2019-12-13T14:42:33.000Z
|
import os
from kivy.uix.image import Image
print("Warning: this module will be removed in future")
ASSET_PATH = os.path.dirname(__file__)
SIZE_MOD = 32
tiles = {
'grass': lambda **kwargs: tile_factory("grass.png", **kwargs),
'settlement': lambda **kwargs: tile_factory("settlement.png", **kwargs),
'forest': lambda **kwargs: tile_factory("forest.png", **kwargs),
'hill': lambda **kwargs: tile_factory("hill.png", **kwargs),
'mountain': lambda **kwargs: tile_factory("mountain.png", **kwargs),
'wood_bridge': lambda **kwargs: tile_factory("wood_bridge.png", **kwargs),
'river': lambda **kwargs: tile_factory("river.png", **kwargs),
}
Troop = lambda pos, **kwargs: Image(
source=os.path.join(ASSET_PATH, "troop.png"),
size_hint=(None, None),
size=(SIZE_MOD, SIZE_MOD),
pos=(pos[0] * SIZE_MOD, pos[1] * SIZE_MOD,),
**kwargs
)
Target = lambda pos, **kwargs: Image(
source=os.path.join(ASSET_PATH, "target.png"),
size_hint=(None, None),
size=(SIZE_MOD, SIZE_MOD),
pos=(pos[0] * SIZE_MOD, pos[1] * SIZE_MOD,),
color=[1, 1, 1, 1],
**kwargs
)
| 27.9
| 78
| 0.632258
|
import os
from kivy.uix.image import Image
print("Warning: this module will be removed in future")
ASSET_PATH = os.path.dirname(__file__)
SIZE_MOD = 32
def tile_factory(file_name, pos, **kwargs):
pos = pos[0] * SIZE_MOD, pos[1] * SIZE_MOD
return Image(
source=os.path.join(ASSET_PATH, "tiles", file_name),
size_hint=(None, None),
size=(SIZE_MOD, SIZE_MOD),
pos=pos,
**kwargs
)
tiles = {
'grass': lambda **kwargs: tile_factory("grass.png", **kwargs),
'settlement': lambda **kwargs: tile_factory("settlement.png", **kwargs),
'forest': lambda **kwargs: tile_factory("forest.png", **kwargs),
'hill': lambda **kwargs: tile_factory("hill.png", **kwargs),
'mountain': lambda **kwargs: tile_factory("mountain.png", **kwargs),
'wood_bridge': lambda **kwargs: tile_factory("wood_bridge.png", **kwargs),
'river': lambda **kwargs: tile_factory("river.png", **kwargs),
}
Troop = lambda pos, **kwargs: Image(
source=os.path.join(ASSET_PATH, "troop.png"),
size_hint=(None, None),
size=(SIZE_MOD, SIZE_MOD),
pos=(pos[0] * SIZE_MOD, pos[1] * SIZE_MOD,),
**kwargs
)
Target = lambda pos, **kwargs: Image(
source=os.path.join(ASSET_PATH, "target.png"),
size_hint=(None, None),
size=(SIZE_MOD, SIZE_MOD),
pos=(pos[0] * SIZE_MOD, pos[1] * SIZE_MOD,),
color=[1, 1, 1, 1],
**kwargs
)
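A minimal usage sketch (not part of the original module): it assumes a running Kivy application so the Image widgets can actually be displayed; the app and layout names are illustrative.
from kivy.app import App
from kivy.uix.relativelayout import RelativeLayout

class DemoMapApp(App):
    def build(self):
        root = RelativeLayout()
        root.add_widget(tiles['grass'](pos=(2, 3)))  # grass tile at grid cell (2, 3) -> pixels (64, 96)
        root.add_widget(Troop(pos=(2, 3)))           # troop sprite on the same cell
        root.add_widget(Target(pos=(5, 1)))          # target marker at cell (5, 1)
        return root

if __name__ == '__main__':
    DemoMapApp().run()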
| 255
| 0
| 23
|
44cfd9abc57cbe65c8fedb5510451d8e164c1893
| 2,214
|
py
|
Python
|
tests/test_deepl_server.py
|
ffreemt/deepl-fastapi
|
bf806f1cb3784067e764e813b581e0d21c4b739c
|
[
"MIT"
] | 3
|
2021-03-19T16:46:04.000Z
|
2021-11-23T02:12:30.000Z
|
tests/test_deepl_server.py
|
ffreemt/deepl-fastapi
|
bf806f1cb3784067e764e813b581e0d21c4b739c
|
[
"MIT"
] | null | null | null |
tests/test_deepl_server.py
|
ffreemt/deepl-fastapi
|
bf806f1cb3784067e764e813b581e0d21c4b739c
|
[
"MIT"
] | null | null | null |
"""Sanity check."""
import os
import sys
from pathlib import Path
from time import sleep
import requests
from subprocess import Popen
import portalocker
from logzero import logger
# start the server if not already started
lockfile = f'{Path(__file__).parent.parent / "deepl_fastapi" / "deepl_server.py.portalocker.lock"}'
logger.info("lockfile: %s", lockfile)
file = open(lockfile, "r+")
try:
portalocker.lock(file, portalocker.LOCK_EX | portalocker.LOCK_NB)
locked = False
portalocker.unlock(file)
except Exception:
locked = True
logger.debug("locked: %s", locked)
if not locked:
cwd = Path(__file__).absolute().parent.as_posix()
executable = f"{sys.executable}"
if os.name in ["posix"]: # linux and friends
cmd = f"nohup python -m deepl_fastapi.run_uvicorn > {cwd}" "/server.out 2>&1 &"
Popen(cmd, shell=True)
logger.info(
"fastapi server running in background, output logged to: %s/server.out",
cwd,
)
else:
try:
Popen(f"{executable} -m deepl_fastapi.run_uvicorn", shell=True)
logger.info(
"\n\t [%s] fastapi server running in background\n",
"deepl_fastapi.run_uvicorn",
)
except Exception as exc:
logger.debug(exc)
# wait for server to come up
sleep(20)
| 29.918919
| 99
| 0.590786
|
"""Sanity check."""
import os
import sys
from pathlib import Path
from time import sleep
import requests
from subprocess import Popen
import portalocker
from logzero import logger
# start the server if not already started
lockfile = f'{Path(__file__).parent.parent / "deepl_fastapi" / "deepl_server.py.portalocker.lock"}'
logger.info("lockfile: %s", lockfile)
file = open(lockfile, "r+")
try:
portalocker.lock(file, portalocker.LOCK_EX | portalocker.LOCK_NB)
locked = False
portalocker.unlock(file)
except Exception:
locked = True
logger.debug("locked: %s", locked)
if not locked:
cwd = Path(__file__).absolute().parent.as_posix()
executable = f"{sys.executable}"
if os.name in ["posix"]: # linux and friends
cmd = f"nohup python -m deepl_fastapi.run_uvicorn > {cwd}" "/server.out 2>&1 &"
Popen(cmd, shell=True)
logger.info(
"fastapi server running in background, output logged to: %s/server.out",
cwd,
)
else:
try:
Popen(f"{executable} -m deepl_fastapi.run_uvicorn", shell=True)
logger.info(
"\n\t [%s] fastapi server running in background\n",
"deepl_fastapi.run_uvicorn",
)
except Exception as exc:
logger.debug(exc)
# wait for server to come up
sleep(20)
def test_deepl_server():
try:
_ = requests.get("http://127.0.0.1:8000/text/?q=test me", verify=False)
res = str(_.json())
except Exception as exc:
logger.error(exc)
# try one more time
if os.name.lower() in ["posix"]: # linux and friends
sleep(25)
else: # Windows wait longer
sleep(40)
try:
_ = requests.get("http://127.0.0.1:8000/text/?q=test me", verify=False)
res = str(_.json())
except Exception as exc:
logger.error("2nd try: %s", exc)
res = str(exc)
    # somehow the Windows test does not work on the GitHub VM
# it's alright on local Windows 10.
# TODO will fix this later
if os.name.lower() not in ["posix"]:
res = "我" + res
assert "我" in res
| 838
| 0
| 23
|
cb3421bae13876fc0385ff9720c6f39b8ad145cc
| 203
|
py
|
Python
|
cellfinder/export/brainrender.py
|
satyakam7/cellfinder
|
69d317fba30a174bf775351fb8514713c7e1984c
|
[
"BSD-3-Clause"
] | 65
|
2020-01-18T11:00:37.000Z
|
2020-09-15T11:19:01.000Z
|
cellfinder/export/brainrender.py
|
satyakam7/cellfinder
|
69d317fba30a174bf775351fb8514713c7e1984c
|
[
"BSD-3-Clause"
] | 96
|
2020-01-17T22:58:51.000Z
|
2020-10-06T10:37:31.000Z
|
cellfinder/export/brainrender.py
|
satyakam7/cellfinder
|
69d317fba30a174bf775351fb8514713c7e1984c
|
[
"BSD-3-Clause"
] | 22
|
2020-01-17T14:45:09.000Z
|
2020-09-10T14:45:26.000Z
|
import logging
import numpy as np
| 15.615385
| 49
| 0.714286
|
import logging
import numpy as np
def export_points(
points,
resolution,
output_filename,
):
logging.info("Exporting to brainrender")
np.save(output_filename, points * resolution)
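A hypothetical usage sketch (not part of cellfinder): the coordinates and voxel resolution below are made up, just to show the expected shapes.
cell_points = np.array([[10, 20, 30],
                        [40, 50, 60]])        # (N, 3) cell positions in voxel space
voxel_size = np.array([10.0, 10.0, 10.0])     # per-axis scaling, e.g. microns per voxel
export_points(cell_points, voxel_size, "exported_cells.npy")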
| 144
| 0
| 23
|
54bd522c10610f9a3c7c414f5a4f3f8769d7c696
| 2,390
|
py
|
Python
|
reports/graphs/sleep_totals.py
|
vault-the/babybuddy
|
162f117cef019771ef6393146a0c63110e337d7f
|
[
"BSD-2-Clause"
] | null | null | null |
reports/graphs/sleep_totals.py
|
vault-the/babybuddy
|
162f117cef019771ef6393146a0c63110e337d7f
|
[
"BSD-2-Clause"
] | 5
|
2020-06-05T16:49:14.000Z
|
2022-01-13T00:34:19.000Z
|
reports/graphs/sleep_totals.py
|
vault-the/babybuddy
|
162f117cef019771ef6393146a0c63110e337d7f
|
[
"BSD-2-Clause"
] | 1
|
2021-07-08T17:59:24.000Z
|
2021-07-08T17:59:24.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import timezone
import plotly.offline as plotly
import plotly.graph_objs as go
from core.utils import duration_parts
from reports import utils
def sleep_totals(instances):
"""
Create a graph showing total time sleeping for each day.
:param instances: a QuerySet of Sleep instances.
    :returns: a tuple of the graph's html and javascript.
"""
totals = {}
for instance in instances:
start = timezone.localtime(instance.start)
end = timezone.localtime(instance.end)
if start.date() not in totals.keys():
totals[start.date()] = timezone.timedelta(seconds=0)
if end.date() not in totals.keys():
totals[end.date()] = timezone.timedelta(seconds=0)
# Account for dates crossing midnight.
if start.date() != end.date():
totals[start.date()] += end.replace(
year=start.year, month=start.month, day=start.day,
hour=23, minute=59, second=59) - start
totals[end.date()] += end - start.replace(
year=end.year, month=end.month, day=end.day, hour=0, minute=0,
second=0)
else:
totals[start.date()] += instance.duration
trace = go.Bar(
name='Total sleep',
x=list(totals.keys()),
y=[td.seconds/3600 for td in totals.values()],
hoverinfo='text',
textposition='outside',
text=[_duration_string_short(td) for td in totals.values()]
)
layout_args = utils.default_graph_layout_options()
layout_args['barmode'] = 'stack'
layout_args['title'] = '<b>Sleep Totals</b>'
layout_args['xaxis']['title'] = 'Date'
layout_args['xaxis']['rangeselector'] = utils.rangeselector_date()
layout_args['yaxis']['title'] = 'Hours of sleep'
fig = go.Figure({
'data': [trace],
'layout': go.Layout(**layout_args)
})
output = plotly.plot(fig, output_type='div', include_plotlyjs=False)
return utils.split_graph_output(output)
def _duration_string_short(duration):
"""
Format a "short" duration string without seconds precision. This is
intended to fit better in smaller spaces on a graph.
:returns: a string of the form XhXm.
"""
h, m, s = duration_parts(duration)
return '{}h{}m'.format(h, m)
| 33.194444
| 78
| 0.628452
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import timezone
import plotly.offline as plotly
import plotly.graph_objs as go
from core.utils import duration_parts
from reports import utils
def sleep_totals(instances):
"""
Create a graph showing total time sleeping for each day.
:param instances: a QuerySet of Sleep instances.
    :returns: a tuple of the graph's html and javascript.
"""
totals = {}
for instance in instances:
start = timezone.localtime(instance.start)
end = timezone.localtime(instance.end)
if start.date() not in totals.keys():
totals[start.date()] = timezone.timedelta(seconds=0)
if end.date() not in totals.keys():
totals[end.date()] = timezone.timedelta(seconds=0)
# Account for dates crossing midnight.
if start.date() != end.date():
totals[start.date()] += end.replace(
year=start.year, month=start.month, day=start.day,
hour=23, minute=59, second=59) - start
totals[end.date()] += end - start.replace(
year=end.year, month=end.month, day=end.day, hour=0, minute=0,
second=0)
else:
totals[start.date()] += instance.duration
trace = go.Bar(
name='Total sleep',
x=list(totals.keys()),
y=[td.seconds/3600 for td in totals.values()],
hoverinfo='text',
textposition='outside',
text=[_duration_string_short(td) for td in totals.values()]
)
layout_args = utils.default_graph_layout_options()
layout_args['barmode'] = 'stack'
layout_args['title'] = '<b>Sleep Totals</b>'
layout_args['xaxis']['title'] = 'Date'
layout_args['xaxis']['rangeselector'] = utils.rangeselector_date()
layout_args['yaxis']['title'] = 'Hours of sleep'
fig = go.Figure({
'data': [trace],
'layout': go.Layout(**layout_args)
})
output = plotly.plot(fig, output_type='div', include_plotlyjs=False)
return utils.split_graph_output(output)
def _duration_string_short(duration):
"""
Format a "short" duration string without seconds precision. This is
intended to fit better in smaller spaces on a graph.
:returns: a string of the form XhXm.
"""
h, m, s = duration_parts(duration)
return '{}h{}m'.format(h, m)
| 0
| 0
| 0
|
fbd362640bd13b085f1a705f65bc0ecbf6df81c9
| 1,213
|
py
|
Python
|
tests/test_driver_loader.py
|
lightcode/OVM
|
3c6c3528ef851f65d4bd75cafb8738c54fba7b6f
|
[
"MIT"
] | 1
|
2018-03-20T14:54:10.000Z
|
2018-03-20T14:54:10.000Z
|
tests/test_driver_loader.py
|
lightcode/OVM
|
3c6c3528ef851f65d4bd75cafb8738c54fba7b6f
|
[
"MIT"
] | null | null | null |
tests/test_driver_loader.py
|
lightcode/OVM
|
3c6c3528ef851f65d4bd75cafb8738c54fba7b6f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from ovm.exceptions import OVMError
from ovm.drivers.driver_loader import DriverLoader
from ovm.drivers.storage.lvm import LvmDriver
from ovm.drivers.storage.file import FileDriver
from ovm.drivers.network.bridge import BridgeDriver
| 32.783784
| 67
| 0.704864
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from ovm.exceptions import OVMError
from ovm.drivers.driver_loader import DriverLoader
from ovm.drivers.storage.lvm import LvmDriver
from ovm.drivers.storage.file import FileDriver
from ovm.drivers.network.bridge import BridgeDriver
class TestDriverLoader(unittest.TestCase):
def test_to_load_lvm_driver(self):
"""this should return the lvm driver"""
dl = DriverLoader(DriverLoader.STORAGE)
driver = dl.load('lvm')
self.assertEqual(driver, LvmDriver)
def test_to_load_file_driver(self):
"""this should return the file driver"""
dl = DriverLoader(DriverLoader.STORAGE)
driver = dl.load('file')
self.assertEqual(driver, FileDriver)
def test_to_load_openvswitch_driver(self):
"""this should return the openvswitch driver"""
dl = DriverLoader(DriverLoader.NETWORK)
driver = dl.load('bridge')
self.assertEqual(driver, BridgeDriver)
def test_to_load_non_existing_driver(self):
"""this should raise a OVMError"""
dl = DriverLoader(DriverLoader.NETWORK)
self.assertRaises(OVMError, dl.load, 'non-existing-driver')
| 0
| 891
| 23
|
95cc49889fc25eba2175a039df3a4b3db8d376b7
| 805
|
py
|
Python
|
tests/proquest/test_identifier.py
|
tdilauro/simplified-circulation
|
f52d333616f63e2bff0cf1de98ef301bf152fba1
|
[
"Apache-2.0"
] | 16
|
2016-06-27T18:09:37.000Z
|
2021-12-07T15:20:52.000Z
|
tests/proquest/test_identifier.py
|
tdilauro/simplified-circulation
|
f52d333616f63e2bff0cf1de98ef301bf152fba1
|
[
"Apache-2.0"
] | 809
|
2016-01-25T15:41:05.000Z
|
2022-03-29T16:12:38.000Z
|
tests/proquest/test_identifier.py
|
tdilauro/simplified-circulation
|
f52d333616f63e2bff0cf1de98ef301bf152fba1
|
[
"Apache-2.0"
] | 19
|
2016-05-06T16:18:50.000Z
|
2021-05-01T06:33:18.000Z
|
from parameterized import parameterized
from api.proquest.identifier import ProQuestIdentifierParser
from core.model import Identifier
| 28.75
| 105
| 0.609938
|
from parameterized import parameterized
from api.proquest.identifier import ProQuestIdentifierParser
from core.model import Identifier
class TestProQuestIdentifierParser(object):
@parameterized.expand(
[
(
"incorrect_identifier",
"urn:librarysimplified.org/terms/id/Overdrive%20ID/adfcc11a-cc5b-4c82-8048-e005e4a90222",
None,
),
(
"correct_identifier",
"urn:proquest.com/document-id/12345",
(Identifier.PROQUEST_ID, "12345"),
),
]
)
def test_parse(self, _, identifier_string, expected_result):
parser = ProQuestIdentifierParser()
result = parser.parse(identifier_string)
assert expected_result == result
| 175
| 470
| 23
|
ddc68c78126e069825cbfdf07b334da827c19781
| 7,134
|
py
|
Python
|
pandamesh/common.py
|
visr/pandamesh
|
f88396c5b54ac004a2f902a2354334a776f411c8
|
[
"MIT"
] | 6
|
2021-11-30T15:35:57.000Z
|
2021-12-15T10:44:50.000Z
|
pandamesh/common.py
|
visr/pandamesh
|
f88396c5b54ac004a2f902a2354334a776f411c8
|
[
"MIT"
] | 2
|
2021-12-08T13:40:20.000Z
|
2021-12-08T13:49:26.000Z
|
pandamesh/common.py
|
Deltares/pandamesh
|
f88396c5b54ac004a2f902a2354334a776f411c8
|
[
"MIT"
] | null | null | null |
import functools
import operator
from enum import Enum
from itertools import combinations
from typing import Any, Sequence, Tuple
import geopandas as gpd
import numpy as np
import pygeos
import shapely.geometry as sg
IntArray = np.ndarray
FloatArray = np.ndarray
coord_dtype = np.dtype([("x", np.float64), ("y", np.float64)])
def overlap_shortlist(features: gpd.GeoSeries) -> Tuple[IntArray, IntArray]:
"""
Create a shortlist of polygons or linestrings indices to check against each
other using their bounding boxes.
"""
bounds = features.bounds
index_a, index_b = (
np.array(index) for index in zip(*combinations(features.index, 2))
)
df_a = bounds.loc[index_a]
df_b = bounds.loc[index_b]
# Convert to dict to get rid of clashing index.
a = {k: df_a[k].values for k in df_a}
b = {k: df_b[k].values for k in df_b}
# Touching does not count as overlap here.
overlap = (
(a["maxx"] >= b["minx"])
& (b["maxx"] >= a["minx"])
& (a["maxy"] >= b["miny"])
& (b["maxy"] >= a["miny"])
)
return index_a[overlap], index_b[overlap]
def check_features(features: gpd.GeoSeries, feature_type) -> None:
"""
Features should:
* be simple: no self-intersection
* not intersect with other features
"""
# Note: make sure to call geopandas functions rather than shapely or pygeos
# where possible. Otherwise, either conversion is required, or duplicate
# implementations, one with shapely and one with pygeos.
# Check valid
are_simple = features.is_simple
n_complex = (~are_simple).sum()
if n_complex > 0:
raise ValueError(
f"{n_complex} cases of complex {feature_type} detected: these "
" features contain self intersections"
)
if len(features) <= 1:
return
check_intersection(features, feature_type)
return
def check_linestrings(
linestrings: gpd.GeoSeries,
polygons: gpd.GeoSeries,
) -> None:
"""
Check whether linestrings are fully contained in a single polygon.
"""
check_features(linestrings, "linestring")
intersects = gpd.GeoDataFrame(geometry=linestrings).sjoin(
df=gpd.GeoDataFrame(geometry=polygons),
predicate="within",
)
n_diff = len(linestrings) - len(intersects)
if n_diff != 0:
raise ValueError(
"The same linestring detected in multiple polygons or "
"linestring detected outside of any polygon; "
"a linestring must be fully contained by a single polygon."
)
return
def check_points(
points: gpd.GeoSeries,
polygons: gpd.GeoSeries,
) -> None:
"""
Check whether points are contained by a polygon.
"""
within = gpd.GeoDataFrame(geometry=points).sjoin(
df=gpd.GeoDataFrame(geometry=polygons),
predicate="within",
)
n_outside = len(points) - len(within)
if n_outside != 0:
raise ValueError(f"{n_outside} points detected outside of a polygon")
return
| 31.566372
| 95
| 0.657135
|
import functools
import operator
from enum import Enum
from itertools import combinations
from typing import Any, Sequence, Tuple
import geopandas as gpd
import numpy as np
import pygeos
import shapely.geometry as sg
IntArray = np.ndarray
FloatArray = np.ndarray
coord_dtype = np.dtype([("x", np.float64), ("y", np.float64)])
def repr(obj: Any) -> str:
strings = [type(obj).__name__]
for k, v in obj.__dict__.items():
if k.startswith("_"):
k = k[1:]
if isinstance(v, np.ndarray):
s = f" {k} = np.ndarray with shape({v.shape})"
else:
s = f" {k} = {v}"
strings.append(s)
return "\n".join(strings)
def flatten(seq: Sequence[Any]):
return functools.reduce(operator.concat, seq)
def _show_options(options: Enum) -> str:
return "\n".join(map(str, options))
def invalid_option(value: Any, options: Enum) -> str:
return f"Invalid option: {value}. Valid options are:\n{_show_options(options)}"
def check_geodataframe(features: gpd.GeoDataFrame) -> None:
if not isinstance(features, gpd.GeoDataFrame):
raise TypeError(
f"Expected GeoDataFrame, received instead: {type(features).__name__}"
)
if not features.index.is_integer():
raise ValueError(
f"geodataframe index is not integer typed, received: {features.index.dtype}"
)
if features.index.duplicated().any():
raise ValueError("geodataframe index contains duplicates")
def overlap_shortlist(features: gpd.GeoSeries) -> Tuple[IntArray, IntArray]:
"""
Create a shortlist of polygons or linestrings indices to check against each
other using their bounding boxes.
"""
bounds = features.bounds
index_a, index_b = (
np.array(index) for index in zip(*combinations(features.index, 2))
)
df_a = bounds.loc[index_a]
df_b = bounds.loc[index_b]
# Convert to dict to get rid of clashing index.
a = {k: df_a[k].values for k in df_a}
b = {k: df_b[k].values for k in df_b}
# Touching does not count as overlap here.
overlap = (
(a["maxx"] >= b["minx"])
& (b["maxx"] >= a["minx"])
& (a["maxy"] >= b["miny"])
& (b["maxy"] >= a["miny"])
)
return index_a[overlap], index_b[overlap]
def intersecting_features(features, feature_type) -> Tuple[IntArray, IntArray]:
# Check all combinations where bounding boxes overlap.
index_a, index_b = overlap_shortlist(features)
unique = np.unique(np.concatenate([index_a, index_b]))
# Now do the expensive intersection check.
# Polygons that touch are allowed, but they result in intersects() == True.
# To avoid this, we create temporary geometries that are slightly smaller
# by buffering with a small negative value.
shortlist = features.loc[unique]
if feature_type == "polygon":
shortlist = shortlist.buffer(-1.0e-6)
a = shortlist.loc[index_a]
b = shortlist.loc[index_b]
# Synchronize index so there's a one to one (row to row) intersection
# check.
a.index = np.arange(len(a))
b.index = np.arange(len(b))
with_overlap = a.intersects(b).values
return index_a[with_overlap], index_b[with_overlap]
def check_intersection(features: gpd.GeoSeries, feature_type: str) -> None:
index_a, index_b = intersecting_features(features, feature_type)
n_overlap = len(index_a)
if n_overlap > 0:
message = "\n".join([f"{a} with {b}" for a, b, in zip(index_a, index_b)])
raise ValueError(
f"{n_overlap} cases of intersecting {feature_type} detected:\n{message}"
)
def check_features(features: gpd.GeoSeries, feature_type) -> None:
"""
Features should:
* be simple: no self-intersection
* not intersect with other features
"""
# Note: make sure to call geopandas functions rather than shapely or pygeos
# where possible. Otherwise, either conversion is required, or duplicate
# implementations, one with shapely and one with pygeos.
# Check valid
are_simple = features.is_simple
n_complex = (~are_simple).sum()
if n_complex > 0:
raise ValueError(
f"{n_complex} cases of complex {feature_type} detected: these "
" features contain self intersections"
)
if len(features) <= 1:
return
check_intersection(features, feature_type)
return
def check_polygons(polygons: gpd.GeoSeries) -> None:
check_features(polygons, "polygon")
def check_linestrings(
linestrings: gpd.GeoSeries,
polygons: gpd.GeoSeries,
) -> None:
"""
Check whether linestrings are fully contained in a single polygon.
"""
check_features(linestrings, "linestring")
intersects = gpd.GeoDataFrame(geometry=linestrings).sjoin(
df=gpd.GeoDataFrame(geometry=polygons),
predicate="within",
)
n_diff = len(linestrings) - len(intersects)
if n_diff != 0:
raise ValueError(
"The same linestring detected in multiple polygons or "
"linestring detected outside of any polygon; "
"a linestring must be fully contained by a single polygon."
)
return
def check_points(
points: gpd.GeoSeries,
polygons: gpd.GeoSeries,
) -> None:
"""
Check whether points are contained by a polygon.
"""
within = gpd.GeoDataFrame(geometry=points).sjoin(
df=gpd.GeoDataFrame(geometry=polygons),
predicate="within",
)
n_outside = len(points) - len(within)
if n_outside != 0:
raise ValueError(f"{n_outside} points detected outside of a polygon")
return
def separate(
gdf: gpd.GeoDataFrame,
) -> Tuple[gpd.GeoDataFrame, gpd.GeoDataFrame, gpd.GeoDataFrame]:
geom_type = gdf.geom_type
acceptable = ["Polygon", "LineString", "Point"]
if not geom_type.isin(acceptable).all():
raise TypeError(f"Geometry should be one of {acceptable}")
polygons = gdf[geom_type == "Polygon"].copy()
linestrings = gdf[geom_type == "LineString"].copy()
points = gdf[geom_type == "Point"].copy()
# Set crs to None to avoid crs warnings on joins and overlays
polygons.crs = linestrings.crs = points.crs = None
check_polygons(polygons.geometry)
check_linestrings(linestrings.geometry, polygons.geometry)
check_points(points.geometry, polygons.geometry)
return polygons, linestrings, points
def to_pygeos(geometry: Any):
first = next(iter(geometry))
if isinstance(first, pygeos.Geometry):
return geometry
elif isinstance(first, sg.base.BaseGeometry):
return pygeos.from_shapely(geometry)
else:
raise TypeError(
"geometry should be pygeos or shapely type. "
f"Received instead {type(first).__name__}"
)
def to_ugrid(vertices: FloatArray, faces: IntArray) -> "xugrid.Ugrid2d": # type: ignore # noqa
try:
import xugrid
except ImportError:
raise ImportError(
"xugrid must be installed to return generated result a xugrid.Ugrid2d"
)
return xugrid.Ugrid2d(*vertices.T, -1, faces)
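A hypothetical usage sketch (not part of pandamesh): a tiny GeoDataFrame mixing the three accepted geometry types, validated and then split into its constituent parts.
demo_gdf = gpd.GeoDataFrame(
    geometry=[
        sg.Polygon([(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0)]),
        sg.LineString([(2.0, 2.0), (8.0, 8.0)]),
        sg.Point(5.0, 5.0),
    ]
)
check_geodataframe(demo_gdf)                          # integer, duplicate-free index is required
polygons, linestrings, points = separate(demo_gdf)
print(len(polygons), len(linestrings), len(points))   # -> 1 1 1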
| 3,816
| 0
| 253
|
11dd4e6bd5927f477fa2d99cac5acc17453ed7da
| 1,722
|
py
|
Python
|
DFL168A/J1939/PGN65263.py
|
Dafulai/DFL168A_python
|
95825a27f2c145afbd02c207c3d622d6a77ceff2
|
[
"MIT"
] | null | null | null |
DFL168A/J1939/PGN65263.py
|
Dafulai/DFL168A_python
|
95825a27f2c145afbd02c207c3d622d6a77ceff2
|
[
"MIT"
] | null | null | null |
DFL168A/J1939/PGN65263.py
|
Dafulai/DFL168A_python
|
95825a27f2c145afbd02c207c3d622d6a77ceff2
|
[
"MIT"
] | null | null | null |
import time
import DFL168A
SuccessFresh=False
| 25.701493
| 51
| 0.647503
|
import time
import DFL168A
SuccessFresh=False
def refresh():
global SuccessFresh
if not DFL168A.DigitalCommand('FEEF'):
SuccessFresh=False
return False
Temp=DFL168A.HandleResponse(DFL168A.ReturnStr)
DFL168A.ReturnStr=Temp
SuccessFresh=True
return True
def getFueDeliveryPressure():
global SuccessFresh
if not SuccessFresh:
return False,0
temp=DFL168A.ReturnStr[0:2]
temp=int(temp,16)
if temp>0xfa:
return False,0
FueDeliveryPressure=temp*4
return True, FueDeliveryPressure
def getEngineOilLevel():
global SuccessFresh
if not SuccessFresh:
return False,0.0
temp=DFL168A.ReturnStr[4:6]
temp=int(temp,16)
if temp>0xfa:
return False,0.0
EngineOilLevel=temp*0.4 # %
return True, EngineOilLevel
def getEngineOilPressure():
global SuccessFresh
if not SuccessFresh:
return False,0
temp=DFL168A.ReturnStr[6:8]
temp=int(temp,16)
if temp>0xfa:
return False,0
EngineOilPressure=temp*4
return True, EngineOilPressure
def getEngineCoolantPressure():
global SuccessFresh
if not SuccessFresh:
return False,0
temp=DFL168A.ReturnStr[12:14]
temp=int(temp,16)
if temp>0xfa:
return False,0
EngineCoolantPressure=temp*2
return True, EngineCoolantPressure
def getEngineCoolantLevel():
global SuccessFresh
if not SuccessFresh:
return False,0.0
temp=DFL168A.ReturnStr[14:16]
temp=int(temp,16)
if temp>0xfa:
return False,0.0
EngineCoolantLevel=temp*0.4 #%
return True, EngineCoolantLevel
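A hypothetical usage sketch (not part of the original module): it assumes the DFL168A adapter has already been initialised elsewhere in the package, then polls PGN 65263 once and reads two of the decoded parameters.
if refresh():
    ok, oil_pressure = getEngineOilPressure()
    if ok:
        print("Engine oil pressure:", oil_pressure)
    ok, coolant_level = getEngineCoolantLevel()
    if ok:
        print("Engine coolant level:", coolant_level, "%")
else:
    print("No valid response for PGN 65263")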
| 1,511
| 0
| 163
|
a1417fa3635e096f11d8542953db2e1fb942aba1
| 2,096
|
py
|
Python
|
dataset/coco.py
|
aclex/detection-experiments
|
f1cbf115573a8d509553335c5904ebc2e11511d2
|
[
"Apache-2.0"
] | 5
|
2020-06-08T08:21:03.000Z
|
2021-03-03T21:54:06.000Z
|
dataset/coco.py
|
aclex/detection-experiments
|
f1cbf115573a8d509553335c5904ebc2e11511d2
|
[
"Apache-2.0"
] | 3
|
2021-02-06T20:21:02.000Z
|
2021-06-06T18:46:27.000Z
|
dataset/coco.py
|
aclex/detection-experiments
|
f1cbf115573a8d509553335c5904ebc2e11511d2
|
[
"Apache-2.0"
] | 2
|
2020-06-08T08:21:05.000Z
|
2021-02-06T11:44:04.000Z
|
import numpy as np
import logging
import cv2
import os
from torch.utils.data import Dataset
| 21.833333
| 67
| 0.71374
|
import numpy as np
import logging
import cv2
import os
from torch.utils.data import Dataset
class CocoDetection(Dataset):
bbox_format = 'coco'
def __init__(self, root, ann_file, transform=None):
"""Dataset for COCO data.
Args:
root: the root of the COCO-style dataset where images are stored
ann_file: JSON file with COCO-style dataset annotations
"""
super(CocoDetection, self).__init__()
self.root = root
self.transform = transform
from pycocotools.coco import COCO
self.coco = COCO(self._get_abs_path(ann_file))
self.ids = list(sorted(self.coco.imgs.keys()))
categories = self.coco.cats
self.class_names = ['BACKGROUND']
self.class_ids = [-1]
for cat_id, cat_name in categories.items():
self.class_names.append(cat_name["name"])
self.class_ids.append(cat_id)
def __getitem__(self, index):
image_id = self.ids[index]
boxes, labels = self._get_annotation(image_id)
image = self._read_image(image_id)
result = {
"image": image,
"bboxes": boxes,
"category_id": labels
}
if self.transform:
result = self.transform(**result)
return result
def get_image(self, index):
image_id = self.ids[index]
image = self._read_image(image_id)
if self.transform:
image, _ = self.transform(image=image)
return image
def get_annotation(self, index):
image_id = self.ids[index]
return image_id, self._get_annotation(image_id)
def __len__(self):
return len(self.ids)
def _get_annotation(self, image_id):
ann_ids = self.coco.getAnnIds(imgIds=image_id)
objects = self.coco.loadAnns(ann_ids)
boxes = []
labels = []
for object in objects:
boxes.append(object['bbox'])
labels.append(self.class_ids.index(object['category_id']))
return boxes, labels
def _get_abs_path(self, path):
if os.path.isabs(path):
return path
else:
return os.path.join(self.root, path)
def _read_image(self, image_id):
image_file = self._get_abs_path(
self.coco.loadImgs(image_id)[0]['file_name'])
image = cv2.imread(str(image_file))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
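A hypothetical usage sketch (not part of the original module): the paths are placeholders for a COCO-style dataset that already exists on disk.
dataset = CocoDetection(root="data/my_coco", ann_file="annotations/instances.json")
print(len(dataset), "images,", len(dataset.class_names) - 1, "foreground classes")
sample = dataset[0]
print(sample["image"].shape, sample["bboxes"], sample["category_id"])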
| 1,117
| 862
| 23
|
a4c648e6b4ec1df91ffe59b01b3f2b0eb107894e
| 1,471
|
py
|
Python
|
tests/test_config.py
|
pmpsa-hpc/RPackUtils
|
eedffd8b55db0fbcfd77cf8e8ef10b59b6d925f7
|
[
"Apache-2.0"
] | 1
|
2018-05-24T17:44:47.000Z
|
2018-05-24T17:44:47.000Z
|
tests/test_config.py
|
sgubianpm/RPackUtils
|
eedffd8b55db0fbcfd77cf8e8ef10b59b6d925f7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_config.py
|
sgubianpm/RPackUtils
|
eedffd8b55db0fbcfd77cf8e8ef10b59b6d925f7
|
[
"Apache-2.0"
] | null | null | null |
#######################################
# Copyright 2019 PMP SA. #
# SPDX-License-Identifier: Apache-2.0 #
#######################################
import os
import pytest
from rpackutils.config import Config
| 28.288462
| 59
| 0.558124
|
#######################################
# Copyright 2019 PMP SA. #
# SPDX-License-Identifier: Apache-2.0 #
#######################################
import os
import pytest
from rpackutils.config import Config
def test_custom_config():
configfilepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'resources/rpackutils.conf')
config = Config(configfilepath)
assert(
config.get("repositories", "artifactory_repos")
== "artifactory, artifactorydev"
)
assert(
config.get("repositories", "renvironment_repos")
== "R-3.1.2, R-3.2.5, R-3.2.2"
)
assert(
config.get("repositories", "local_repos")
== "local")
# artifactory
assert(
config.get("artifactory", "baseurl")
== "https://YOUR_ARTIFACTORY_HOSTNAME/artifactory")
assert(
config.get("artifactory", "user")
== "YOUR_ARTIFACTORY_USER")
assert(
config.get("artifactory", "password")
== "YOUR_ARTIFACTORY_PASSWORD")
assert(
config.get("artifactory", "verify")
== "/toto/Certificate_Chain.pem")
assert(
config.get("artifactory", "repos")
== "R-3.1.2, Bioc-3.0, R-local, R-Data-0.1")
assert(
not config.getboolean("R-3.1.2", "licensecheck")
)
assert(
not config.getboolean("R-3.2.2", "licensecheck")
)
assert(
config.getboolean("R-3.2.5", "licensecheck")
)
| 1,225
| 0
| 23
|
ddcd906a3c178d8cda4dd056e92fd2fdd61047ae
| 1,927
|
py
|
Python
|
movie_rs/matrix_factorization.py
|
sparshk/movie_recommender_system
|
473409f4d569291ab1badccd16c92e575ca487a5
|
[
"MIT"
] | null | null | null |
movie_rs/matrix_factorization.py
|
sparshk/movie_recommender_system
|
473409f4d569291ab1badccd16c92e575ca487a5
|
[
"MIT"
] | null | null | null |
movie_rs/matrix_factorization.py
|
sparshk/movie_recommender_system
|
473409f4d569291ab1badccd16c92e575ca487a5
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as nm
import sqlite3
from scipy.sparse.linalg import svds
from sqlalchemy import create_engine
engine=create_engine("postgres://postgres:25736534@localhost:5432/postgres")
print(user_row_number)
sorted_user_predictions = predictions_df.iloc[user_row_number].sort_values(ascending=False)
user_data = original_ratings_df[original_ratings_df.account_id == (userID)]
user_full = (user_data.merge(movies_df, how = 'left', left_on = 'movie_id', right_on = 'movie_id').
sort_values(['rating'], ascending=False))
recommendations = (movies_df[~movies_df['movie_id'].isin(user_full['movie_id'])].
merge(pd.DataFrame(sorted_user_predictions).reset_index(), how = 'left',
left_on = 'movie_id',
right_on = 'movie_id').
rename(columns = {user_row_number: 'Predictions'}).
sort_values('Predictions', ascending = False).
iloc[:num_recommendations, :-1]
)
return recommendations
| 45.880952
| 100
| 0.702647
|
import pandas as pd
import numpy as nm
import sqlite3
from scipy.sparse.linalg import svds
from sqlalchemy import create_engine
engine=create_engine("postgres://postgres:25736534@localhost:5432/postgres")
def recommend_movies(predictions_df, userID, movies_df, original_ratings_df, num_recommendations=5):
user_row_number = userID - 1
print(user_row_number)
sorted_user_predictions = predictions_df.iloc[user_row_number].sort_values(ascending=False)
user_data = original_ratings_df[original_ratings_df.account_id == (userID)]
user_full = (user_data.merge(movies_df, how = 'left', left_on = 'movie_id', right_on = 'movie_id').
sort_values(['rating'], ascending=False))
recommendations = (movies_df[~movies_df['movie_id'].isin(user_full['movie_id'])].
merge(pd.DataFrame(sorted_user_predictions).reset_index(), how = 'left',
left_on = 'movie_id',
right_on = 'movie_id').
rename(columns = {user_row_number: 'Predictions'}).
sort_values('Predictions', ascending = False).
iloc[:num_recommendations, :-1]
)
return recommendations
def calculate(x):
db=sqlite3.connect('db.sqlite3')
movies_df = pd.read_sql_query("SELECT * FROM movies",engine)
ratings_df = pd.read_sql_query("SELECT * FROM rating", engine)
R_df = ratings_df.pivot(index = 'account_id', columns ='movie_id', values = 'rating').fillna(0)
R=R_df.values
user_ratings_mean=nm.mean(R,axis=1)
R_demeaned = R - user_ratings_mean.reshape(-1, 1)
U, sigma, Vt = svds(R_demeaned, k = 1)
sigma = nm.diag(sigma)
all_user_predicted_ratings = nm.dot(nm.dot(U, sigma), Vt) + user_ratings_mean.reshape(-1, 1)
preds_df = pd.DataFrame(all_user_predicted_ratings, columns = R_df.columns)
#print(preds_df)
predictions = recommend_movies(preds_df, x, movies_df, ratings_df, 15)
return predictions
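A hypothetical usage sketch (not part of the original module): it assumes the Postgres database behind `engine` is reachable and already contains the `movies` and `rating` tables queried above.
if __name__ == "__main__":
    recommendations = calculate(1)   # top 15 unseen movies for account_id 1
    print(recommendations.head())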
| 852
| 0
| 46
|
0f9a997afd234ead9cc42043bcad31d9a945c6b7
| 4,721
|
py
|
Python
|
Scripts/simulation/gsi_handlers/buff_handlers.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/simulation/gsi_handlers/buff_handlers.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/simulation/gsi_handlers/buff_handlers.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\gsi_handlers\buff_handlers.py
# Compiled at: 2014-05-30 02:11:42
# Size of source mod 2**32: 4843 bytes
from gsi_handlers.gameplay_archiver import GameplayArchiver
from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers
import services
from protocolbuffers import Sims_pb2
sim_buff_log_schema = GsiGridSchema(label='Buffs Log', sim_specific=True)
sim_buff_log_schema.add_field('buff_id', label='Buff ID', type=(GsiFieldVisualizers.INT), width=0.5)
sim_buff_log_schema.add_field('buff_name', label='Name', width=2)
sim_buff_log_schema.add_field('equipped', label='Equip', width=1)
sim_buff_log_schema.add_field('buff_reason', label='Reason', width=1)
sim_buff_log_schema.add_field('timeout', label='Timeout', width=2)
sim_buff_log_schema.add_field('rate', label='Rate', width=2)
sim_buff_log_schema.add_field('is_mood_buff', label='Is Mood Buff', width=2)
sim_buff_log_schema.add_field('progress_arrow', label='Progress Arrow', width=2)
sim_buff_log_schema.add_field('commodity_guid', label='Commodity Guid', type=(GsiFieldVisualizers.INT), hidden=True)
sim_buff_log_schema.add_field('transition_into_buff_id', label='Next Buff ID', type=(GsiFieldVisualizers.INT), hidden=True)
sim_buff_log_archiver = GameplayArchiver('sim_buff_log', sim_buff_log_schema)
sim_mood_log_schema = GsiGridSchema(label='Mood Log', sim_specific=True)
sim_mood_log_schema.add_field('mood_id', label='Mood ID', type=(GsiFieldVisualizers.INT), width=0.5)
sim_mood_log_schema.add_field('mood_name', label='Name', width=2)
sim_mood_log_schema.add_field('mood_intensity', label='Intensity', width=2)
with sim_mood_log_schema.add_has_many('active_buffs', GsiGridSchema, label='Buffs at update') as (sub_schema):
sub_schema.add_field('buff_id', label='Buff ID')
sub_schema.add_field('buff_name', label='Buff name')
sub_schema.add_field('buff_mood', label='Buff Mood')
sub_schema.add_field('buff_mood_override', label='Mood Override (current)')
sub_schema.add_field('buff_mood_override_pending', label='Mood Override (pending)')
sim_mood_log_archiver = GameplayArchiver('sim_mood_log', sim_mood_log_schema)
| 56.879518
| 126
| 0.734802
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\gsi_handlers\buff_handlers.py
# Compiled at: 2014-05-30 02:11:42
# Size of source mod 2**32: 4843 bytes
from gsi_handlers.gameplay_archiver import GameplayArchiver
from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers
import services
from protocolbuffers import Sims_pb2
sim_buff_log_schema = GsiGridSchema(label='Buffs Log', sim_specific=True)
sim_buff_log_schema.add_field('buff_id', label='Buff ID', type=(GsiFieldVisualizers.INT), width=0.5)
sim_buff_log_schema.add_field('buff_name', label='Name', width=2)
sim_buff_log_schema.add_field('equipped', label='Equip', width=1)
sim_buff_log_schema.add_field('buff_reason', label='Reason', width=1)
sim_buff_log_schema.add_field('timeout', label='Timeout', width=2)
sim_buff_log_schema.add_field('rate', label='Rate', width=2)
sim_buff_log_schema.add_field('is_mood_buff', label='Is Mood Buff', width=2)
sim_buff_log_schema.add_field('progress_arrow', label='Progress Arrow', width=2)
sim_buff_log_schema.add_field('commodity_guid', label='Commodity Guid', type=(GsiFieldVisualizers.INT), hidden=True)
sim_buff_log_schema.add_field('transition_into_buff_id', label='Next Buff ID', type=(GsiFieldVisualizers.INT), hidden=True)
sim_buff_log_archiver = GameplayArchiver('sim_buff_log', sim_buff_log_schema)
def archive_buff_message(buff_msg, shows_timeout, change_rate):
buff_reason = hex(buff_msg.reason.hash) if buff_msg.HasField('reason') else None
entry = {'buff_id':buff_msg.buff_id, 'equipped':buff_msg.equipped,
'buff_reason':buff_reason,
'is_mood_buff':buff_msg.is_mood_buff,
'commodity_guid':buff_msg.commodity_guid,
'transition_into_buff_id':buff_msg.transition_into_buff_id}
manager = services.buff_manager()
if manager:
buff_cls = manager.get(buff_msg.buff_id)
entry['buff_name'] = buff_cls.__name__
elif buff_msg.equipped:
if shows_timeout:
if buff_msg.timeout:
entry['timeout'] = buff_msg.timeout
entry['rate'] = buff_msg.rate_multiplier
if change_rate is not None:
if buff_msg.buff_progress == Sims_pb2.BUFF_PROGRESS_NONE:
entry['progress_arrow'] = 'No Arrow'
else:
if buff_msg.buff_progress == Sims_pb2.BUFF_PROGRESS_UP:
entry['progress_arrow'] = 'Arrow Up'
else:
entry['progress_arrow'] = 'Arrow Down'
if buff_msg.HasField('mood_type_override'):
entry['mood_type_override'] = buff_msg.mood_type_override
sim_buff_log_archiver.archive(data=entry, object_id=(buff_msg.sim_id))
sim_mood_log_schema = GsiGridSchema(label='Mood Log', sim_specific=True)
sim_mood_log_schema.add_field('mood_id', label='Mood ID', type=(GsiFieldVisualizers.INT), width=0.5)
sim_mood_log_schema.add_field('mood_name', label='Name', width=2)
sim_mood_log_schema.add_field('mood_intensity', label='Intensity', width=2)
with sim_mood_log_schema.add_has_many('active_buffs', GsiGridSchema, label='Buffs at update') as (sub_schema):
sub_schema.add_field('buff_id', label='Buff ID')
sub_schema.add_field('buff_name', label='Buff name')
sub_schema.add_field('buff_mood', label='Buff Mood')
sub_schema.add_field('buff_mood_override', label='Mood Override (current)')
sub_schema.add_field('buff_mood_override_pending', label='Mood Override (pending)')
sim_mood_log_archiver = GameplayArchiver('sim_mood_log', sim_mood_log_schema)
def archive_mood_message(sim_id, active_mood, active_mood_intensity, active_buffs, changeable_buffs):
mood_entry = {'mood_id':active_mood.guid64,
'mood_name':active_mood.__name__,
'mood_intensity':active_mood_intensity}
active_buff_entries = []
for buff_type, buff in active_buffs.items():
buff_entry = {'buff_id':buff_type.guid64,
'buff_name':buff_type.__name__,
'buff_mood':buff.mood_type.__name__ if buff.mood_type is not None else 'None',
'buff_mood_override':buff.mood_override.__name__ if buff.mood_override is not None else 'None'}
for changeable_buff, new_mood_override in changeable_buffs:
if changeable_buff is buff:
buff_entry['buff_mood_override_pending'] = 'None' if new_mood_override is None else new_mood_override.__name__
break
active_buff_entries.append(buff_entry)
mood_entry['active_buffs'] = active_buff_entries
sim_mood_log_archiver.archive(data=mood_entry, object_id=sim_id)
| 2,351
| 0
| 46
|
f968ab8fa892c693792ae82a3470902282576d07
| 37
|
py
|
Python
|
Autokey/CapsKeybinds/shift/right.py
|
MisaghM/Capslock-Keybindings
|
00332c7d39cf776c43fe13aa08e1c2969747425d
|
[
"MIT"
] | 1
|
2021-11-05T19:39:36.000Z
|
2021-11-05T19:39:36.000Z
|
Autokey/CapsKeybinds/shift/right.py
|
MisaghM/Capslock-Keybindings
|
00332c7d39cf776c43fe13aa08e1c2969747425d
|
[
"MIT"
] | null | null | null |
Autokey/CapsKeybinds/shift/right.py
|
MisaghM/Capslock-Keybindings
|
00332c7d39cf776c43fe13aa08e1c2969747425d
|
[
"MIT"
] | null | null | null |
keyboard.send_keys("<shift>+<right>")
| 37
| 37
| 0.72973
|
keyboard.send_keys("<shift>+<right>")
| 0
| 0
| 0
|