hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f48b4a12c1d8c42b79e989d034a54fceef83d421 | 1,440 | py | Python | finrl/autotrain/trade.py | 16231108/comp3 | ba30823f20c1f82f05ccfb2f239d2be98c6a951f | [
"MIT"
] | null | null | null | finrl/autotrain/trade.py | 16231108/comp3 | ba30823f20c1f82f05ccfb2f239d2be98c6a951f | [
"MIT"
] | null | null | null | finrl/autotrain/trade.py | 16231108/comp3 | ba30823f20c1f82f05ccfb2f239d2be98c6a951f | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import preprocessing
matplotlib.use("Agg")
import datetime
import torch
from finrl.config import config
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import data_split
from finrl.env.env_stocktrading import StockTradingEnv
from finrl.env.lxc_env_stocktrading import lxcStockTradingEnv
from finrl.model.models import DRLAgent
from finrl.trade.backtest import backtest_stats as BackTestStats
from stable_baselines3 import A2C | 32.727273 | 82 | 0.704167 | import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import preprocessing
matplotlib.use("Agg")
import datetime
import torch
from finrl.config import config
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import data_split
from finrl.env.env_stocktrading import StockTradingEnv
from finrl.env.lxc_env_stocktrading import lxcStockTradingEnv
from finrl.model.models import DRLAgent
from finrl.trade.backtest import backtest_stats as BackTestStats
from stable_baselines3 import A2C
def train_one(m_path,t_path):
import dill as pickle
f=open(m_path,'rb')
trained_sac=pickle.load(f)
f.close()
f=open(t_path,'rb')
e_trade_gym=pickle.load(f)
f.close()
#########################################################################
print("==============Start Trading===========")
'''
df_account_value, df_actions = DRLAgent.DRL_prediction(
model=trained_sac, test_data=trade, test_env=env_trade, test_obs=obs_trade
)
'''
df_account_value, df_actions = DRLAgent.DRL_prediction(
#model=all_model, environment=e_trade_gym
model=trained_sac, environment=e_trade_gym
)
df_account_value.to_csv(
"/df_account_value"
)
df_actions.to_csv("./" + config.RESULTS_DIR + "/df_actions_" + now + ".csv") | 783 | 0 | 23 |
6970a677248fd41f166e4281827d02f4c11b1754 | 7,509 | py | Python | gnns/hierarchical_gnet_ogbgmol_louvain.py | rampasek/HGNet | c6cc10d413270bde737d088cf2f0f66d8529abf3 | [
"MIT"
] | 10 | 2021-07-15T13:41:21.000Z | 2022-03-11T12:59:42.000Z | gnns/hierarchical_gnet_ogbgmol_louvain.py | rampasek/HGNet | c6cc10d413270bde737d088cf2f0f66d8529abf3 | [
"MIT"
] | null | null | null | gnns/hierarchical_gnet_ogbgmol_louvain.py | rampasek/HGNet | c6cc10d413270bde737d088cf2f0f66d8529abf3 | [
"MIT"
] | null | null | null | from functools import partial
import community as community_louvain
import torch
import torch.nn.functional as F
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder
from torch_geometric.data import Batch
from torch_geometric.nn import avg_pool
from torch_geometric.utils.convert import to_networkx
import gnns.ogbmol_conv
from gnns.gcn_wparent import GCNConvWParent
class HierarchicalGraphNet(torch.nn.Module):
"""The Hierarchical GraphNet
TODO: update docstring
"""
| 42.423729 | 120 | 0.620988 | from functools import partial
import community as community_louvain
import torch
import torch.nn.functional as F
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder
from torch_geometric.data import Batch
from torch_geometric.nn import avg_pool
from torch_geometric.utils.convert import to_networkx
import gnns.ogbmol_conv
from gnns.gcn_wparent import GCNConvWParent
class HierarchicalGraphNet(torch.nn.Module):
"""The Hierarchical GraphNet
TODO: update docstring
"""
def __init__(self, in_channels, hidden_channels, out_channels, depth,
no_unpool=False, dropout_ratio=0.5, normalize=True,
inter_connect='sum', act=F.relu):
super().__init__()
assert depth >= 1
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.depth = depth
self.no_unpool = no_unpool
self.dropout_ratio = dropout_ratio
self.act = act
self.normalize = normalize
assert normalize == True, 'switching normalization not implemented'
assert inter_connect in ('sum', 'concat', 'addnode'), \
f"Unknown inter-layer connection type: {inter_connect}"
self.inter_connect = inter_connect
assert hidden_channels == out_channels, "For OGB datasets expecting the same dim."
channels = hidden_channels
norm_class = torch.nn.BatchNorm1d
# norm_class = torch.nn.LayerNorm
# Convolutions going UP the hierarchy towards coarsest level
self.atom_encoder = AtomEncoder(channels)
self.up_convs = torch.nn.ModuleList()
self.up_edge_encs = torch.nn.ModuleList()
self.up_norms = torch.nn.ModuleList()
# 1st layer is a GCN with BondEncoder
self.up_convs.append(gnns.ogbmol_conv.GCNConv(channels))
self.up_edge_encs.append(BondEncoder(channels))
self.up_norms.append(norm_class(channels))
for _ in range(depth):
self.up_convs.append(gnns.ogbmol_conv.GCNConv(channels))
self.up_edge_encs.append(torch.nn.Linear(channels, channels))
self.up_norms.append(norm_class(channels))
# Convolutions going back DOWN the hierarchy from coarsest to finest level
self.down_convs = torch.nn.ModuleList()
self.down_norms = torch.nn.ModuleList()
if inter_connect == 'addnode':
GCN_class = partial(GCNConvWParent, in_dim=channels)
else:
GCN_class = gnns.ogbmol_conv.GCNConv
self.down_convs.append(GCN_class(emb_dim=channels))
self.down_norms.append(norm_class(channels))
for _ in range(depth - 1):
self.down_convs.append(GCN_class(emb_dim=channels))
self.down_norms.append(norm_class(channels))
# TODO: Actually use the separate edge encoders for up-convs?
# Now keeping the edge representation from up-conv layers
# self.down_edge_encs = torch.nn.ModuleList()
# self.down_edge_encs.append(BondEncoder(channels))
# for _ in range(depth - 1):
# self.down_edge_encs.append(torch.nn.Linear(channels, channels))
if inter_connect == 'concat':
self.down_node_encs = torch.nn.ModuleList()
for i in range(depth):
self.down_node_encs.append(torch.nn.Linear(2 * channels, channels))
def forward(self, data):
# edge_index, _ = remove_self_loops(edge_index)
# h_list = [self.atom_encoder(x)]
# x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr
data.x = self.atom_encoder(data.x)
data.edge_attr = self.up_edge_encs[0](data.edge_attr)
data.x = self.up_convs[0](data.x, data.edge_index, data.edge_attr)
data.x = self.up_norms[0](data.x)
data.x = self.act(data.x)
data.x = F.dropout(data.x, self.dropout_ratio, training=self.training)
xs = [data.x]
edge_indices = [data.edge_index]
edge_attrs = [data.edge_attr]
clusters = []
precompute_clusters = True
if precompute_clusters:
cpu_data = Batch(x=torch.ones([data.x.shape[0], 1]), edge_index=data.edge_index, batch=data.batch).to('cpu')
for i in range(self.depth):
G = to_networkx(cpu_data, to_undirected=True)
dendo = community_louvain.generate_dendrogram(G, random_state=1)
partition = community_louvain.partition_at_level(dendo, 0)
# partition = community_louvain.best_partition(G)
cluster = torch.tensor(list(partition.values()))
cpu_data = avg_pool(cluster, cpu_data)
clusters.append(cluster.to(data.x.device))
for level in range(1, self.depth + 1):
if not precompute_clusters:
G = to_networkx(data, to_undirected=True)
dendo = community_louvain.generate_dendrogram(G, random_state=1)
partition = community_louvain.partition_at_level(dendo, 0)
cluster = torch.tensor(list(partition.values()), device=data.x.device)
clusters.append(cluster)
data = avg_pool(clusters[level - 1], data)
# x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr
# print(f"postpool [{level}]: {x.shape}")
data.edge_attr = self.up_edge_encs[level](data.edge_attr)
data.x = self.up_convs[level](data.x, data.edge_index, data.edge_attr)
data.x = self.up_norms[level](data.x)
data.x = self.act(data.x)
data.x = F.dropout(data.x, self.dropout_ratio, training=self.training)
if level < self.depth:
xs.append(data.x)
edge_indices.append(data.edge_index)
edge_attrs.append(data.edge_attr)
# for ind, val in enumerate(xs):
# print(f" {ind}: x shape = P{val.shape}")
if self.no_unpool:
return data
x = data.x
for level in reversed(range(self.depth)):
res = xs[level]
edge_index = edge_indices[level]
edge_attr = edge_attrs[level]
cluster = clusters[level]
unpooled = x[cluster] # invert clustering i.e. "unpool"
if self.inter_connect == 'sum':
x = res + unpooled
elif self.inter_connect == 'concat':
x = torch.cat((res, unpooled), dim=-1)
x = self.down_node_encs[level](x)
elif self.inter_connect == 'addnode':
x = res
parent = unpooled
else:
assert False, f"Unexpected layer connect: {self.inter_connect}"
if self.inter_connect == 'addnode':
x = self.down_convs[level](x, parent, edge_index, edge_attr)
else:
x = self.down_convs[level](x, edge_index, edge_attr)
x = self.down_norms[level](x)
x = self.act(x) # if level > 0 else x
x = F.dropout(x, self.dropout_ratio, training=self.training)
return x
def __repr__(self):
rep = '{}({}, {}, {}, depth={}, inter_connect={}, dropout_ratio={})'.format(
self.__class__.__name__, self.in_channels, self.hidden_channels,
self.out_channels, self.depth, self.inter_connect, self.dropout_ratio)
rep += '\n'
rep += super().__repr__()
return rep
| 6,930 | 0 | 81 |
dba91d6f78e9798d67fc85264ef253aa4654464c | 1,763 | py | Python | AnnotationScripts/Defects4JSpecific/getCoverageDetails.py | UMass-COMPSCI-630/AutomatedRepairApplicabilityData | e33f0ccbfe63010cab7fe72cc05b8d289760e8a8 | [
"MIT"
] | 8 | 2017-10-07T15:01:01.000Z | 2022-03-06T01:03:06.000Z | AnnotationScripts/Defects4JSpecific/getCoverageDetails.py | UMass-COMPSCI-630/AutomatedRepairApplicabilityData | e33f0ccbfe63010cab7fe72cc05b8d289760e8a8 | [
"MIT"
] | null | null | null | AnnotationScripts/Defects4JSpecific/getCoverageDetails.py | UMass-COMPSCI-630/AutomatedRepairApplicabilityData | e33f0ccbfe63010cab7fe72cc05b8d289760e8a8 | [
"MIT"
] | 5 | 2018-01-10T02:36:22.000Z | 2022-03-16T11:30:56.000Z | # PURPOSE:script to get coverage of test suite for Defects4J defects
# INPUT: script requires <path-to-defects4j> as command line argument
# OUTPUT: output of the script is Defects4JCoverage.csv that lists Project, DefectId, and StatementCoverage for all the defects of Defects4J
# HOW TO RUN: run the script using command: python getCoverageDetails.py <path-to-defects4j>
# REQUIREMENTS AND DEPENDENCIES: script requires Defects4J installed on system"
import os
import commands
import sys
if len(sys.argv) < 2:
print "ERROR: Please provide path to Defects4J directory"
sys.exit()
defects4jpath = str(sys.argv[1]) # path to Defects4J
outputfile = open("Defects4JCoverage.csv", 'w')
outputfile.write("Project,DefectId,StatementCoverage\n")
projects = ["Chart", "Lang", "Math", "Time"]
noofdefects = {}
noofdefects["Chart"] = 26
noofdefects["Lang"] = 65
noofdefects["Math"] = 106
noofdefects["Time"] = 27
for proj in projects:
for i in range(1,noofdefects[proj]+1):
command = defects4jpath + "/framework/bin/defects4j checkout -p " + proj + " -v " + str(i) + "b -w /tmp/" + proj.lower() + "_" + str(i) + "_buggy"
print command
checkoutput = commands.getoutput(command)
if checkoutput:
os.chdir("/tmp/" + proj.lower() + "_" + str(i) + "_buggy")
command = defects4jpath + "/framework/bin/defects4j coverage"
print command
covoutput = commands.getoutput(command)
print covoutput
lines = covoutput.split('\n')
found=0
for l in lines:
if l.find("Line coverage:")!=-1 :
found=1
stmtcoverage = l[l.find(":")+2:len(l)]
if found==1:
outline = proj + "," + str(i) + "," + str(stmtcoverage)
outputfile.write(outline)
outputfile.write('\n')
outputfile.close()
| 36.729167 | 148 | 0.678956 | # PURPOSE:script to get coverage of test suite for Defects4J defects
# INPUT: script requires <path-to-defects4j> as command line argument
# OUTPUT: output of the script is Defects4JCoverage.csv that lists Project, DefectId, and StatementCoverage for all the defects of Defects4J
# HOW TO RUN: run the script using command: python getCoverageDetails.py <path-to-defects4j>
# REQUIREMENTS AND DEPENDENCIES: script requires Defects4J installed on system"
import os
import commands
import sys
if len(sys.argv) < 2:
print "ERROR: Please provide path to Defects4J directory"
sys.exit()
defects4jpath = str(sys.argv[1]) # path to Defects4J
outputfile = open("Defects4JCoverage.csv", 'w')
outputfile.write("Project,DefectId,StatementCoverage\n")
projects = ["Chart", "Lang", "Math", "Time"]
noofdefects = {}
noofdefects["Chart"] = 26
noofdefects["Lang"] = 65
noofdefects["Math"] = 106
noofdefects["Time"] = 27
for proj in projects:
for i in range(1,noofdefects[proj]+1):
command = defects4jpath + "/framework/bin/defects4j checkout -p " + proj + " -v " + str(i) + "b -w /tmp/" + proj.lower() + "_" + str(i) + "_buggy"
print command
checkoutput = commands.getoutput(command)
if checkoutput:
os.chdir("/tmp/" + proj.lower() + "_" + str(i) + "_buggy")
command = defects4jpath + "/framework/bin/defects4j coverage"
print command
covoutput = commands.getoutput(command)
print covoutput
lines = covoutput.split('\n')
found=0
for l in lines:
if l.find("Line coverage:")!=-1 :
found=1
stmtcoverage = l[l.find(":")+2:len(l)]
if found==1:
outline = proj + "," + str(i) + "," + str(stmtcoverage)
outputfile.write(outline)
outputfile.write('\n')
outputfile.close()
| 0 | 0 | 0 |
2a521e742d262a58567afd58de4beb93015891bb | 1,556 | py | Python | spyder_pomodoro_timer/spyder/confpage.py | map0logo/spyder-pomodoro-timer | 88c0b870fecb1d97ea6b0624cbf8da90aa181e48 | [
"MIT"
] | null | null | null | spyder_pomodoro_timer/spyder/confpage.py | map0logo/spyder-pomodoro-timer | 88c0b870fecb1d97ea6b0624cbf8da90aa181e48 | [
"MIT"
] | null | null | null | spyder_pomodoro_timer/spyder/confpage.py | map0logo/spyder-pomodoro-timer | 88c0b870fecb1d97ea6b0624cbf8da90aa181e48 | [
"MIT"
] | 1 | 2021-09-02T22:09:36.000Z | 2021-09-02T22:09:36.000Z | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2021, Francisco Palm
#
# Licensed under the terms of the MIT license
# ----------------------------------------------------------------------------
"""
Spyder Pomodoro Timer Preferences Page.
"""
from qtpy.QtWidgets import QGridLayout, QGroupBox, QVBoxLayout
from spyder.api.preferences import PluginConfigPage
from spyder.api.translations import get_translation
from spyder_pomodoro_timer.spyder.config import POMODORO_DEFAULT
_ = get_translation("spyder_pomodoro_timer.spyder")
# --- PluginConfigPage API
# ------------------------------------------------------------------------
| 33.826087 | 78 | 0.580334 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2021, Francisco Palm
#
# Licensed under the terms of the MIT license
# ----------------------------------------------------------------------------
"""
Spyder Pomodoro Timer Preferences Page.
"""
from qtpy.QtWidgets import QGridLayout, QGroupBox, QVBoxLayout
from spyder.api.preferences import PluginConfigPage
from spyder.api.translations import get_translation
from spyder_pomodoro_timer.spyder.config import POMODORO_DEFAULT
_ = get_translation("spyder_pomodoro_timer.spyder")
class SpyderPomodoroTimerConfigPage(PluginConfigPage):
# --- PluginConfigPage API
# ------------------------------------------------------------------------
def setup_page(self):
limits_group = QGroupBox(_("Time limits"))
pomodoro_spin = self.create_spinbox(
_("Pomodoro timer limit"),
_("min"),
"pomodoro_limit",
default=POMODORO_DEFAULT,
min_=5,
max_=100,
step=1,
)
pt_limits_layout = QGridLayout()
pt_limits_layout.addWidget(pomodoro_spin.plabel, 0, 0)
pt_limits_layout.addWidget(pomodoro_spin.spinbox, 0, 1)
pt_limits_layout.addWidget(pomodoro_spin.slabel, 0, 2)
pt_limits_layout.setColumnStretch(1, 100)
limits_group.setLayout(pt_limits_layout)
vlayout = QVBoxLayout()
vlayout.addWidget(limits_group)
vlayout.addStretch(1)
self.setLayout(vlayout)
| 763 | 33 | 49 |
8f7b738aed932311b16e1d2bb8ef45c723d8c5b8 | 712 | py | Python | cli.py | Hubert482/cainapp | 7a74a9b186ee358168c8f050e445fbe9f91f9c47 | [
"MIT"
] | 18 | 2021-03-27T15:39:35.000Z | 2022-03-07T11:19:48.000Z | cli.py | Hubert482/cainapp | 7a74a9b186ee358168c8f050e445fbe9f91f9c47 | [
"MIT"
] | null | null | null | cli.py | Hubert482/cainapp | 7a74a9b186ee358168c8f050e445fbe9f91f9c47 | [
"MIT"
] | 4 | 2021-03-27T15:26:16.000Z | 2021-06-11T23:17:48.000Z | import argparse
arg_lists = []
parser = argparse.ArgumentParser()
data_arg = add_argument_group('data')
data_arg.add_argument('--path', type=str, default='frames/')
data_arg.add_argument('--img_fmt', type=str, default='jpg')
data_arg.add_argument('--model', type=str, default='output.pth')
data_arg.add_argument('--run', type=int, default= 2) # example 1=2x 2=4x 3=8x ...
startnum=0
args = parser.parse_args()
import modules.generate
while args.runtimes>startnum:
generate.interpolation(batch_size=4, temp_img = args.path, fp16=True, modelp=args.model,img_fmt=args.img_fmt)
startnum+=1
| 29.666667 | 113 | 0.740169 | import argparse
arg_lists = []
parser = argparse.ArgumentParser()
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
data_arg = add_argument_group('data')
data_arg.add_argument('--path', type=str, default='frames/')
data_arg.add_argument('--img_fmt', type=str, default='jpg')
data_arg.add_argument('--model', type=str, default='output.pth')
data_arg.add_argument('--run', type=int, default= 2) # example 1=2x 2=4x 3=8x ...
startnum=0
args = parser.parse_args()
import modules.generate
while args.runtimes>startnum:
generate.interpolation(batch_size=4, temp_img = args.path, fp16=True, modelp=args.model,img_fmt=args.img_fmt)
startnum+=1
| 91 | 0 | 23 |
4e9e90fd5d0f78c4824269ead10a06923f91e534 | 3,865 | py | Python | regress_topic_audio_genre_on_rating.py | jrgillick/music_supervisor | 98a410ed91384aeadd69dc9ee48f8d1e63dda02b | [
"MIT"
] | 2 | 2019-09-19T17:02:16.000Z | 2020-05-21T13:44:17.000Z | regress_topic_audio_genre_on_rating.py | jrgillick/music_supervisor | 98a410ed91384aeadd69dc9ee48f8d1e63dda02b | [
"MIT"
] | null | null | null | regress_topic_audio_genre_on_rating.py | jrgillick/music_supervisor | 98a410ed91384aeadd69dc9ee48f8d1e63dda02b | [
"MIT"
] | null | null | null | import featurize, load_data, numpy as np
import sklearn.linear_model
from scipy import stats
import statsmodels.api as sm
import sys
from sklearn import preprocessing
import pandas as pd
data = load_data.load_data()
data.tempo /= np.max(data.tempo)
audio_features1 = ['mode','tempo','danceability','acousticness','instrumentalness']
topics=["topic-0-nice-bit", "topic-1-sir-dear", "topic-2-christmas-la", "topic-3-dad-mom", "topic-4-sir-colonel", "topic-5-um-work", "topic-6-president-mr.", "topic-7-japanese-dawson", "topic-8-unsub-garcia", "topic-9-game-team", "topic-10-sir-captain", "topic-11-mr.-court", "topic-12-boat-water", "topic-13-leave-understand", "topic-14-fuck-shit", "topic-15-war-country", "topic-16-years-world", "topic-17-plane-move", "topic-18-captain-ship", "topic-19-police-kill", "topic-20-bit-mum", "topic-21-ah-aah", "topic-22-'t-narrator", "topic-23-sighs-chuckles", "topic-24-ya-'em", "topic-25-remember-feel", "topic-26-boy-huh", "topic-27-mr.-sir", "topic-28-dr.-doctor", "topic-29-father-lord", "topic-30-money-business", "topic-31-alright-lt", "topic-32-sir-brother", "topic-33-school-class", "topic-34-vic-jax", "topic-35-gibbs-mcgee", "topic-36-monsieur-madame", "topic-37-baby-yo", "topic-38-agent-security", "topic-39-kill-dead", "topic-40-music-show", "topic-41-ofthe-thankyou", "topic-42-dude-cool", "topic-43-spanish-el", "topic-44-eat-nice", "topic-45-murder-killed", "topic-46-car-drive", "topic-47-town-horse", "topic-48-film-movie", "topic-49-woman-married"]
genre_features = [d for d in list(data.columns) if 'genre_' in d]
feats = topics + audio_features1 + genre_features
# e.g. python regress_topics_on_audio.py danceability
run_regression_cem(sys.argv[1], feats) | 35.458716 | 1,167 | 0.700129 | import featurize, load_data, numpy as np
import sklearn.linear_model
from scipy import stats
import statsmodels.api as sm
import sys
from sklearn import preprocessing
import pandas as pd
data = load_data.load_data()
data.tempo /= np.max(data.tempo)
audio_features1 = ['mode','tempo','danceability','acousticness','instrumentalness']
topics=["topic-0-nice-bit", "topic-1-sir-dear", "topic-2-christmas-la", "topic-3-dad-mom", "topic-4-sir-colonel", "topic-5-um-work", "topic-6-president-mr.", "topic-7-japanese-dawson", "topic-8-unsub-garcia", "topic-9-game-team", "topic-10-sir-captain", "topic-11-mr.-court", "topic-12-boat-water", "topic-13-leave-understand", "topic-14-fuck-shit", "topic-15-war-country", "topic-16-years-world", "topic-17-plane-move", "topic-18-captain-ship", "topic-19-police-kill", "topic-20-bit-mum", "topic-21-ah-aah", "topic-22-'t-narrator", "topic-23-sighs-chuckles", "topic-24-ya-'em", "topic-25-remember-feel", "topic-26-boy-huh", "topic-27-mr.-sir", "topic-28-dr.-doctor", "topic-29-father-lord", "topic-30-money-business", "topic-31-alright-lt", "topic-32-sir-brother", "topic-33-school-class", "topic-34-vic-jax", "topic-35-gibbs-mcgee", "topic-36-monsieur-madame", "topic-37-baby-yo", "topic-38-agent-security", "topic-39-kill-dead", "topic-40-music-show", "topic-41-ofthe-thankyou", "topic-42-dude-cool", "topic-43-spanish-el", "topic-44-eat-nice", "topic-45-murder-killed", "topic-46-car-drive", "topic-47-town-horse", "topic-48-film-movie", "topic-49-woman-married"]
genre_features = [d for d in list(data.columns) if 'genre_' in d]
feats = topics + audio_features1 + genre_features
def cem(target_feature, X, y):
buckets=[]
for i in range(2):
buckets.append({})
# keep track of counts -- we want to have the same distribution of the treatment within each bucket
counts=np.zeros(2)
# get the highest scoring topics for each movie
argtopics=X[topics].idxmax(axis="columns")
for i, row in X.iterrows():
signature=[]
signature.append(argtopics[i])
# binarize the treatmenet variable (e.g., danceability)
target_val=row[target_feature]
if target_val > .5:
target_val=int(1)
else:
target_val=int(0)
counts[target_val]+=1
# binarize the other audio features; genre features are already binary
for feat in audio_features1:
if feat == target_feature:
continue
val=row[feat]
if val > .5:
val=1
else:
val=0
row[feat]=val
signature.append(val)
for feat in genre_features:
signature.append(row[feat])
sigstr=' '.join(str(x) for x in signature)
# the feature values define the bucket this point is placed into
if sigstr not in buckets[target_val]:
buckets[target_val][sigstr]={}
buckets[target_val][sigstr][i]=1
matchedids={}
f0=counts[0]/(counts[0]+counts[1])
f1=1-f0
for sigstring in buckets[0]:
if sigstring in buckets[1]:
# mininum of 5 points in each treatment condition within each bucket
if len(buckets[0][sigstring]) < 5 or len(buckets[1][sigstring]) < 5:
continue
# subsample points in bucket to reflect overall distribution of treatment in overall data
t0=len(buckets[0])*f0
t1=len(buckets[1])*f1
if t0 < 1 or t1 < 1:
continue
c=0
for idd in buckets[0][sigstring]:
matchedids[idd]=1
c+=1
if c >= t0:
break
c=0
for idd in buckets[1][sigstring]:
matchedids[idd]=1
c+=1
if c >= t1:
break
# return subset of matched data and regress on that
return X.iloc[list(matchedids.keys())], y.iloc[list(matchedids.keys())]
def run_regression_cem(treatment_variable, features):
X = data[features]
y = data.averageRating
X, y=cem(treatment_variable, X, y)
X_with_bias_term = sm.add_constant(X)
est = sm.OLS(y, X_with_bias_term)
fit = est.fit()
print(fit.summary())
# e.g. python regress_topics_on_audio.py danceability
run_regression_cem(sys.argv[1], feats) | 2,102 | 0 | 46 |
9ae9ca898c1b4ce9068c80358a718c8ab95b84ae | 5,395 | py | Python | options/options.py | ma3252788/detection_template | 0079e8e40c59c1b0c9b868e9a361bd0cc232a583 | [
"MIT"
] | 3 | 2021-06-20T03:35:04.000Z | 2021-07-18T14:13:14.000Z | options/options.py | ma3252788/detection_template | 0079e8e40c59c1b0c9b868e9a361bd0cc232a583 | [
"MIT"
] | null | null | null | options/options.py | ma3252788/detection_template | 0079e8e40c59c1b0c9b868e9a361bd0cc232a583 | [
"MIT"
] | null | null | null | import argparse
import json
import sys
import os
import torch
import misc_utils as utils
"""
Arg parse
opt = parse_args()
"""
opt = parse_args()
opt.device = 'cuda:' + opt.gpu_ids if torch.cuda.is_available() and opt.gpu_ids != '-1' else 'cpu'
if opt.opt:
with open(opt.opt, 'r') as f:
a = json.load(f)
for k, v in a.items():
setattr(opt, k, v)
if opt.debug:
opt.save_freq = 1
opt.eval_freq = 1
opt.log_freq = 1
if opt.tag != 'cache':
pid = f'[PID:{os.getpid()}]'
with open('run_log.txt', 'a') as f:
f.writelines(utils.get_time_str(fmt="%Y-%m-%d %H:%M:%S") + ' ' + pid + ' ' + get_command_run() + '\n')
# utils.print_args(opt)
| 38.535714 | 125 | 0.650602 | import argparse
import json
import sys
import os
import torch
import misc_utils as utils
"""
Arg parse
opt = parse_args()
"""
def parse_args():
# experiment specifics
parser = argparse.ArgumentParser()
parser.add_argument('--tag', type=str, default='cache',
help='folder name to save the outputs')
parser.add_argument('--opt', type=str, default=None,
help='parse options from .opt file')
parser.add_argument('--gpu_ids', '--gpu', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
# dirs (NOT often Changed)
parser.add_argument('--data_root', type=str, default='./datasets/')
parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--log_dir', type=str, default='./logs', help='logs are saved here')
parser.add_argument('--result_dir', type=str, default='./results', help='results are saved here')
#######################
parser.add_argument('--model', type=str, default=None, help='which model to use')
parser.add_argument('--backbone', type=str, default=None, help='which backbone to use')
parser.add_argument('--norm', type=str, choices=['batch', 'instance', None], default=None,
help='[instance] normalization or [batch] normalization')
# batch size
parser.add_argument('--batch_size', '-b', type=int, default=1, help='input batch size')
# optimizer and scheduler
parser.add_argument('--optimizer', choices=['adam', 'sgd', 'radam', 'lookahead', 'ranger'], default='adam')
parser.add_argument('--scheduler', default='1x')
# data augmentation
# parser.add_argument('--aug', action='store_true', help='Randomly scale, jitter, change hue, saturation and brightness')
# scale
parser.add_argument('--scale', type=int, default=None, help='scale images to this size')
parser.add_argument('--crop', type=int, default=None, help='then crop to this size')
parser.add_argument('--workers', '-w', type=int, default=4, help='num of workers')
# for datasets
parser.add_argument('--dataset', default='voc', help='training dataset')
parser.add_argument('--transform', default=None, help='transform')
parser.add_argument('--val_set', type=str, default=None)
parser.add_argument('--test_set', type=str, default=None)
# init weights
parser.add_argument('--init', type=str, default=None, help='{normal, xavier, kaiming, orthogonal}')
# training options
parser.add_argument('--debug', action='store_true', help='debug mode')
parser.add_argument('--vis', action='store_true', help='vis eval result')
parser.add_argument('--load', type=str, default=None, help='load checkpoint')
parser.add_argument('--weights', type=str, default=None, help='load checkpoint for Detector')
parser.add_argument('--resume', action='store_true', help='resume training, only used when --load')
parser.add_argument('--reset', action='store_true', help='reset training, only used when --load')
parser.add_argument('--epochs', '--max_epoch', type=int, default=500, help='epochs to train')
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')
parser.add_argument('--seed', type=int, default=None, help='random seed')
# test time bbox settings
parser.add_argument('--conf_thresh', type=float, default=0.01, help='bboxes with conf < this threshold will be ignored')
parser.add_argument('--nms_thresh', type=float, default=0.45, help='nms threshold')
parser.add_argument('--wbf_thresh', type=float, default=0.5, help='wbf threshold')
parser.add_argument('--box_fusion', choices=['nms', 'wbf'], default='nms')
parser.add_argument('--save_freq', type=int, default=10, help='freq to save models')
parser.add_argument('--eval_freq', '--val_freq', type=int, default=10, help='freq to eval models')
parser.add_argument('--log_freq', type=int, default=1, help='freq to vis in tensorboard')
parser.add_argument('--no_eval', '--no_val', action='store_true', help='不要eval')
# test options
parser.add_argument('--tta', action='store_true', help='test with augmentation')
parser.add_argument('--tta-x8', action='store_true', help='test with augmentation x8')
return parser.parse_args()
opt = parse_args()
opt.device = 'cuda:' + opt.gpu_ids if torch.cuda.is_available() and opt.gpu_ids != '-1' else 'cpu'
if opt.opt:
with open(opt.opt, 'r') as f:
a = json.load(f)
for k, v in a.items():
setattr(opt, k, v)
if opt.debug:
opt.save_freq = 1
opt.eval_freq = 1
opt.log_freq = 1
def get_command_run():
    """Reconstruct the shell command line that launched this process.

    Includes a CUDA_VISIBLE_DEVICES=... prefix when that variable is set,
    then the interpreter name (python3/python) and the argv, with the
    script path reduced to its basename.
    """
    argv = list(sys.argv)
    argv[0] = argv[0].split('/')[-1]  # keep only the script's basename
    parts = []
    gpu_ids = os.environ.get('CUDA_VISIBLE_DEVICES')
    if gpu_ids is not None:
        parts.append(f'CUDA_VISIBLE_DEVICES={gpu_ids}')
    parts.append('python3' if sys.version[0] == '3' else 'python')
    parts.extend(argv)
    return ' '.join(parts)
# Append a launch record (timestamp, PID, full command) to run_log.txt,
# except for throwaway runs tagged 'cache'.
if opt.tag != 'cache':
    pid = f'[PID:{os.getpid()}]'
    with open('run_log.txt', 'a') as f:
        f.writelines(utils.get_time_str(fmt="%Y-%m-%d %H:%M:%S") + ' ' + pid + ' ' + get_command_run() + '\n')
    # utils.print_args(opt)
| 4,643 | 0 | 46 |
02bd8eb997c4892db022e0c8b884eb12f1510e26 | 23,764 | py | Python | copperhead/compiler/rewrites.py | zahangircse/copperhead | b6b15adfe99dcd102c4a7f23508779cbc235c655 | [
"Apache-2.0"
] | null | null | null | copperhead/compiler/rewrites.py | zahangircse/copperhead | b6b15adfe99dcd102c4a7f23508779cbc235c655 | [
"Apache-2.0"
] | null | null | null | copperhead/compiler/rewrites.py | zahangircse/copperhead | b6b15adfe99dcd102c4a7f23508779cbc235c655 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2008-2012 NVIDIA Corporation
# Copyright 2009-2010 University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Basic syntactic rewrites for Copperhead compiler.
This module implements the rewrite passes used by the Copperhead
compiler to transform the input program into a more easily analyzed
form. The routines in this module assume that the syntax trees they are
given are well-formed, but they do not generally make any assumptions
about type soundness.
The rewrites provided by this module are fairly standard and operate on
the source program without consideration for any parallelism.
Supported rewrites include:
o Closure conversion
o Lambda lifting
o Single assignment conversion
"""
import coresyntax as S
import pltools
from utility import flatten
import copy
import coretypes as T
def single_assignment_conversion(stmt, env=None, exceptions=None):
    'Rename locally declared variables so that each is bound exactly once'
    # None sentinels instead of mutable defaults ({} / set()), which would
    # be shared across calls and could leak renamings between invocations.
    if env is None:
        env = {}
    if exceptions is None:
        exceptions = set()
    rewrite = SingleAssignmentRewrite(env, exceptions)
    return rewrite.rewrite(stmt)
class LambdaLifter(S.SyntaxRewrite):
    """
    Convert every expression of the form:
    lambda x1,...,xn: E
    into a reference to a procedure __lambdaN and add
    def __lambdaN(x1,...,xn): return E
    to the procedure list.
    This rewriter assumes that closure conversion has already been
    performed. In other words, there are no freely occurring
    local variables in the body of the lambda expression.
    """
    # NOTE(review): only the docstring is present here -- the method
    # bodies appear to have been stripped from this copy of the file.
class ProcedureFlattener(S.SyntaxRewrite):
    """
    Flatten the list of defined procedures so that no definition is
    nested within another procedure. This should only be applied after
    closure conversion and lambda lifting are complete.
    """
    # NOTE(review): only the docstring is present here -- the method
    # bodies appear to have been stripped from this copy of the file.
# XXX If things other than procedures become allowed as top-level
# forms, make sure that they are handled here.
# XXX Most of the code in this rewriter simply serves to track
# variables defined in the current scope. That should be
# abstracted into a more generic base class that could be used
# elsewhere.
# XXX This rewrite rule -- coupled with the rule for _Procedure in
# _ClosureConverter -- is an ugly hack for rewriting calls to
# procedures. We should find a more elegant solution!
def closure_conversion(ast, globals=None):
    """
    Tag every variable that is lexically closed over by a nested lambda
    or procedure definition.

    A variable occurring inside a lambda/procedure forms a closure when
    it is not one of that lambda/procedure's formals but is bound in the
    enclosing scope.  Such variables are lifted into explicit
    closure([...], fn) forms and threaded through as extra arguments,
    e.g.::

        lambda x: lambda y: x  =>  lambda x: closure([x], lambda y, _K0: _K0)

    Names appearing in 'globals' are globally visible and are never
    closed over.  The copperhead.interlude module provides a native
    Python implementation of the closure() expression.
    """
    return _ClosureConverter(globals=globals).rewrite(ast)
class ConditionalProtector(S.SyntaxRewrite):
    """
    Convert every expression of the form:
    E1 if P else E2
    into the equivalent form:
    ((lambda: E1) if P else (lambda: E2))()
    The purpose of this rewriter is to protect the branches of the
    conditional during later phases of the compiler. It guarantees that
    exactly one of E1/E2 will ever be evaluated.
    """
    # NOTE(review): only the docstring is present here -- the method
    # bodies appear to have been stripped from this copy of the file.
| 34.045845 | 145 | 0.60276 | #
# Copyright 2008-2012 NVIDIA Corporation
# Copyright 2009-2010 University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Basic syntactic rewrites for Copperhead compiler.
This module implements the rewrite passes used by the Copperhead
compiler to transform the input program into a more easily analyzed
form. The routines in this module assume that the syntax trees they are
given are well-formed, but they do not generally make any assumptions
about type soundness.
The rewrites provided by this module are fairly standard and operate on
the source program without consideration for any parallelism.
Supported rewrites include:
o Closure conversion
o Lambda lifting
o Single assignment conversion
"""
import coresyntax as S
import pltools
from utility import flatten
import copy
import coretypes as T
class SourceGatherer(S.SyntaxRewrite):
    """Prepend to a suite the syntax trees of every referenced procedure.

    Free names that resolve to a global carrying a 'syntax_tree'
    attribute (presumably Copperhead-decorated procedures -- confirm)
    have their trees queued and gathered transitively, each inserted
    ahead of the statements that reference it.
    """
    def __init__(self, globals):
        self.globals = globals
        self.env = pltools.Environment()
        self.clean = []
    def gather(self, suite):
        # self.sources is a work queue of procedure trees discovered
        # while rewriting; each gathered tree is itself rewritten so its
        # own references are discovered too.
        self.sources = []
        self.gathered = set()
        for stmt in suite:
            self.clean.append(self.rewrite(stmt))
        while self.sources:
            stmt = self.sources.pop(0)
            self.clean.insert(0, self.rewrite(stmt))
        return list(flatten(self.clean))
    def _Procedure(self, proc):
        # Bind the procedure name in the enclosing scope, then its
        # formals in a fresh scope while rewriting the body.
        proc_id = proc.name().id
        self.env[proc_id] = proc_id
        self.env.begin_scope()
        for param in flatten(proc.formals()):
            id = param.id
            self.env[id] = id
        self.rewrite_children(proc)
        self.env.end_scope()
        return proc
    def _Bind(self, bind):
        # Record every name bound by this statement (tuple or single).
        destination = bind.binder()
        if isinstance(destination, S.Tuple):
            for dest in flatten(destination):
                self.env[dest.id] = dest.id
        else:
            id = destination.id
            self.env[id] = id
        self.rewrite_children(bind)
        return bind
    def _Name(self, name):
        # A free name (not locally bound) that maps to a global with
        # source attached is queued for gathering, once per function.
        if not name.id in self.env:
            if name.id in self.globals:
                fn = self.globals[name.id]
                if hasattr(fn, 'syntax_tree') and \
                   fn.__name__ not in self.gathered:
                    self.sources.append(fn.syntax_tree)
                    self.gathered.add(fn.__name__)
        return name
def gather_source(stmt, M):
    """Return stmt with the sources of all referenced procedures gathered in."""
    return SourceGatherer(M.globals).gather(stmt)
class IdentifierMarker(S.SyntaxRewrite):
    """Mark identifiers written by the user, as opposed to compiler-generated
    names, so later phases can distinguish them."""
    def __init__(self, globals):
        self.globals = globals
    def _Name(self, name):
        if name.id in self.globals:
            if hasattr(self.globals[name.id], 'syntax_tree'):
                #A user wrote this identifier
                return S.mark_user(name)
            else:
                # Built-in/primitive global: leave it unmarked.
                return name
        else:
            return S.mark_user(name)
    def _Procedure(self, proc):
        self.rewrite_children(proc)
        # NOTE: map() here yields a list under Python 2, which this
        # codebase targets (see the Python 2 raise syntax elsewhere).
        proc.variables = map(S.mark_user, proc.variables)
        return proc
    def _Lambda(self, lamb):
        # Lambdas carry the same 'variables' structure as procedures.
        return self._Procedure(lamb)
    def _Bind(self, bind):
        self.rewrite_children(bind)
        bind.id = self.rewrite(bind.id)
        return bind
def mark_identifiers(stmt, M):
    """Mark user-written identifiers in stmt and in the compilation
    metadata M (entry points and input-type table)."""
    marker = IdentifierMarker(M.globals)
    marked = marker.rewrite(stmt)
    #Rather than make core syntax deal sordidly with strings
    #Wrap them up here.
    def mark_user(x):
        return S.mark_user(S.Name(x)).id
    M.entry_points = map(mark_user, M.entry_points)
    # Iterate over a snapshot of the keys: the loop body mutates
    # input_types (inserts the marked key, deletes the original), which
    # would break iteration over a live keys() view on Python 3.
    for x in list(M.input_types.keys()):
        M.input_types[mark_user(x)] = M.input_types[x]
        del M.input_types[x]
    return marked
class VariadicLowerer(S.SyntaxRewrite):
    """Lower variadic primitives (map, zip) into fixed-arity named forms,
    e.g. a 3-argument zip becomes a call to 'zip3'."""
    def __init__(self):
        self.applies = set(['zip'])
        # XXX Do this for unzip as well
        # NOTE(review): self.binders is never read anywhere in this
        # class -- unzip lowering appears unimplemented.
        self.binders = set(['unzip'])
    def _Map(self, ast):
        # parameters[0] is the mapped function; the rest are sequences.
        args = ast.parameters
        arity = len(args) - 1
        assert(arity > 0)
        return S.Apply(S.Name('map' + str(arity)),
                       args)
    def _Apply(self, ast):
        fn_id = ast.function().id
        if fn_id in self.applies:
            args = ast.arguments()
            arity = len(args)
            return S.Apply(S.Name(fn_id + str(arity)),
                           args)
        else:
            return ast
def lower_variadics(stmt):
    """Rewrite variadic map/zip uses in stmt into fixed-arity forms."""
    return VariadicLowerer().rewrite(stmt)
class SingleAssignmentRewrite(S.SyntaxRewrite):
    """Rewrite bindings so each variable is assigned exactly once,
    renaming rebound names with a globally unique numeric suffix."""
    import itertools
    # Shared counter: suffixes are unique across all instances.
    serial = itertools.count(1)
    def __init__(self, env, exceptions):
        self.env = pltools.Environment(env)
        # Names in 'exceptions' are never renamed.
        self.exceptions = exceptions
        # While True (inside a while loop), existing renamings are reused
        # rather than creating a fresh name on every rebinding.
        self.freeze = False
    def _Return(self, stmt):
        result = S.Return(S.substituted_expression(stmt.value(), self.env))
        return result
    def _Cond(self, cond):
        # Substitute the test under the current environment before the
        # branches are rewritten.
        condition = S.substituted_expression(cond.parameters[0], self.env)
        self.rewrite_children(cond)
        return S.Cond(condition, cond.parameters[1], cond.parameters[2])
    def _While(self, cond):
        condition = S.substituted_expression(cond.parameters[0], self.env)
        self.freeze = True
        self.rewrite_children(cond)
        cond.parameters[0] = condition
        self.freeze = False
        return cond
    def _Bind(self, stmt):
        var = stmt.binder()
        varNames = [x.id for x in flatten(var)]
        # Substitute the RHS before the LHS renamings take effect.
        operation = S.substituted_expression(stmt.value(), self.env)
        for name in varNames:
            if self.freeze:
                if name in self.env:
                    # NOTE(review): env values are S.Name nodes, so this
                    # 'rename' is a Name (later wrapped in S.Name again
                    # below) -- confirm this nesting is intended.
                    rename = self.env[name]
                elif name not in self.exceptions:
                    rename = '%s_%s' % (name, SingleAssignmentRewrite.serial.next())
                else:
                    rename = name
            elif name not in self.exceptions:
                rename = '%s_%s' % (name, SingleAssignmentRewrite.serial.next())
            else:
                rename = name
            self.env[name] = S.Name(rename)
        result = S.Bind(S.substituted_expression(var, self.env), operation)
        return result
    def _Procedure(self, stmt):
        # Formals keep their names: seed them into a fresh scope.
        self.env.begin_scope()
        for var in flatten(stmt.variables):
            self.env[var.id] = var
        result = self.rewrite_children(stmt)
        self.env.end_scope()
        return result
def single_assignment_conversion(stmt, env=None, exceptions=None):
    'Rename locally declared variables so that each is bound exactly once'
    # None sentinels instead of mutable defaults ({} / set()), which would
    # be shared across calls and could leak renamings between invocations.
    if env is None:
        env = {}
    if exceptions is None:
        exceptions = set()
    rewrite = SingleAssignmentRewrite(env, exceptions)
    return rewrite.rewrite(stmt)
class LambdaLifter(S.SyntaxRewrite):
    """
    Convert every expression of the form:
    lambda x1,...,xn: E
    into a reference to a procedure __lambdaN and add
    def __lambdaN(x1,...,xn): return E
    to the procedure list.
    This rewriter assumes that closure conversion has already been
    performed.  In other words, there are no freely occurring
    local variables in the body of the lambda expression.
    """
    def __init__(self):
        # Collect lifted Lambdas as Procedures
        self.proclist = []
        self.names = pltools.name_supply(stems=['_lambda'], drop_zero=False)
    def _Lambda(self, e):
        # Inner lambdas are lifted first (rewrite_children), then this
        # one; the whole expression collapses to the generated name.
        fn = S.Name(self.names.next())
        self.rewrite_children(e)
        body = S.Return(e.parameters[0])
        self.proclist.append(S.Procedure(fn, e.variables, [body]))
        return fn
    def _Procedure(self, ast):
        # We explicitly interleave lifted lambda procedures with the
        # statements from which they come.  This guarantees correct
        # ordering of existing nested procedures with new
        # lambda-generated procedures.
        body = []
        for stmt in ast.parameters:
            stmt = self.rewrite(stmt)
            body = body + self.proclist + [stmt]
            self.proclist = []
        ast.parameters = body
        return ast
def lambda_lift(e):
lift = LambdaLifter()
eL = lift.rewrite(e)
return lift.proclist + eL
class ProcedureFlattener(S.SyntaxRewrite):
    """
    Hoist every procedure definition to the top level so that none is
    nested inside another.  Apply only after closure conversion and
    lambda lifting are complete.

    Flattened procedures accumulate in self.toplevel; each nested
    definition is replaced by None at its original site and the Nones
    are filtered out of the enclosing body.
    """
    def __init__(self):
        self.toplevel = list()
    def _Procedure(self, e):
        # Hoist nested definitions first, then drop the holes they left.
        self.rewrite_children(e)
        e.parameters = [child for child in e.parameters if child is not None]
        self.toplevel.append(e)
        return None
# XXX If things other than procedures become allowed as top-level
# forms, make sure that they are handled here.
def procedure_flatten(e):
    """Return the flat list of every procedure defined anywhere in e."""
    flattener = ProcedureFlattener()
    flattener.rewrite(e)
    return flattener.toplevel
class _ClosureRecursion(S.SyntaxRewrite):
    """Scope-tracking rewriter that retargets references to
    closure-converted procedures.

    self.env maps locally bound names; when a name's entry is an
    S.Closure, references to it are rewritten into Closure nodes so the
    captured variables are passed explicitly.
    """
    # XXX Most of the code in this rewriter simply serves to track
    # variables defined in the current scope.  That should be
    # abstracted into a more generic base class that could be used
    # elsewhere.
    def __init__(self, env):
        self.env = env
    def locally_bound(self, B):
        # Record every name in B as bound in the current scope.
        for v in flatten(B):
            self.env[v.id] = v.id
    def _Bind(self, ast):
        self.rewrite_children(ast)
        binders = [v for v in S.walk(ast.binder()) if isinstance(v, S.Name)]
        self.locally_bound(binders)
        return ast
    def _Lambda(self, ast):
        self.env.begin_scope()
        self.locally_bound(ast.formals())
        self.rewrite_children(ast)
        self.env.end_scope()
        return ast
    def _Procedure(self, ast):
        self.env.begin_scope()
        self.locally_bound(ast.variables)
        self.rewrite_children(ast)
        self.env.end_scope()
        return ast
    # XXX This rewrite rule -- coupled with the rule for _Procedure in
    # _ClosureConverter -- is an ugly hack for rewriting calls to
    # procedures.  We should find a more elegant solution!
    def _Name(self, ast):
        # (Removed a dead "x = getattr(self.env, ast.id, None)" whose
        # result was never used.)
        if ast.id in self.env and isinstance(self.env[ast.id], S.Closure):
            return S.Closure(self.env[ast.id].variables, ast)
        else:
            return ast
class _ClosureConverter(_ClosureRecursion):
    """Perform closure conversion: lift free variables of nested lambdas
    and procedures into explicit closure([...], fn) forms with fresh
    _K<i> formals.  Names in 'globals' are never closed over."""
    def __init__(self, globals=None):
        self.globals = globals or dict()
        self.env = pltools.Environment()
    def _Lambda(self, e):
        _ClosureRecursion._Lambda(self, e)
        formals = [v.id for v in flatten(e.formals())]
        # Take the free variable list, stick it in a set to make sure we don't
        # duplicate a variable, and then put it back in a list to make sure
        # it's got a defined ordering, which sets don't have
        free = list(set([v for v in S.free_variables(e.body(), formals)
                         if v in self.env]))
        if free:
            # Substitute each free name with a fresh _K<i> formal and
            # wrap the lambda in a Closure capturing the originals.
            bound = [S.Name("_K%d" % i) for i in range(len(free))]
            body = S.substituted_expression(e.body(), dict(zip(free, bound)))
            e.parameters = [body]
            e.variables = e.variables + bound
            return S.Closure([S.Name(x) for x in free], e)
        else:
            return e
    def _Procedure(self, ast):
        binders = [v.id for v in flatten(ast.variables)] # NOTE: this includes name
        _ClosureRecursion._Procedure(self, ast)
        # Take the free variable list, stick it in a set to make sure we don't
        # duplicate a variable, and then put it back in a list to make sure
        # it's got a defined ordering, which sets don't have
        free = list(set([v for v in S.free_variables(ast.body(), binders)
                         if v in self.env]))
        if free:
            bound = [S.Name("_K%d" % i) for i in range(len(free))]
            ast.variables = ast.variables + bound
            ast.parameters = S.substituted_expression(ast.parameters,
                                                      dict(zip(free, bound)))
            # Transform recursive calls of this procedure within its own body.
            recursive = _ClosureRecursion(self.env)
            self.env[ast.name().id] = S.Closure(bound,
                                                ast.name())
            ast.parameters = recursive.rewrite(ast.parameters)
            # Register rewrite for calls to this procedure in later
            # parts of the defining scope
            self.env[ast.name().id] = S.Closure([S.Name(x) for x in free],
                                                ast.name())
        # else:
        #     self.locally_bound([ast.name()])
        return ast
def closure_conversion(ast, globals=None):
    """
    Tag every variable that is lexically closed over by a nested lambda
    or procedure definition.

    A variable occurring inside a lambda/procedure forms a closure when
    it is not one of that lambda/procedure's formals but is bound in the
    enclosing scope.  Such variables are lifted into explicit
    closure([...], fn) forms and threaded through as extra arguments,
    e.g.::

        lambda x: lambda y: x  =>  lambda x: closure([x], lambda y, _K0: _K0)

    Names appearing in 'globals' are globally visible and are never
    closed over.  The copperhead.interlude module provides a native
    Python implementation of the closure() expression.
    """
    return _ClosureConverter(globals=globals).rewrite(ast)
class ExpressionFlattener(S.SyntaxRewrite):
def __init__(self):
self.stmts = [list()]
self.names = pltools.name_supply(stems=['e'], drop_zero=False)
def top(self): return self.stmts[-1]
def emit(self, ast): self.top().append(ast)
def push(self): self.stmts.append(list())
def pop(self):
x = self.top()
self.stmts.pop()
return x
def _Lambda(self, ast):
raise ValueError, "lambda's cannot be flattened (%s)" % e
def _Name(self, ast): return ast
def _Number(self, ast): return ast
def _Closure(self, ast): return ast
def _Expression(self, e):
subexpressions = e.parameters
e.parameters = []
for sub in subexpressions:
sub = self.rewrite(sub)
# XXX It doesn't seem right to include Closure on this list
# of "atomic" values. But phase_assignment breaks if I
# don't do this.
if not isinstance(sub, (S.Name, S.Literal, S.Closure)):
tn = S.Name(self.names.next())
self.emit(S.Bind(tn, sub))
else:
tn = sub
e.parameters.append(tn)
return e
def _Bind(self, stmt):
e = self.rewrite(stmt.value())
stmt.parameters = [e]
self.emit(stmt)
return stmt
def _Return(self, stmt):
e = self.rewrite(stmt.value())
if isinstance(e, S.Name):
# If we're returning one of the procedure formals unchanged,
# we need to copy its value into a return variable.
# Here is where we check:
if e.id not in self.formals:
#No need to copy value into a return variable
stmt.parameters = [e]
self.emit(stmt)
return
# If we're returning a tuple, we always copy the value into a return
# variable. We may undo this later on, for entry-point procedures.
ret = S.Name("result")
self.emit(S.Bind(ret, e))
stmt.parameters = [ret]
self.emit(stmt)
def _Cond(self, stmt):
test = self.rewrite(stmt.test())
self.push()
self.rewrite(stmt.body())
body = self.pop()
self.push()
self.rewrite(stmt.orelse())
orelse = self.pop()
stmt.parameters = [test, body, orelse]
self.emit(stmt)
def _Procedure(self, stmt):
self.push()
self.formals = set((x.id for x in flatten(stmt.formals())))
self.rewrite_children(stmt)
self.formals = None
body = self.pop()
stmt.parameters = body
self.emit(stmt)
def _default(self, ast):
if isinstance(ast, S.Expression):
return self._Expression(ast)
else:
raise ValueError, "can't flatten syntax (%s)" % ast
def expression_flatten(s):
    """Flatten nested expressions in s into single-operation bindings."""
    f = ExpressionFlattener()
    f.rewrite(s)
    return f.top()
class LiteralCaster(S.SyntaxRewrite):
    """Insert explicit type casts (int32/int64/float32/float64/cast_to/
    cast_to_el) around literal-valued arguments of calls whose callee
    has a recorded Copperhead type."""
    def __init__(self, globals):
        self.globals = globals
    def _Procedure(self, proc):
        # Track which names are bound directly to numeric literals
        # within this procedure.
        self.literal_names = set()
        self.rewrite_children(proc)
        return proc
    def _Bind(self, bind):
        if isinstance(bind.value(), S.Number):
            self.literal_names.add(bind.binder().id)
        self.rewrite_children(bind)
        return bind
    def _Apply(self, appl):
        #Rewrite children
        self.rewrite_children(appl)
        #Insert typecasts for arguments
        #First, retrieve type of function, if we can't find it, pass
        fn_obj = self.globals.get(appl.function().id, None)
        if not fn_obj:
            return appl
        #If the function doesn't have a recorded Copperhead type, pass
        if not hasattr(fn_obj, 'cu_type'):
            return appl
        fn_type = fn_obj.cu_type
        if isinstance(fn_type, T.Polytype):
            fn_input_types = fn_type.monotype().input_types()
        else:
            fn_input_types = fn_type.input_types()
        def build_cast(cast_name, args):
            "Helper function to build cast expressions"
            return S.Apply(S.Name(cast_name),
                           args)
        def insert_cast(arg_type, arg):
            "Returns either the argument or a casted argument"
            if hasattr(arg, 'literal_expr'):
                if arg_type is T.Int:
                    return build_cast("int32", [arg])
                elif arg_type is T.Long:
                    return build_cast("int64", [arg])
                elif arg_type is T.Float:
                    return build_cast("float32", [arg])
                elif arg_type is T.Double:
                    return build_cast("float64", [arg])
                elif isinstance(arg_type, str):
                    #We have a polymorphic function
                    #We must insert a polymorphic cast
                    #This means we search through the inputs
                    #To find an input with a related type
                    for in_type, in_arg in \
                            zip(fn_input_types, appl.arguments()):
                        if not hasattr(in_arg, 'literal_expr'):
                            if in_type == arg_type:
                                return build_cast("cast_to", [arg, in_arg])
                            elif isinstance(in_type, T.Seq) and \
                                    in_type.unbox() == arg_type:
                                return build_cast("cast_to_el", [arg, in_arg])
            #No cast was found, just return the argument
            return arg
        casted_arguments = map(insert_cast, fn_input_types, appl.arguments())
        appl.parameters[1:] = casted_arguments
        #Record if this expression is a literal expression
        if all(map(lambda x: hasattr(x, 'literal_expr'), appl.arguments())):
            appl.literal_expr = True
        return appl
    def _Number(self, ast):
        ast.literal_expr = True
        return ast
    def _Name(self, ast):
        if ast.id in self.literal_names:
            ast.literal_expr = True
        return ast
def cast_literals(s, M):
    """Insert explicit casts around literal expressions in s, then
    re-flatten, since cast insertion can nest expressions."""
    casted = LiteralCaster(M.globals).rewrite(s)
    return expression_flatten(casted)
class ReturnFinder(S.SyntaxVisitor):
    """Visitor that maps each name returned by a procedure onto the
    corresponding binder target at the call site, recording the mapping
    in the supplied environment."""
    def __init__(self, binding, env):
        self.binding = list(flatten(binding))
        self.env = env
    def _Return(self, node):
        returned = list(flatten(node.value()))
        # The call site must bind exactly as many names as are returned.
        assert len(returned) == len(self.binding)
        for target, name in zip(self.binding, returned):
            self.env[name.id] = target
class FunctionInliner(S.SyntaxRewrite):
    """Inline calls to previously seen procedures: the callee's body is
    substituted at the call site with arguments bound and its returned
    names redirected to the call's binder."""
    def __init__(self):
        # Binder of the Bind currently being rewritten (the inlined
        # body's return values are routed to it).
        self.activeBinding = None
        # Statements produced by inlining, to replace the current Bind.
        self.statements = []
        # Procedures seen so far, keyed by name.
        self.procedures = {}
    def _Bind(self, binding):
        self.activeBinding = binding.binder()
        self.rewrite_children(binding)
        self.activeBinding = None
        statements = self.statements
        self.statements = []
        if statements == []:
            return binding
        return statements
    def _Apply(self, apply):
        # NOTE: the parameter name shadows the Python 2 builtin apply().
        functionName = apply.parameters[0].id
        if functionName in self.procedures:
            instantiatedFunction = self.procedures[functionName]
            # variables[0] is the procedure name; the rest are formals.
            functionArguments = instantiatedFunction.variables[1:]
            instantiatedArguments = apply.parameters[1:]
            env = pltools.Environment()
            for (internal, external) in zip(functionArguments, instantiatedArguments):
                env[internal.id] = external
            # Route returned names to the call site's binder.
            return_finder = ReturnFinder(self.activeBinding, env)
            return_finder.visit(instantiatedFunction)
            statements = [S.substituted_expression(x, env) for x in \
                          instantiatedFunction.body() \
                          if not isinstance(x, S.Return)]
            # Re-unique the inlined locals, but keep the binder names.
            singleAssignmentInstantiation = single_assignment_conversion(statements, exceptions=set((x.id for x in flatten(self.activeBinding))))
            self.statements = singleAssignmentInstantiation
            return None
        return apply
    def _Procedure(self, proc):
        self.rewrite_children(proc)
        proc.parameters = list(flatten(proc.parameters))
        procedureName = proc.variables[0].id
        self.procedures[procedureName] = proc
        return proc
def inline(s):
    """Inline procedure calls throughout s and return a flat statement list."""
    return list(flatten(FunctionInliner().rewrite(s)))
def procedure_prune(ast, entries):
    """Drop top-level procedures that are referenced nowhere.

    NOTE(review): despite the comment below, free variables are
    collected from *every* procedure, not just the entry points -- so a
    procedure referenced only by another (even pruned) procedure is
    still retained.  Confirm whether transitive pruning from 'entries'
    was intended.
    """
    needed = set(entries)
    # First, figure out which procedures we actually need by determining
    # the free variables in each of the entry points
    for p in ast:
        needed.update(S.free_variables(p.body()))
    # Now, only keep top-level procedures that have been referenced
    return [p for p in ast if p.name().id in needed]
class ConditionalProtector(S.SyntaxRewrite):
    """
    Rewrite every ``E1 if P else E2`` into
    ``((lambda: E1) if P else (lambda: E2))()``.

    Wrapping each branch in a thunk protects the branches during later
    compiler phases and guarantees that exactly one of E1/E2 is ever
    evaluated.
    """
    def __init__(self):
        pass
    def _If(self, e):
        self.rewrite_children(e)
        # Thunk both branches, then immediately apply the survivor.
        e.parameters = [e.test(),
                        S.Lambda([], e.body()),
                        S.Lambda([], e.orelse())]
        return S.Apply(e, [])
| 17,080 | 404 | 1,928 |
d1f11ea3b553a01107d7a8ebb11d103ba2f5cee9 | 330 | py | Python | parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Alias.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Alias.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Alias.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | from Analisis_Ascendente.Instrucciones.instruccion import Instruccion
| 30 | 70 | 0.690909 | from Analisis_Ascendente.Instrucciones.instruccion import Instruccion
class Alias(Instruccion):
    """AST node for a PL/pgSQL alias declaration, mapping a new
    identifier to a $n parameter / existing identifier, with the source
    position (fila, columna) kept for error reporting."""

    def __init__(self, idnuevo, dolarnumero, idviejo, fila, columna):
        (self.idnuevo,
         self.dolarnumero,
         self.idviejo,
         self.fila,
         self.columna) = (idnuevo, dolarnumero, idviejo, fila, columna)
86ba2510bd8862c94696c658f49a9b544b94a033 | 6,982 | py | Python | figs/b_plus_tree.py | TimeExceed/modern_db_part0 | 4cb8169c72b4c0c7052366edb92a552f353413bf | [
"CC-BY-4.0"
] | 1 | 2022-01-19T07:39:06.000Z | 2022-01-19T07:39:06.000Z | figs/b_plus_tree.py | TimeExceed/modern_db_part0 | 4cb8169c72b4c0c7052366edb92a552f353413bf | [
"CC-BY-4.0"
] | null | null | null | figs/b_plus_tree.py | TimeExceed/modern_db_part0 | 4cb8169c72b4c0c7052366edb92a552f353413bf | [
"CC-BY-4.0"
] | null | null | null | from fathom import Point, ORIGIN
from fathom.tikz import Canvas
import fathom.geometry as geo
import fathom.layout as layout
import fathom.tikz.colors as colors
from itertools import *
BRANCH = 3
| 33.729469 | 87 | 0.572472 | from fathom import Point, ORIGIN
from fathom.tikz import Canvas
import fathom.geometry as geo
import fathom.layout as layout
import fathom.tikz.colors as colors
from itertools import *
BRANCH = 3
class Inner:
    """Drawable inner node of a B+ tree figure: a row of key cells above
    a row of child-pointer bullets (BRANCH children, BRANCH-1 keys)."""
    # Cell geometry (figure units).
    SEG_WIDTH = 0.5
    LEFTMOST_WIDTH = 0.3
    KEY_HEIGHT = 0.5
    PTR_HEIGHT = 0.3
    def __init__(self, center, texts):
        # texts: key labels; center: node center point in the layout.
        self._texts = texts
        self._center = center
        self._width = (BRANCH - 1) * Inner.SEG_WIDTH + Inner.LEFTMOST_WIDTH
        self._height = Inner.KEY_HEIGHT + Inner.PTR_HEIGHT
        self._ptrs = None  # lazily created pointer bullets
        self._line_color = None
    def set_line_color(self, color):
        self._line_color = color
    def upleft(self):
        """Upper-left corner of the node's bounding box."""
        return self._center + Point(-self._width / 2, self._height / 2)
    def pointers(self, canvas):
        """Return (creating on first call) the child-pointer bullets,
        leftmost first: one under the gray corner cell, then one under
        each key cell."""
        if self._ptrs is not None:
            return self._ptrs
        upleft_corner = self._center + \
            Point(-self._width / 2, self._height / 2)
        ptrs = repeat(upleft_corner +
                      Point(Inner.LEFTMOST_WIDTH + Inner.SEG_WIDTH / 2,
                            - Inner.KEY_HEIGHT - Inner.PTR_HEIGHT / 2))
        ptrs = accumulate(ptrs, lambda x, _: x + Point(Inner.SEG_WIDTH, 0))
        ptrs = islice(ptrs, len(self._texts))
        ptrs = [canvas.new_bullet(center=p) for p in ptrs]
        p = canvas.new_bullet(
            center=upleft_corner +
            Point(Inner.LEFTMOST_WIDTH/2, -
                  Inner.KEY_HEIGHT - Inner.PTR_HEIGHT / 2))
        ptrs.insert(0, p)
        self._ptrs = ptrs
        return self._ptrs
    def draw(self, canvas):
        """Render the node: corner cell, grid lines, key labels, bullets."""
        upleft = self.upleft()
        self._draw_upleft_corner(canvas, upleft)
        self._draw_lines(canvas, upleft)
        self._draw_texts(canvas, upleft)
        self._draw_ptr_bullets(canvas)
    def _draw_lines(self, canvas, upleft):
        # Outline, horizontal key/pointer divider, and vertical cell
        # separators.  NOTE(review): the vertical separators are drawn
        # without pen_color, unlike Leaf._draw_lines -- confirm whether
        # they should also honor self._line_color.
        line_color = self._line_color if self._line_color is not None else colors.BLACK
        canvas.new_rectangle(
            center=self._center,
            width=self._width,
            height=self._height,
            pen_color=line_color)
        canvas.new_line(
            src=upleft + Point(0, -Inner.KEY_HEIGHT),
            dst=upleft + Point(self._width, -Inner.KEY_HEIGHT),
            pen_color=line_color)
        vs = repeat(upleft + Point(Inner.LEFTMOST_WIDTH, 0))
        vs = accumulate(vs, lambda x, _: x + Point(Inner.SEG_WIDTH, 0))
        vs = islice(vs, BRANCH - 1)
        for x in vs:
            canvas.new_line(src=x, dst=x + Point(0, -self._height))
    def _draw_texts(self, canvas, upleft):
        # One key label centered in each key cell.
        start = upleft + \
            Point(Inner.LEFTMOST_WIDTH + Inner.SEG_WIDTH / 2,
                  - Inner.KEY_HEIGHT / 2)
        vs = repeat(start)
        vs = accumulate(vs, lambda x, _: x + Point(Inner.SEG_WIDTH, 0))
        for p, t in zip(vs, self._texts):
            canvas.new_text(anchor=p, text=t)
    def _draw_ptr_bullets(self, canvas):
        self.pointers(canvas)
    def _draw_upleft_corner(self, canvas, upleft):
        # Gray filler cell above the leftmost child pointer.
        center = upleft + \
            Point(Inner.LEFTMOST_WIDTH / 2, -Inner.KEY_HEIGHT / 2)
        canvas.new_rectangle(
            center=center,
            width=Inner.LEFTMOST_WIDTH,
            height=Inner.KEY_HEIGHT,
            pen_color=colors.INVISIBLE,
            brush_color=colors.GRAY)
class Leaf:
    """Drawable leaf node of a B+ tree figure: key cells over record
    pointers, plus a rightmost tail bullet used to chain to the next leaf."""
    # Cell geometry (figure units).
    RIGHTMOST_WIDTH = 0.3
    SEG_WIDTH = 0.5
    KEY_HEIGHT = 0.5
    PTR_HEIGHT = 0.3
    def __init__(self, center, texts):
        # texts: key labels; center: node center point in the layout.
        self._texts = texts
        self._center = center
        self._width = (BRANCH - 1) * Leaf.SEG_WIDTH + Leaf.RIGHTMOST_WIDTH
        self._height = Leaf.KEY_HEIGHT + Leaf.PTR_HEIGHT
        self._ptrs = None  # lazily created record-pointer bullets
        self._tail = None  # lazily created next-leaf bullet
        self._line_color = None
    def set_line_color(self, color):
        self._line_color = color
    def upleft(self):
        """Upper-left corner of the node's bounding box."""
        return self._center + Point(-self._width / 2, self._height / 2)
    def leftmost(self):
        """Mid-left anchor point (target for the previous leaf's arrow)."""
        return self._center + Point(-self._width/2, 0)
    def text_points(self):
        """Centers of the key cells, left to right."""
        start = self.upleft() + \
            Point(Leaf.SEG_WIDTH / 2, -Leaf.KEY_HEIGHT / 2)
        vs = repeat(start)
        vs = accumulate(vs, lambda x, _: x + Point(Leaf.SEG_WIDTH, 0))
        vs = islice(vs, BRANCH - 1)
        return list(vs)
    def draw(self, canvas):
        """Render the leaf: grid lines, key labels, pointer and tail bullets."""
        upleft = self.upleft()
        self._draw_lines(canvas, upleft)
        self._draw_texts(canvas, upleft)
        self.pointers(canvas)
        self.tail(canvas)
    def _draw_lines(self, canvas, upleft):
        # Outline, key/pointer divider (stopping before the tail cell),
        # and vertical cell separators.
        color = self._line_color if self._line_color is not None else colors.BLACK
        canvas.new_rectangle(
            center=self._center,
            width=self._width,
            height=self._height,
            pen_color=color)
        canvas.new_line(
            src=upleft + Point(0, -Leaf.KEY_HEIGHT),
            dst=upleft + Point(self._width - Leaf.RIGHTMOST_WIDTH, -Leaf.KEY_HEIGHT),
            pen_color=color)
        vs = repeat(upleft)
        vs = accumulate(vs, lambda x, _: x + Point(Leaf.SEG_WIDTH, 0))
        vs = islice(vs, 1, BRANCH)
        for p in vs:
            canvas.new_line(
                src=p, dst=p + Point(0, -self._height), pen_color=color)
    def _draw_texts(self, canvas, upleft):
        for p, t in zip(self.text_points(), self._texts):
            canvas.new_text(anchor=p, text=t)
    def pointers(self, canvas):
        """Return (creating on first call) the record-pointer bullets
        under the key cells."""
        if self._ptrs is not None:
            return self._ptrs
        upleft = self._center + Point(-self._width / 2, self._height / 2)
        start = upleft + Point(Leaf.SEG_WIDTH/2,
                               -Leaf.KEY_HEIGHT - Leaf.PTR_HEIGHT / 2)
        vs = repeat(start)
        vs = accumulate(vs, lambda x, _: x + Point(Leaf.SEG_WIDTH, 0))
        vs = islice(vs, BRANCH - 1)
        self._ptrs = [canvas.new_bullet(center=x) for x in vs]
        return self._ptrs
    def tail(self, canvas):
        """Return (creating on first call) the bullet in the rightmost
        cell, from which the arrow to the next leaf starts."""
        if self._tail is not None:
            return self._tail
        p = self._center + Point(self._width / 2 - Leaf.RIGHTMOST_WIDTH / 2, 0)
        self._tail = canvas.new_bullet(center=p)
        return self._tail
def tree():
    """Build the example B+ tree figure: lay out a fixed 3-level shape,
    then replace each layout position with an Inner or Leaf drawable.
    Returns the layout dict keyed by node name."""
    # Nested-list tree description consumed by fathom's layout.tree.
    s = [
        'root',
        [
            'left',
            ['left_left'],
            ['left_mid'],
            ['left_right'],
        ],
        [
            'right',
            ['right_left'],
            ['right_right'],
        ]
    ]
    t = layout.tree(s, root=ORIGIN, h_sep=2, v_sep=2)
    # layout.tree yields center points; swap them for drawables.
    t['root'] = Inner(t['root'], ['5'])
    t['left'] = Inner(t['left'], ['2', '4'])
    t['right'] = Inner(t['right'], ['7'])
    t['left_left'] = Leaf(t['left_left'], ['0', '1'])
    t['left_mid'] = Leaf(t['left_mid'], ['2'])
    t['left_right'] = Leaf(t['left_right'], ['4'])
    t['right_left'] = Leaf(t['right_left'], ['5', '6'])
    t['right_right'] = Leaf(t['right_right'], ['7', '8'])
    return t
def link_leaves(canvas, leaves):
    """Chain the leaves left to right: draw an arrow from each leaf's
    tail bullet to the left edge of its successor."""
    for i in range(len(leaves) - 1):
        canvas.new_arrow(
            src=leaves[i].tail(canvas),
            dst=leaves[i + 1].leftmost())
5ef52c0473c04b8ee43c4dd30c8286d956014dc1 | 5,018 | py | Python | core/src/autogluon/core/searcher/bayesopt/utils/test_objects.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 4,462 | 2019-12-09T17:41:07.000Z | 2022-03-31T22:00:41.000Z | core/src/autogluon/core/searcher/bayesopt/utils/test_objects.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 1,408 | 2019-12-09T17:48:59.000Z | 2022-03-31T20:24:12.000Z | core/src/autogluon/core/searcher/bayesopt/utils/test_objects.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 623 | 2019-12-10T02:04:18.000Z | 2022-03-20T17:11:01.000Z | # Could eventually remove this code: Is this needed in unit tests?
"""
Object definitions that are used for testing.
"""
from typing import Iterator, Tuple, Dict
import numpy as np
from ..datatypes.common import StateIdAndCandidate
from ..datatypes.hp_ranges import HyperparameterRanges_Impl, \
HyperparameterRangeContinuous, HyperparameterRangeInteger, \
HyperparameterRangeCategorical, HyperparameterRanges
from ..datatypes.scaling import LogScaling, LinearScaling
from ..datatypes.tuning_job_state import TuningJobState
from ..gpautograd.constants import MCMCConfig, OptimizationConfig
from ..gpautograd.gp_regression import GaussianProcessRegression
from ..gpautograd.gpr_mcmc import GPRegressionMCMC
from ..gpautograd.kernel import Matern52, KernelFunction
from ..gpautograd.warping import WarpedKernel, Warping
from ..tuning_algorithms.base_classes import CandidateGenerator, dictionarize_objective
class RepeatedCandidateGenerator(CandidateGenerator):
"""Generates candidates from a fixed set. Used to test the deduplication logic."""
# Example black box function, with adjustable location of global minimum.
# Potentially could catch issues with optimizer, e.g. if the optimizer
# ignoring somehow candidates on the edge of search space.
# A simple quadratic function is used.
| 39.825397 | 87 | 0.704265 | # Could eventually remove this code: Is this needed in unit tests?
"""
Object definitions that are used for testing.
"""
from typing import Iterator, Tuple, Dict
import numpy as np
from ..datatypes.common import StateIdAndCandidate
from ..datatypes.hp_ranges import HyperparameterRanges_Impl, \
HyperparameterRangeContinuous, HyperparameterRangeInteger, \
HyperparameterRangeCategorical, HyperparameterRanges
from ..datatypes.scaling import LogScaling, LinearScaling
from ..datatypes.tuning_job_state import TuningJobState
from ..gpautograd.constants import MCMCConfig, OptimizationConfig
from ..gpautograd.gp_regression import GaussianProcessRegression
from ..gpautograd.gpr_mcmc import GPRegressionMCMC
from ..gpautograd.kernel import Matern52, KernelFunction
from ..gpautograd.warping import WarpedKernel, Warping
from ..tuning_algorithms.base_classes import CandidateGenerator, dictionarize_objective
def build_kernel(state: TuningJobState,
                 do_warping: bool = False) -> KernelFunction:
    """Return a Matern-5/2 ARD kernel for ``state``, optionally input-warped.

    Dimensionality and per-dimension warping ranges are derived from the
    state's hyperparameter ranges.
    """
    dims, warping_ranges = dimensionality_and_warping_ranges(state.hp_ranges)
    base_kernel = Matern52(dims, ARD=True)
    if not do_warping:
        return base_kernel
    return WarpedKernel(
        kernel=base_kernel, warping=Warping(dims, warping_ranges))
def default_gpmodel(
        state: TuningJobState, random_seed: int,
        optimization_config: OptimizationConfig) -> GaussianProcessRegression:
    """Build a GP regression surrogate with the default (unwarped) kernel."""
    kernel = build_kernel(state)
    return GaussianProcessRegression(
        kernel=kernel,
        random_seed=random_seed,
        optimization_config=optimization_config)
def default_gpmodel_mcmc(
        state: TuningJobState, random_seed: int,
        mcmc_config: MCMCConfig) -> GPRegressionMCMC:
    """Build an MCMC-based GP regression surrogate for ``state``.

    The kernel is supplied lazily so each MCMC chain can build its own copy.
    """
    return GPRegressionMCMC(
        build_kernel=lambda: build_kernel(state),
        random_seed=random_seed,
        mcmc_config=mcmc_config)
def dimensionality_and_warping_ranges(hp_ranges: HyperparameterRanges) -> \
        Tuple[int, Dict[int, Tuple[float, float]]]:
    """Return the encoded dimensionality of ``hp_ranges`` plus, for every
    non-categorical dimension, its internal (lower, upper) warping range.

    Categorical ranges contribute dimensions but never a warping range, and
    degenerate ranges (upper == lower after encoding) are skipped as well.
    """
    warping_ranges = dict()
    dim_offset = 0
    # NOTE: This explicit loop over hp_ranges will fail if
    # HyperparameterRanges.hp_ranges is not implemented! Needs to be fixed if
    # it becomes an issue, either by moving the functionality here into
    # HyperparameterRanges, or by converting hp_ranges to
    # HyperparameterRanges_Impl, which supports the hp_ranges property.
    for hp_range in hp_ranges.hp_ranges:
        if not isinstance(hp_range, HyperparameterRangeCategorical):
            if isinstance(hp_range, HyperparameterRangeInteger):
                lower_bound = int(round(hp_range.lower_bound))
                upper_bound = int(round(hp_range.upper_bound))
            else:
                assert isinstance(hp_range, HyperparameterRangeContinuous)
                lower_bound = float(hp_range.lower_bound)
                upper_bound = float(hp_range.upper_bound)
            encoded_lower = hp_range.to_ndarray(lower_bound).item()
            encoded_upper = hp_range.to_ndarray(upper_bound).item()
            if encoded_upper > encoded_lower:
                # Exclude degenerate cases where max equals min.
                warping_ranges[dim_offset] = (encoded_lower, encoded_upper)
            else:
                assert encoded_upper == encoded_lower
        dim_offset += hp_range.ndarray_size()
    return dim_offset, warping_ranges
class RepeatedCandidateGenerator(CandidateGenerator):
    """Generates candidates from a fixed set. Used to test the deduplication logic."""
    def __init__(self, n_unique_candidates: int):
        # One tuple per distinct candidate: (float id, int value, string value).
        self.all_unique_candidates = [
            (float(j), j, "value_" + str(j))
            for j in range(n_unique_candidates)
        ]

    def generate_candidates(self) -> Iterator[StateIdAndCandidate]:
        """Cycle forever through the fixed pool, starting at index 1."""
        pool = self.all_unique_candidates
        count = 0
        while True:
            count += 1
            yield pool[count % len(pool)]
# Example black box function, with adjustable location of global minimum.
# Potentially could catch issues with optimizer, e.g. if the optimizer
# ignoring somehow candidates on the edge of search space.
# A simple quadratic function is used.
class Quadratic3d:
    """Toy 3-d quadratic black box with an adjustable global-minimum location.

    The first coordinate of ``local_minima`` is stored on a log10 scale to
    match the log scaling of the 'x' dimension of the search space.
    """
    def __init__(self, local_minima, active_metric, metric_names):
        # local_minima: point where the global minimum is located.
        minima = np.array(local_minima).astype('float')
        minima[0] = np.log10(minima[0])
        self.local_minima = minima
        self.active_metric = active_metric
        self.metric_names = metric_names

    @property
    def search_space(self):
        """Three-dimensional space: log-continuous x, integer y, categorical z."""
        return HyperparameterRanges_Impl(
            HyperparameterRangeContinuous('x', 1.0, 100.0, scaling=LogScaling()),
            HyperparameterRangeInteger('y', 0, 2, scaling=LinearScaling()),
            HyperparameterRangeCategorical('z', ('0.0', '1.0', '2.0'))
        )

    @property
    def f_min(self):
        """Objective value at the global minimum."""
        return 0.0

    def __call__(self, candidate):
        """Evaluate the squared distance of ``candidate`` to the minimum."""
        point = np.array([float(hp) for hp in candidate])
        point[0] = np.log10(point[0])
        value = np.sum((self.local_minima - point) ** 2)
        return dictionarize_objective(value)
| 3,411 | 132 | 167 |
226f9d6772ad22e853aa633cbfc39bc49f430f60 | 521 | py | Python | profiles/log_signals.py | City-of-Helsinki/open-city-profile | a0e2f5457a377616c69501fe39ed03dc1490d493 | [
"MIT"
] | 5 | 2020-03-17T15:56:17.000Z | 2022-01-31T13:43:31.000Z | profiles/log_signals.py | City-of-Helsinki/open-city-profile | a0e2f5457a377616c69501fe39ed03dc1490d493 | [
"MIT"
] | 337 | 2018-05-21T08:35:05.000Z | 2022-03-14T07:38:15.000Z | profiles/log_signals.py | City-of-Helsinki/open-city-profile | a0e2f5457a377616c69501fe39ed03dc1490d493 | [
"MIT"
] | 10 | 2019-08-05T08:16:06.000Z | 2021-08-06T15:08:44.000Z | from django.db.models.signals import post_delete, post_init, post_save
from django.dispatch import receiver
from .audit_log import log
@receiver(post_delete)
@receiver(post_init)
@receiver(post_save)
| 22.652174 | 70 | 0.731286 | from django.db.models.signals import post_delete, post_init, post_save
from django.dispatch import receiver
from .audit_log import log
@receiver(post_delete)
def post_delete_audit_log(sender, instance, **kwargs):
    """Write a DELETE audit-log entry for every deleted model instance."""
    log("DELETE", instance)
@receiver(post_init)
def post_init_audit_log(sender, instance, **kwargs):
    """Write a READ audit-log entry whenever a model instance is initialised.

    NOTE(review): ``post_init`` fires on *every* instantiation (including
    queryset iteration and objects created for saving), so this logs "READ"
    more broadly than a strict read audit would — confirm this is intended.
    """
    log("READ", instance)
@receiver(post_save)
def post_save_audit_log(sender, instance, created, **kwargs):
    """Write a CREATE or UPDATE audit-log entry after a model save."""
    action = "CREATE" if created else "UPDATE"
    log(action, instance)
| 248 | 0 | 66 |
54553d94bae2141c40c5e0286dfcc41e8c7528ef | 361 | py | Python | setup.py | DANS-repo/pyutils | cf29aafb55723cd00e205318f95a0111917f4918 | [
"Apache-2.0"
] | null | null | null | setup.py | DANS-repo/pyutils | cf29aafb55723cd00e205318f95a0111917f4918 | [
"Apache-2.0"
] | null | null | null | setup.py | DANS-repo/pyutils | cf29aafb55723cd00e205318f95a0111917f4918 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
# Packaging configuration for the ``pyutils`` helper library.
setup(
    name='pyutils',
    version='0.0.1rc',
    packages=['pyutils'],
    url='https://github.com/DANS-repo/pyutils',
    license='Apache License Version 2.0',
    author='hvdb',
    author_email='',
    description='A collection of utility methods, primarily written for use in notebooks',
    install_requires=['pandas']
)
| 25.785714 | 90 | 0.6759 | from setuptools import setup
# Packaging configuration for the ``pyutils`` helper library.
setup(
    name='pyutils',
    version='0.0.1rc',
    packages=['pyutils'],
    url='https://github.com/DANS-repo/pyutils',
    license='Apache License Version 2.0',
    author='hvdb',
    author_email='',
    description='A collection of utility methods, primarily written for use in notebooks',
    install_requires=['pandas']
)
| 0 | 0 | 0 |
ddec4c7e9c1b22553bd90cc6e12e8a419afe9ad2 | 1,716 | py | Python | zajem_podatkov_esc.py | lukasimcic/Evrovizija-obdelava-podatkov | edf2a2e8b02b15d5868a5d0d3c7391cb403756af | [
"MIT"
] | null | null | null | zajem_podatkov_esc.py | lukasimcic/Evrovizija-obdelava-podatkov | edf2a2e8b02b15d5868a5d0d3c7391cb403756af | [
"MIT"
] | null | null | null | zajem_podatkov_esc.py | lukasimcic/Evrovizija-obdelava-podatkov | edf2a2e8b02b15d5868a5d0d3c7391cb403756af | [
"MIT"
] | null | null | null | import zajem_podatkov_orodja as orodja
import csv
import os
# Build a dict of dicts (year -> country -> points) to hold the CSV data.
slovar = {}
for leto in range(1994, 2020):
    slovar[leto] = {}
# Fill it with the data from the raw Eurovision results file.
with open(os.path.join(orodja.mapa, 'esc.csv'), newline='') as dat:
    reader = csv.DictReader(dat, delimiter=';')
    for vrstica in reader:
        leto, država, točke = int(vrstica['Year']), vrstica['To country'], int(vrstica['Points '])
        if leto < 1994 or vrstica['(semi-) final'] != 'f':  # only final rounds from 1994 onwards
            continue
        elif država == 'Serbia & Montenegro':  # this short-lived country is not treated separately
            continue
        elif 'Macedonia' in država:  # normalise 'F.Y.R. Macedonia' and 'North Macedonia'
            država = 'Macedonia'
        elif vrstica['From country'] not in slovar[leto]:  # add voting countries that missed the final, with 0 points
            slovar[leto][vrstica['From country']] = 0
        if država not in slovar[leto]:
            slovar[leto][država] = točke
        else:
            slovar[leto][država] += točke
# Rearrange the dict of dicts into a list of per-row dicts.
seznam_podatkov = []
for leto in slovar:
    for država in slovar[leto]:
        if leto >= 2016:  # the 2016 scoring change doubled the points, so halve them
            točke = slovar[leto][država] // 2
        else:
            točke = slovar[leto][država]
        seznam_podatkov.append({'leto': leto, 'država': država, 'točke': točke})
orodja.zapisi_csv(seznam_podatkov, ['leto', 'država', 'točke'], os.path.join(orodja.mapa, 'uvrstitve.csv')) | 38.133333 | 128 | 0.634033 | import zajem_podatkov_orodja as orodja
import csv
import os
# Build a dict of dicts (year -> country -> points) to hold the CSV data.
slovar = {}
for leto in range(1994, 2020):
    slovar[leto] = {}
# Fill it with the data from the raw Eurovision results file.
with open(os.path.join(orodja.mapa, 'esc.csv'), newline='') as dat:
    reader = csv.DictReader(dat, delimiter=';')
    for vrstica in reader:
        leto, država, točke = int(vrstica['Year']), vrstica['To country'], int(vrstica['Points '])
        if leto < 1994 or vrstica['(semi-) final'] != 'f':  # only final rounds from 1994 onwards
            continue
        elif država == 'Serbia & Montenegro':  # this short-lived country is not treated separately
            continue
        elif 'Macedonia' in država:  # normalise 'F.Y.R. Macedonia' and 'North Macedonia'
            država = 'Macedonia'
        elif vrstica['From country'] not in slovar[leto]:  # add voting countries that missed the final, with 0 points
            slovar[leto][vrstica['From country']] = 0
        if država not in slovar[leto]:
            slovar[leto][država] = točke
        else:
            slovar[leto][država] += točke
# Rearrange the dict of dicts into a list of per-row dicts.
seznam_podatkov = []
for leto in slovar:
    for država in slovar[leto]:
        if leto >= 2016:  # the 2016 scoring change doubled the points, so halve them
            točke = slovar[leto][država] // 2
        else:
            točke = slovar[leto][država]
        seznam_podatkov.append({'leto': leto, 'država': država, 'točke': točke})
orodja.zapisi_csv(seznam_podatkov, ['leto', 'država', 'točke'], os.path.join(orodja.mapa, 'uvrstitve.csv')) | 0 | 0 | 0 |
11f957ad0207fcef8e639057c669669d9cac188c | 313 | py | Python | Problem1/Numpy/Concatenate.py | Joanna-O-Ben/ADM-HW1 | 0a914d4ab5462fa563980644d3f5d777af61aef9 | [
"MIT"
] | null | null | null | Problem1/Numpy/Concatenate.py | Joanna-O-Ben/ADM-HW1 | 0a914d4ab5462fa563980644d3f5d777af61aef9 | [
"MIT"
] | null | null | null | Problem1/Numpy/Concatenate.py | Joanna-O-Ben/ADM-HW1 | 0a914d4ab5462fa563980644d3f5d777af61aef9 | [
"MIT"
] | null | null | null | import numpy
# Read the dimensions: N and M rows, each with P columns.
N, M, P = map(int, input().split())
# Parse the next N and M stdin lines into integer arrays.
arr_n = numpy.array([input().strip().split() for _ in range(N)], int)
arr_m = numpy.array([input().strip().split() for _ in range(M)], int)
# Force the expected (rows, P) shapes before concatenating along axis 0.
ar_N = numpy.reshape(arr_n, (N, P))
ar_M = numpy.reshape(arr_m, (M, P))
print(numpy.concatenate((ar_N, ar_M), axis = 0)) | 28.454545 | 69 | 0.638978 | import numpy
# Read the dimensions: N and M rows, each with P columns.
N, M, P = map(int, input().split())
# Parse the next N and M stdin lines into integer arrays.
arr_n = numpy.array([input().strip().split() for _ in range(N)], int)
arr_m = numpy.array([input().strip().split() for _ in range(M)], int)
# Force the expected (rows, P) shapes before concatenating along axis 0.
ar_N = numpy.reshape(arr_n, (N, P))
ar_M = numpy.reshape(arr_m, (M, P))
print(numpy.concatenate((ar_N, ar_M), axis = 0)) | 0 | 0 | 0 |
86a45a1526c0582f537f27a43324c5430b265151 | 217 | py | Python | scraping/quotes/app.py | aTechGuide/python | aa4fdfdd1676e3524a14b7e7b694c00672b2980c | [
"MIT"
] | null | null | null | scraping/quotes/app.py | aTechGuide/python | aa4fdfdd1676e3524a14b7e7b694c00672b2980c | [
"MIT"
] | null | null | null | scraping/quotes/app.py | aTechGuide/python | aa4fdfdd1676e3524a14b7e7b694c00672b2980c | [
"MIT"
] | null | null | null | import requests
from scraping.quotes.pages.quotes_page import QuotesPage
# Fetch the raw HTML of the quotes site and parse it into a page object.
page_content = requests.get('http://quotes.toscrape.com').content
page = QuotesPage(page_content)
# Print every quote scraped from the first page.
for quote in page.quotes:
    print(quote)
| 21.7 | 65 | 0.788018 | import requests
from scraping.quotes.pages.quotes_page import QuotesPage
# Fetch the raw HTML of the quotes site and parse it into a page object.
page_content = requests.get('http://quotes.toscrape.com').content
page = QuotesPage(page_content)
# Print every quote scraped from the first page.
for quote in page.quotes:
    print(quote)
| 0 | 0 | 0 |
69bb0948975ce0022cfa4e316a55335e0586b963 | 15,237 | py | Python | halotools/utils/table_utils.py | pllim/halotools | 6499cff09e7e0f169e4f425ee265403f6be816e8 | [
"BSD-3-Clause"
] | 83 | 2015-01-15T14:54:16.000Z | 2021-12-09T11:28:02.000Z | halotools/utils/table_utils.py | pllim/halotools | 6499cff09e7e0f169e4f425ee265403f6be816e8 | [
"BSD-3-Clause"
] | 579 | 2015-01-14T15:57:37.000Z | 2022-01-13T18:58:44.000Z | halotools/utils/table_utils.py | pllim/halotools | 6499cff09e7e0f169e4f425ee265403f6be816e8 | [
"BSD-3-Clause"
] | 70 | 2015-01-14T15:15:58.000Z | 2021-12-22T18:18:31.000Z | r"""
Modules performing small, commonly used tasks throughout the package.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from math import ceil
import numpy as np
from warnings import warn
from astropy.table import Table
from ..custom_exceptions import HalotoolsError
__all__ = ['SampleSelector']
def compute_conditional_percentiles(**kwargs):
    r"""
    In bins of the ``prim_haloprop``, compute the rank-order percentile
    of the input ``table`` based on the value of ``sec_haloprop``.

    Parameters
    ----------
    table : astropy table, optional
        a keyword argument that stores halo catalog being used to make mock galaxy population
        If a `table` is passed, the `prim_haloprop_key` and `sec_haloprop_key` keys
        must also be passed. If not passing a `table`, you must directly pass the
        `prim_haloprop` and `sec_haloprop` keyword arguments.

    prim_haloprop_key : string, optional
        Name of the column of the input ``table`` that will be used to access the
        primary halo property. `compute_conditional_percentiles` bins the ``table`` by
        ``prim_haloprop_key`` when computing the result.

    sec_haloprop_key : string, optional
        Name of the column of the input ``table`` that will be used to access the
        secondary halo property. `compute_conditional_percentiles` bins the ``table`` by
        ``prim_haloprop_key``, and in each bin uses the value stored in ``sec_haloprop_key``
        to compute the ``prim_haloprop``-conditioned rank-order percentile.

    prim_haloprop : array_like, optional
        Array storing the primary halo property used to bin the input points.
        If a `prim_haloprop` is passed, you must also pass a `sec_haloprop`.

    sec_haloprop : array_like, optional
        Array storing the secondary halo property used to define the conditional percentiles
        in each bin of `prim_haloprop`.

    prim_haloprop_bin_boundaries : array, optional
        Array defining the boundaries by which we will bin the input ``table``.
        Default is None, in which case the binning will be automatically determined using
        the ``dlog10_prim_haloprop`` keyword.

    dlog10_prim_haloprop : float, optional
        Logarithmic spacing of bins of the mass-like variable within which
        we will assign secondary property percentiles. Default is 0.05.

    Examples
    --------
    >>> from halotools.sim_manager import FakeSim
    >>> fakesim = FakeSim()
    >>> result = compute_conditional_percentiles(table = fakesim.halo_table, prim_haloprop_key = 'halo_mvir', sec_haloprop_key = 'halo_vmax')

    Notes
    -----
    The sign of the result is such that in bins of the primary property,
    *smaller* values of the secondary property
    receive *smaller* values of the returned percentile.
    """
    # Two call styles are supported: a ``table`` plus the two column keys, or
    # the two property arrays passed in directly.
    if 'table' in kwargs:
        table = kwargs['table']
        try:
            prim_haloprop_key = kwargs['prim_haloprop_key']
            prim_haloprop = table[prim_haloprop_key]
            sec_haloprop_key = kwargs['sec_haloprop_key']
            sec_haloprop = table[sec_haloprop_key]
        except KeyError:
            msg = ("\nWhen passing an input ``table`` to the ``compute_conditional_percentiles`` method,\n"
                "you must also pass ``prim_haloprop_key`` and ``sec_haloprop_key`` keyword arguments\n"
                "whose values are column keys of the input ``table``\n")
            raise HalotoolsError(msg)
    else:
        try:
            prim_haloprop = kwargs['prim_haloprop']
            sec_haloprop = kwargs['sec_haloprop']
        except KeyError:
            msg = ("\nIf not passing an input ``table`` to the ``compute_conditional_percentiles`` method,\n"
                "you must pass a ``prim_haloprop`` and ``sec_haloprop`` arguments\n")
            raise HalotoolsError(msg)

    def compute_prim_haloprop_bins(dlog10_prim_haloprop=0.05, **kwargs):
        r"""
        Assign each input point to a (logarithmic) bin of the primary property.

        Parameters
        ----------
        prim_haloprop : array
            Array storing the value of the primary halo property column of the ``table``
            passed to ``compute_conditional_percentiles``.

        prim_haloprop_bin_boundaries : array, optional
            Array defining the boundaries by which we will bin the input ``table``.
            Default is None, in which case the binning will be automatically determined using
            the ``dlog10_prim_haloprop`` keyword.

        dlog10_prim_haloprop : float, optional
            Logarithmic spacing of bins of the mass-like variable within which
            we will assign secondary property percentiles. Default is 0.05.

        Returns
        --------
        output : array
            Numpy array of integers storing the bin index of the prim_haloprop bin
            to which each halo in the input table was assigned.
        """
        try:
            prim_haloprop = kwargs['prim_haloprop']
        except KeyError:
            msg = ("The ``compute_prim_haloprop_bins`` method "
                "requires the ``prim_haloprop`` keyword argument")
            raise HalotoolsError(msg)

        try:
            prim_haloprop_bin_boundaries = kwargs['prim_haloprop_bin_boundaries']
        except KeyError:
            # No explicit boundaries: build log-spaced bins spanning the data,
            # padded by 0.001 dex so the extreme points fall inside the range.
            lg10_min_prim_haloprop = np.log10(np.min(prim_haloprop))-0.001
            lg10_max_prim_haloprop = np.log10(np.max(prim_haloprop))+0.001
            num_prim_haloprop_bins = (lg10_max_prim_haloprop-lg10_min_prim_haloprop)/dlog10_prim_haloprop
            prim_haloprop_bin_boundaries = np.logspace(
                lg10_min_prim_haloprop, lg10_max_prim_haloprop,
                num=int(ceil(num_prim_haloprop_bins)))

        # digitize the masses so that we can access them bin-wise
        output = np.digitize(prim_haloprop, prim_haloprop_bin_boundaries)

        # Use the largest bin for any points larger than the largest bin boundary,
        # and raise a warning if such points are found
        Nbins = len(prim_haloprop_bin_boundaries)
        if Nbins in output:
            msg = ("\n\nThe ``compute_prim_haloprop_bins`` function detected points in the \n"
                "input array of primary halo property that were larger than the largest value\n"
                "of the input ``prim_haloprop_bin_boundaries``. All such points will be assigned\n"
                "to the largest bin.\nBe sure that this is the behavior you expect for your application.\n\n")
            warn(msg)
            output = np.where(output == Nbins, Nbins-1, output)

        return output

    # Forward only the optional binning kwargs that were actually supplied,
    # so the nested function's own defaults apply otherwise.
    compute_prim_haloprop_bins_dict = {}
    compute_prim_haloprop_bins_dict['prim_haloprop'] = prim_haloprop
    try:
        compute_prim_haloprop_bins_dict['prim_haloprop_bin_boundaries'] = (
            kwargs['prim_haloprop_bin_boundaries'])
    except KeyError:
        pass
    try:
        compute_prim_haloprop_bins_dict['dlog10_prim_haloprop'] = kwargs['dlog10_prim_haloprop']
    except KeyError:
        pass
    prim_haloprop_bins = compute_prim_haloprop_bins(**compute_prim_haloprop_bins_dict)

    output = np.zeros_like(prim_haloprop)

    # sort on secondary property only with each mass bin
    bins_in_halocat = set(prim_haloprop_bins)
    for ibin in bins_in_halocat:
        indices_of_prim_haloprop_bin = np.where(prim_haloprop_bins == ibin)[0]

        num_in_bin = len(sec_haloprop[indices_of_prim_haloprop_bin])

        # Find the indices that sort by the secondary property
        ind_sorted = np.argsort(sec_haloprop[indices_of_prim_haloprop_bin])

        # Ranks run 1/num_in_bin .. 1.0, so smaller sec_haloprop values get
        # smaller percentiles (see Notes in the docstring).
        percentiles = np.zeros(num_in_bin)
        percentiles[ind_sorted] = (np.arange(num_in_bin) + 1.0) / float(num_in_bin)

        # place the percentiles into the catalog
        output[indices_of_prim_haloprop_bin] = percentiles

    return output
class SampleSelector(object):
    r""" Container class for commonly used sample selections.
    """

    @staticmethod
    def host_halo_selection(return_subhalos=False, **kwargs):
        r""" Method divides sample in to host halos and subhalos, and returns
        either the hosts or the hosts and the subs depending
        on the value of the input ``return_subhalos``.
        """
        table = kwargs['table']
        # Host halos are flagged by ``halo_upid == -1``; everything else is a sub.
        mask = table['halo_upid'] == -1
        if return_subhalos is False:
            return table[mask]
        else:
            return table[mask], table[~mask]

    @staticmethod
    def property_range(lower_bound=-float("inf"), upper_bound=float("inf"),
            return_complement=False, host_halos_only=False, subhalos_only=False, **kwargs):
        r""" Method makes a cut on an input table column based on an input upper and lower bound, and
        returns the cut table.

        Parameters
        ----------
        table : Astropy Table object, keyword argument

        key : string, keyword argument
            Column name that will be used to apply the cut

        lower_bound : float, optional keyword argument
            Minimum value for the input column of the returned table. Default is :math:`-\infty`.

        upper_bound : float, optional keyword argument
            Maximum value for the input column of the returned table. Default is :math:`+\infty`.

        return_complement : bool, optional keyword argument
            If True, `property_range` gives the table elements that do not pass the cut
            as the second return argument. Default is False.

        host_halos_only : bool, optional keyword argument
            If true, `property_range` will use the `host_halo_selection` method to
            make an additional cut on the sample so that only host halos are returned.
            Default is False

        subhalos_only : bool, optional keyword argument
            If true, `property_range` will use the `host_halo_selection` method to
            make an additional cut on the sample so that only subhalos are returned.
            Default is False

        Returns
        -------
        cut_table : Astropy Table object

        Examples
        ---------
        To demonstrate the `property_range` method, we will start out by loading
        a table of halos into memory using the `FakeSim` class:

        >>> from halotools.sim_manager import FakeSim
        >>> halocat = FakeSim()
        >>> halos = halocat.halo_table

        To make a cut on the halo catalog to select halos in a specific mass range:

        >>> halo_sample = SampleSelector.property_range(table = halos, key = 'halo_mvir', lower_bound = 1e12, upper_bound = 1e13)

        To apply this same cut, and also only select host halos passing the cut, we use the ``host_halos_only`` keyword:

        >>> host_halo_sample = SampleSelector.property_range(table = halos, key = 'halo_mvir', lower_bound = 1e12, upper_bound = 1e13, host_halos_only=True)

        The same applies if we only want subhalos returned only now we use the ``subhalos_only`` keyword:

        >>> subhalo_sample = SampleSelector.property_range(table = halos, key = 'halo_mvir', lower_bound = 1e12, upper_bound = 1e13, subhalos_only=True)
        """
        table = kwargs['table']

        # First apply the host halo cut, if applicable
        if (host_halos_only is True) & (subhalos_only is True):
            raise KeyError("You cannot simultaneously select only host halos and only subhalos")
        elif host_halos_only is True:
            table = SampleSelector.host_halo_selection(table=table)
        elif subhalos_only is True:
            hosts, table = SampleSelector.host_halo_selection(table=table, return_subhalos=True)

        key = kwargs['key']
        # Both bounds are inclusive.
        mask = (table[key] >= lower_bound) & (table[key] <= upper_bound)

        if return_complement is True:
            return table[mask], table[np.invert(mask)]
        else:
            return table[mask]

    @staticmethod
    def split_sample(**kwargs):
        r""" Method divides a sample into subsamples based on the percentile ranking of a given property.

        Parameters
        ----------
        table : Astropy Table object, keyword argument

        key : string, keyword argument
            Column name that will be used to define the percentiles

        percentiles : array_like
            Sequence of percentiles used to define the returned subsamples. If ``percentiles``
            has more than one element, the elements must be monotonically increasing.
            If ``percentiles`` is length-N, there will be N+1 returned subsamples.

        Returns
        -------
        subsamples : list

        Examples
        --------
        To demonstrate the `split_sample` method, we will start out by loading
        a table of halos into memory using the `FakeSim` class:

        >>> from halotools.sim_manager import FakeSim
        >>> halocat = FakeSim()
        >>> halos = halocat.halo_table

        We can easily use `split_sample` to divide the sample into a high-Vmax and low-Vmax subsamples:

        >>> sample_below_median, sample_above_median = SampleSelector.split_sample(table = halos, key = 'halo_vmax', percentiles = 0.5)

        Likewise, we can do the same thing to divide the sample into quartiles:

        >>> lowest, lower, higher, highest = SampleSelector.split_sample(table = halos, key = 'halo_zhalf', percentiles = [0.25, 0.5, 0.75])

        The following alternative syntax is also supported:

        >>> subsample_collection = SampleSelector.split_sample(table = halos, key = 'halo_zhalf', percentiles = [0.25, 0.5, 0.75])
        >>> lowest, lower, higher, highest = subsample_collection
        """
        table = kwargs['table']
        if not isinstance(table, Table):
            raise TypeError("Input table must be an Astropy Table instance")
        key = kwargs['key']
        if key not in list(table.keys()):
            raise KeyError("The ``{0}`` key does not appear in the table you are trying \n"
                "to split into subsamples".format(key))

        # NOTE(review): this sorts the *caller's* table in place — confirm
        # that mutating the input is intended.
        table.sort(key)
        percentiles = kwargs['percentiles']
        percentiles = np.array(percentiles)
        if np.shape(percentiles) == ():
            percentiles = np.array([percentiles])
        num_total = len(table)
        if len(percentiles) >= num_total:
            raise ValueError("Input length of percentiles must be less than input table length")

        # Convert percentiles to slice boundaries: prepend 0 and append the
        # table length so each consecutive pair brackets one subsample.
        indices = percentiles*num_total
        indices = np.insert(indices, 0, 0)
        percentiles = np.insert(percentiles, 0, 0)
        indices = indices.astype(int)
        indices = np.append(indices, len(table))
        percentiles = np.append(percentiles, 1.0)

        # Any zero gap between consecutive boundaries means some percentile
        # bin would be empty (the last gap is tightened by one to also catch
        # a trailing percentile of exactly 1.0).
        d = np.diff(indices)
        d[-1] -= 1
        if 0 in d:
            # BUGFIX: removed a leftover debug ``print`` that fired just
            # before this exception was raised.
            idx_too_few = np.nanargmin(d)
            raise ValueError("The input percentiles spacing is too fine.\n"
                "For example, there are no table elements in the percentile range (%.2f, %.2f)" %
                (percentiles[idx_too_few], percentiles[idx_too_few+1]))

        result = np.zeros(len(indices)-1, dtype=object)
        for i, first_idx, last_idx in zip(list(range(len(result))), indices[:-1], indices[1:]):
            result[i] = table[first_idx:last_idx]
        return result
| 42.09116 | 156 | 0.658069 | r"""
Modules performing small, commonly used tasks throughout the package.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from math import ceil
import numpy as np
from warnings import warn
from astropy.table import Table
from ..custom_exceptions import HalotoolsError
__all__ = ['SampleSelector']
def compute_conditional_percentiles(**kwargs):
    r"""
    In bins of the ``prim_haloprop``, compute the rank-order percentile
    of the input ``table`` based on the value of ``sec_haloprop``.

    Parameters
    ----------
    table : astropy table, optional
        a keyword argument that stores halo catalog being used to make mock galaxy population
        If a `table` is passed, the `prim_haloprop_key` and `sec_haloprop_key` keys
        must also be passed. If not passing a `table`, you must directly pass the
        `prim_haloprop` and `sec_haloprop` keyword arguments.

    prim_haloprop_key : string, optional
        Name of the column of the input ``table`` that will be used to access the
        primary halo property. `compute_conditional_percentiles` bins the ``table`` by
        ``prim_haloprop_key`` when computing the result.

    sec_haloprop_key : string, optional
        Name of the column of the input ``table`` that will be used to access the
        secondary halo property. `compute_conditional_percentiles` bins the ``table`` by
        ``prim_haloprop_key``, and in each bin uses the value stored in ``sec_haloprop_key``
        to compute the ``prim_haloprop``-conditioned rank-order percentile.

    prim_haloprop : array_like, optional
        Array storing the primary halo property used to bin the input points.
        If a `prim_haloprop` is passed, you must also pass a `sec_haloprop`.

    sec_haloprop : array_like, optional
        Array storing the secondary halo property used to define the conditional percentiles
        in each bin of `prim_haloprop`.

    prim_haloprop_bin_boundaries : array, optional
        Array defining the boundaries by which we will bin the input ``table``.
        Default is None, in which case the binning will be automatically determined using
        the ``dlog10_prim_haloprop`` keyword.

    dlog10_prim_haloprop : float, optional
        Logarithmic spacing of bins of the mass-like variable within which
        we will assign secondary property percentiles. Default is 0.05.

    Examples
    --------
    >>> from halotools.sim_manager import FakeSim
    >>> fakesim = FakeSim()
    >>> result = compute_conditional_percentiles(table = fakesim.halo_table, prim_haloprop_key = 'halo_mvir', sec_haloprop_key = 'halo_vmax')

    Notes
    -----
    The sign of the result is such that in bins of the primary property,
    *smaller* values of the secondary property
    receive *smaller* values of the returned percentile.
    """
    # Two call styles are supported: a ``table`` plus the two column keys, or
    # the two property arrays passed in directly.
    if 'table' in kwargs:
        table = kwargs['table']
        try:
            prim_haloprop_key = kwargs['prim_haloprop_key']
            prim_haloprop = table[prim_haloprop_key]
            sec_haloprop_key = kwargs['sec_haloprop_key']
            sec_haloprop = table[sec_haloprop_key]
        except KeyError:
            msg = ("\nWhen passing an input ``table`` to the ``compute_conditional_percentiles`` method,\n"
                "you must also pass ``prim_haloprop_key`` and ``sec_haloprop_key`` keyword arguments\n"
                "whose values are column keys of the input ``table``\n")
            raise HalotoolsError(msg)
    else:
        try:
            prim_haloprop = kwargs['prim_haloprop']
            sec_haloprop = kwargs['sec_haloprop']
        except KeyError:
            msg = ("\nIf not passing an input ``table`` to the ``compute_conditional_percentiles`` method,\n"
                "you must pass a ``prim_haloprop`` and ``sec_haloprop`` arguments\n")
            raise HalotoolsError(msg)

    def compute_prim_haloprop_bins(dlog10_prim_haloprop=0.05, **kwargs):
        r"""
        Assign each input point to a (logarithmic) bin of the primary property.

        Parameters
        ----------
        prim_haloprop : array
            Array storing the value of the primary halo property column of the ``table``
            passed to ``compute_conditional_percentiles``.

        prim_haloprop_bin_boundaries : array, optional
            Array defining the boundaries by which we will bin the input ``table``.
            Default is None, in which case the binning will be automatically determined using
            the ``dlog10_prim_haloprop`` keyword.

        dlog10_prim_haloprop : float, optional
            Logarithmic spacing of bins of the mass-like variable within which
            we will assign secondary property percentiles. Default is 0.05.

        Returns
        --------
        output : array
            Numpy array of integers storing the bin index of the prim_haloprop bin
            to which each halo in the input table was assigned.
        """
        try:
            prim_haloprop = kwargs['prim_haloprop']
        except KeyError:
            msg = ("The ``compute_prim_haloprop_bins`` method "
                "requires the ``prim_haloprop`` keyword argument")
            raise HalotoolsError(msg)

        try:
            prim_haloprop_bin_boundaries = kwargs['prim_haloprop_bin_boundaries']
        except KeyError:
            # No explicit boundaries: build log-spaced bins spanning the data,
            # padded by 0.001 dex so the extreme points fall inside the range.
            lg10_min_prim_haloprop = np.log10(np.min(prim_haloprop))-0.001
            lg10_max_prim_haloprop = np.log10(np.max(prim_haloprop))+0.001
            num_prim_haloprop_bins = (lg10_max_prim_haloprop-lg10_min_prim_haloprop)/dlog10_prim_haloprop
            prim_haloprop_bin_boundaries = np.logspace(
                lg10_min_prim_haloprop, lg10_max_prim_haloprop,
                num=int(ceil(num_prim_haloprop_bins)))

        # digitize the masses so that we can access them bin-wise
        output = np.digitize(prim_haloprop, prim_haloprop_bin_boundaries)

        # Use the largest bin for any points larger than the largest bin boundary,
        # and raise a warning if such points are found
        Nbins = len(prim_haloprop_bin_boundaries)
        if Nbins in output:
            msg = ("\n\nThe ``compute_prim_haloprop_bins`` function detected points in the \n"
                "input array of primary halo property that were larger than the largest value\n"
                "of the input ``prim_haloprop_bin_boundaries``. All such points will be assigned\n"
                "to the largest bin.\nBe sure that this is the behavior you expect for your application.\n\n")
            warn(msg)
            output = np.where(output == Nbins, Nbins-1, output)

        return output

    # Forward only the optional binning kwargs that were actually supplied,
    # so the nested function's own defaults apply otherwise.
    compute_prim_haloprop_bins_dict = {}
    compute_prim_haloprop_bins_dict['prim_haloprop'] = prim_haloprop
    try:
        compute_prim_haloprop_bins_dict['prim_haloprop_bin_boundaries'] = (
            kwargs['prim_haloprop_bin_boundaries'])
    except KeyError:
        pass
    try:
        compute_prim_haloprop_bins_dict['dlog10_prim_haloprop'] = kwargs['dlog10_prim_haloprop']
    except KeyError:
        pass
    prim_haloprop_bins = compute_prim_haloprop_bins(**compute_prim_haloprop_bins_dict)

    output = np.zeros_like(prim_haloprop)

    # sort on secondary property only with each mass bin
    bins_in_halocat = set(prim_haloprop_bins)
    for ibin in bins_in_halocat:
        indices_of_prim_haloprop_bin = np.where(prim_haloprop_bins == ibin)[0]

        num_in_bin = len(sec_haloprop[indices_of_prim_haloprop_bin])

        # Find the indices that sort by the secondary property
        ind_sorted = np.argsort(sec_haloprop[indices_of_prim_haloprop_bin])

        # Ranks run 1/num_in_bin .. 1.0, so smaller sec_haloprop values get
        # smaller percentiles (see Notes in the docstring).
        percentiles = np.zeros(num_in_bin)
        percentiles[ind_sorted] = (np.arange(num_in_bin) + 1.0) / float(num_in_bin)

        # place the percentiles into the catalog
        output[indices_of_prim_haloprop_bin] = percentiles

    return output
class SampleSelector(object):
    r""" Container class for commonly used sample selections.
    """
    @staticmethod
    def host_halo_selection(return_subhalos=False, **kwargs):
        r""" Method divides sample in to host halos and subhalos, and returns
        either the hosts or the hosts and the subs depending
        on the value of the input ``return_subhalos``.
        """
        table = kwargs['table']
        # Host halos are identified by the halo_upid == -1 convention.
        mask = table['halo_upid'] == -1
        if return_subhalos is False:
            return table[mask]
        else:
            # Return (hosts, subhalos) as two separate tables.
            return table[mask], table[~mask]
    @staticmethod
    def property_range(lower_bound=-float("inf"), upper_bound=float("inf"),
            return_complement=False, host_halos_only=False, subhalos_only=False, **kwargs):
        r""" Method makes a cut on an input table column based on an input upper and lower bound, and
        returns the cut table.
        Parameters
        ----------
        table : Astropy Table object, keyword argument
        key : string, keyword argument
            Column name that will be used to apply the cut
        lower_bound : float, optional keyword argument
            Minimum value for the input column of the returned table. Default is :math:`-\infty`.
        upper_bound : float, optional keyword argument
            Maximum value for the input column of the returned table. Default is :math:`+\infty`.
        return_complement : bool, optional keyword argument
            If True, `property_range` gives the table elements that do not pass the cut
            as the second return argument. Default is False.
        host_halos_only : bool, optional keyword argument
            If true, `property_range` will use the `host_halo_selection` method to
            make an additional cut on the sample so that only host halos are returned.
            Default is False
        subhalos_only : bool, optional keyword argument
            If true, `property_range` will use the `host_halo_selection` method to
            make an additional cut on the sample so that only subhalos are returned.
            Default is False
        Returns
        -------
        cut_table : Astropy Table object
        Examples
        ---------
        To demonstrate the `property_range` method, we will start out by loading
        a table of halos into memory using the `FakeSim` class:
        >>> from halotools.sim_manager import FakeSim
        >>> halocat = FakeSim()
        >>> halos = halocat.halo_table
        To make a cut on the halo catalog to select halos in a specific mass range:
        >>> halo_sample = SampleSelector.property_range(table = halos, key = 'halo_mvir', lower_bound = 1e12, upper_bound = 1e13)
        To apply this same cut, and also only select host halos passing the cut, we use the ``host_halos_only`` keyword:
        >>> host_halo_sample = SampleSelector.property_range(table = halos, key = 'halo_mvir', lower_bound = 1e12, upper_bound = 1e13, host_halos_only=True)
        The same applies if we only want subhalos returned only now we use the ``subhalos_only`` keyword:
        >>> subhalo_sample = SampleSelector.property_range(table = halos, key = 'halo_mvir', lower_bound = 1e12, upper_bound = 1e13, subhalos_only=True)
        """
        table = kwargs['table']
        # First apply the host halo cut, if applicable
        if (host_halos_only is True) & (subhalos_only is True):
            raise KeyError("You cannot simultaneously select only host halos and only subhalos")
        elif host_halos_only is True:
            table = SampleSelector.host_halo_selection(table=table)
        elif subhalos_only is True:
            hosts, table = SampleSelector.host_halo_selection(table=table, return_subhalos=True)
        key = kwargs['key']
        # Inclusive range cut on the requested column at both boundaries.
        mask = (table[key] >= lower_bound) & (table[key] <= upper_bound)
        if return_complement is True:
            return table[mask], table[np.invert(mask)]
        else:
            return table[mask]
    @staticmethod
    def split_sample(**kwargs):
        r""" Method divides a sample into subsamples based on the percentile ranking of a given property.
        Parameters
        ----------
        table : Astropy Table object, keyword argument
        key : string, keyword argument
            Column name that will be used to define the percentiles
        percentiles : array_like
            Sequence of percentiles used to define the returned subsamples. If ``percentiles``
            has more than one element, the elements must be monotonically increasing.
            If ``percentiles`` is length-N, there will be N+1 returned subsamples.
        Returns
        -------
        subsamples : list
        Examples
        --------
        To demonstrate the `split_sample` method, we will start out by loading
        a table of halos into memory using the `FakeSim` class:
        >>> from halotools.sim_manager import FakeSim
        >>> halocat = FakeSim()
        >>> halos = halocat.halo_table
        We can easily use `split_sample` to divide the sample into a high-Vmax and low-Vmax subsamples:
        >>> sample_below_median, sample_above_median = SampleSelector.split_sample(table = halos, key = 'halo_vmax', percentiles = 0.5)
        Likewise, we can do the same thing to divide the sample into quartiles:
        >>> lowest, lower, higher, highest = SampleSelector.split_sample(table = halos, key = 'halo_zhalf', percentiles = [0.25, 0.5, 0.75])
        The following alternative syntax is also supported:
        >>> subsample_collection = SampleSelector.split_sample(table = halos, key = 'halo_zhalf', percentiles = [0.25, 0.5, 0.75])
        >>> lowest, lower, higher, highest = subsample_collection
        """
        table = kwargs['table']
        if not isinstance(table, Table):
            raise TypeError("Input table must be an Astropy Table instance")
        key = kwargs['key']
        if key not in list(table.keys()):
            raise KeyError("The ``{0}`` key does not appear in the table you are trying \n"
                "to split into subsamples".format(key))
        # NOTE: sorts the caller's table in place as a side effect.
        table.sort(key)
        percentiles = kwargs['percentiles']
        percentiles = np.array(percentiles)
        # Promote a scalar percentile (e.g. 0.5) to a length-1 array.
        if np.shape(percentiles) == ():
            percentiles = np.array([percentiles])
        num_total = len(table)
        if len(percentiles) >= num_total:
            raise ValueError("Input length of percentiles must be less than input table length")
        # Convert fractional percentiles into integer split indices,
        # book-ended by 0 and len(table).
        indices = percentiles*num_total
        indices = np.insert(indices, 0, 0)
        percentiles = np.insert(percentiles, 0, 0)
        indices = indices.astype(int)
        indices = np.append(indices, len(table))
        percentiles = np.append(percentiles, 1.0)
        # d[k] is the number of rows in subsample k.
        d = np.diff(indices)
        # NOTE(review): the last bin size is decremented before the emptiness
        # check, so the final subsample appears to need >= 2 elements — confirm intent.
        d[-1] -= 1
        if 0 in d:
            print("Raise exception: too many percentile bins")
            idx_too_few = np.nanargmin(d)
            raise ValueError("The input percentiles spacing is too fine.\n"
                "For example, there are no table elements in the percentile range (%.2f, %.2f)" %
                (percentiles[idx_too_few], percentiles[idx_too_few+1]))
        # Object array so each slot can hold a Table slice of different length.
        result = np.zeros(len(indices)-1, dtype=object)
        for i, first_idx, last_idx in zip(list(range(len(result))), indices[:-1], indices[1:]):
            result[i] = table[first_idx:last_idx]
        return result
| 0 | 0 | 0 |
6dec065fc7917448c6280be83b6fd3cb811fdc90 | 581 | py | Python | main.py | KeitaShiratori/ripple | 183abdbb5f3853acab459c703c3c10d3fcd47e9b | [
"MIT"
] | null | null | null | main.py | KeitaShiratori/ripple | 183abdbb5f3853acab459c703c3c10d3fcd47e9b | [
"MIT"
] | null | null | null | main.py | KeitaShiratori/ripple | 183abdbb5f3853acab459c703c3c10d3fcd47e9b | [
"MIT"
] | null | null | null | from flask import Flask, send_from_directory, session, request
import os
app = Flask(__name__)
from route.top import top
from route.prj import prj
app.register_blueprint(top)
app.register_blueprint(prj)
# sessionを有効にするための秘密鍵
app.secret_key = os.environ.get('SECRET_KEY')
# limit upload file size : 1MB
app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024
@app.route('/favicon.ico')
if __name__ == '__main__':
app.debug = True
app.run(host='127.0.0.1',port=5000)
| 24.208333 | 95 | 0.745267 | from flask import Flask, send_from_directory, session, request
import os
# Module-level Flask application object shared by the blueprints.
app = Flask(__name__)
# Blueprints are imported after ``app`` is created — presumably to avoid a
# circular import with the route modules; verify before reordering.
from route.top import top
from route.prj import prj
app.register_blueprint(top)
app.register_blueprint(prj)
# Secret key required to enable Flask sessions (read from the environment).
app.secret_key = os.environ.get('SECRET_KEY')
# limit upload file size : 1MB
app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the shared static image directory."""
    icon_dir = os.path.join(app.root_path, 'static/common/img')
    return send_from_directory(icon_dir, 'favicon.ico')
# Run the built-in development server when executed directly (not under WSGI).
if __name__ == '__main__':
    app.debug = True
    app.run(host='127.0.0.1',port=5000)
| 89 | 0 | 22 |
21adbf92ebdc4edac1ac2ac49cc7b3804490d783 | 4,223 | py | Python | crosshair/diff_behavior_test.py | mristin/CrossHair | 66a44a0d10021e0b1e2d847a677274e62ddd1e9d | [
"MIT"
] | null | null | null | crosshair/diff_behavior_test.py | mristin/CrossHair | 66a44a0d10021e0b1e2d847a677274e62ddd1e9d | [
"MIT"
] | 1 | 2021-05-04T12:52:52.000Z | 2021-05-04T17:06:23.000Z | crosshair/diff_behavior_test.py | JinghanCode/Crosshair-Hypothesis | cbad2586fb797b2fa2326e5a9f550c6d56810f2e | [
"MIT"
] | null | null | null | import dis
import unittest
import sys
from typing import List
from typing import Optional
from crosshair.diff_behavior import BehaviorDiff
from crosshair.diff_behavior import diff_behavior
from crosshair.fnutil import walk_qualname
from crosshair.fnutil import FunctionInfo
from crosshair.options import AnalysisOptions
from crosshair.options import DEFAULT_OPTIONS
from crosshair.util import debug
from crosshair.util import set_debug
foo1 = FunctionInfo.from_fn(_foo1)
foo2 = FunctionInfo.from_fn(_foo2)
foo3 = FunctionInfo.from_fn(_foo3)
if __name__ == "__main__":
if ("-v" in sys.argv) or ("--verbose" in sys.argv):
set_debug(True)
unittest.main()
| 28.924658 | 88 | 0.609519 | import dis
import unittest
import sys
from typing import List
from typing import Optional
from crosshair.diff_behavior import BehaviorDiff
from crosshair.diff_behavior import diff_behavior
from crosshair.fnutil import walk_qualname
from crosshair.fnutil import FunctionInfo
from crosshair.options import AnalysisOptions
from crosshair.options import DEFAULT_OPTIONS
from crosshair.util import debug
from crosshair.util import set_debug
def _foo1(x: int) -> int:
if x >= 100:
return 100
return x
foo1 = FunctionInfo.from_fn(_foo1)
def _foo2(x: int) -> int:
return min(x, 100)
foo2 = FunctionInfo.from_fn(_foo2)
def _foo3(x: int) -> int:
if x > 1000:
return 1000
elif x > 100:
return 100
else:
return x
foo3 = FunctionInfo.from_fn(_foo3)
class Base:
    """Parent fixture class used by the diff-behavior tests."""

    def foo(self):
        """Return the constant 10."""
        return 10

    @staticmethod
    def staticfoo(x: int) -> int:
        """Return ``x`` capped at 100 (behaviorally ``min(x, 100)``)."""
        return x if x < 100 else 100
class Derived(Base):
    """Subclass fixture whose ``foo`` override returns a different constant."""
    def foo(self):
        # Overrides Base.foo (which returns 10) so diff_behavior finds a diff.
        return 11
class BehaviorDiffTest(unittest.TestCase):
    """End-to-end tests for crosshair's ``diff_behavior``.

    NOTE(review): the small fixture functions defined inside these tests are
    themselves the *inputs* to symbolic analysis — the expected diffs key on
    their parameter names (``a``, ``i``, ``s``), so do not rename those.
    """
    def test_diff_method(self) -> None:
        """Overridden instance methods yield exactly one diff (10 vs 11)."""
        diffs = diff_behavior(
            walk_qualname(Base, "foo"),
            walk_qualname(Derived, "foo"),
            DEFAULT_OPTIONS.overlay(max_iterations=10),
        )
        assert isinstance(diffs, list)
        self.assertEqual(
            [(d.result1.return_repr, d.result2.return_repr) for d in diffs],
            [("10", "11")],
        )
    def test_diff_staticmethod(self) -> None:
        """A staticmethod and a plain function with the same behavior diff empty."""
        diffs = diff_behavior(
            walk_qualname(Base, "staticfoo"),
            foo2,
            DEFAULT_OPTIONS.overlay(max_iterations=10),
        )
        self.assertEqual(diffs, [])
    def test_diff_behavior_same(self) -> None:
        """Behaviorally-equivalent implementations produce no diffs."""
        diffs = diff_behavior(foo1, foo2, DEFAULT_OPTIONS.overlay(max_iterations=10))
        self.assertEqual(diffs, [])
    def test_diff_behavior_different(self) -> None:
        """foo1 vs foo3 differ only for x > 1000; exactly one diff expected."""
        diffs = diff_behavior(foo1, foo3, DEFAULT_OPTIONS.overlay(max_iterations=10))
        self.assertEqual(len(diffs), 1)
        diff = diffs[0]
        assert isinstance(diff, BehaviorDiff)
        # The counterexample must land in the diverging region.
        self.assertGreater(int(diff.args["x"]), 1000)
        self.assertEqual(diff.result1.return_repr, "100")
        self.assertEqual(diff.result2.return_repr, "1000")
    def test_diff_behavior_mutation(self) -> None:
        """Detect divergence between two list-mutating implementations (i == -1)."""
        def cut_out_item1(a: List[int], i: int):
            a[i : i + 1] = []
        def cut_out_item2(a: List[int], i: int):
            a[:] = a[:i] + a[i + 1 :]
        # TODO: this takes longer than I'd like (few iterations though):
        opts = DEFAULT_OPTIONS.overlay(
            max_iterations=20, per_path_timeout=10, per_condition_timeout=10
        )
        diffs = diff_behavior(
            FunctionInfo.from_fn(cut_out_item1),
            FunctionInfo.from_fn(cut_out_item2),
            opts,
        )
        assert not isinstance(diffs, str)
        self.assertEqual(len(diffs), 1)
        diff = diffs[0]
        self.assertGreater(len(diff.args["a"]), 1)
        self.assertEqual(diff.args["i"], "-1")
    def test_example_coverage(self) -> None:
        """Counterexamples should cover each behavioral difference region."""
        # Try to get examples that highlist the differences in the code.
        # Here, we add more conditions for the `return True` path and
        # another case where we used to just `return False`.
        def isack1(s: str) -> bool:
            if s in ("y", "yes"):
                return True
            return False
        def isack2(s: str) -> Optional[bool]:
            if s in ("y", "yes", "Y", "YES"):
                return True
            if s in ("n", "no", "N", "NO"):
                return False
            return None
        diffs = diff_behavior(
            FunctionInfo.from_fn(isack1),
            FunctionInfo.from_fn(isack2),
            DEFAULT_OPTIONS.overlay(max_iterations=20, per_condition_timeout=5),
        )
        debug("diffs=", diffs)
        assert not isinstance(diffs, str)
        return_vals = set((d.result1.return_repr, d.result2.return_repr) for d in diffs)
        self.assertEqual(return_vals, {("False", "None"), ("False", "True")})
# Run the tests directly; -v/--verbose also enables crosshair debug logging.
if __name__ == "__main__":
    if ("-v" in sys.argv) or ("--verbose" in sys.argv):
        set_debug(True)
    unittest.main()
| 3,134 | 81 | 325 |
c715f8b14d33a64ac154ce8912787db8e6eb54e0 | 2,652 | py | Python | tests/test_opus_file.py | AndreasGocht/PyOgg | f0a7c5aa3d9d987179c5561a96b86688c6e235d3 | [
"BSD-3-Clause",
"Unlicense"
] | 24 | 2017-09-01T06:17:48.000Z | 2020-09-06T17:17:35.000Z | tests/test_opus_file.py | AndreasGocht/PyOgg | f0a7c5aa3d9d987179c5561a96b86688c6e235d3 | [
"BSD-3-Clause",
"Unlicense"
] | 44 | 2020-09-10T23:39:40.000Z | 2022-03-05T20:20:03.000Z | tests/test_opus_file.py | AndreasGocht/PyOgg | f0a7c5aa3d9d987179c5561a96b86688c6e235d3 | [
"BSD-3-Clause",
"Unlicense"
] | 9 | 2020-10-31T22:21:30.000Z | 2022-01-31T20:00:36.000Z | import pytest
import pyogg
import os
from config import Config
# FIXME: This shouldn't be a source of error, but it currently is.
# This works in macOS and probably Linux, but not Windows.
# def test_unicode_filename(pyogg_config: Config):
# filename = str(
# pyogg_config.rootdir
# / "examples/unicode filename 🎵.opus"
# )
# opus_file = pyogg.OpusFile(filename)
| 29.797753 | 66 | 0.693816 | import pytest
import pyogg
import os
from config import Config
def test_error_in_filename() -> None:
    """Opening a path that does not exist must raise pyogg.PyOggError."""
    missing_path = "does-not-exist.opus"
    with pytest.raises(pyogg.PyOggError):
        pyogg.OpusFile(missing_path)
# FIXME: This shouldn't be a source of error, but it currently is.
# This works in macOS and probably Linux, but not Windows.
# def test_unicode_filename(pyogg_config: Config):
# filename = str(
# pyogg_config.rootdir
# / "examples/unicode filename 🎵.opus"
# )
# opus_file = pyogg.OpusFile(filename)
def test_as_array(pyogg_config: Config) -> None:
    """The 5-second demo file should decode to exactly 5 s of samples
    when viewed through ``as_array()``."""
    demo_path = str(pyogg_config.rootdir / "examples/left-right-demo-5s.opus")
    opus_file = pyogg.OpusFile(demo_path)
    # 5 seconds times the sample rate gives the expected frame count.
    expected_duration_samples = 5 * opus_file.frequency
    duration_samples = opus_file.as_array().shape[0]
    assert duration_samples == expected_duration_samples
def test_as_bytes(pyogg_config: Config) -> None:
    """The raw PCM buffer must contain exactly 5 s of audio bytes."""
    demo_path = str(pyogg_config.rootdir / "examples/left-right-demo-5s.opus")
    opus_file = pyogg.OpusFile(demo_path)
    # seconds * rate * bytes-per-sample * channels
    expected_duration_bytes = (
        5
        * opus_file.frequency
        * opus_file.bytes_per_sample
        * opus_file.channels
    )
    duration_bytes = len(bytes(opus_file.buffer))
    assert duration_bytes == expected_duration_bytes
def test_output_via_wav(pyogg_config: Config) -> None:
    """Decode the 5-second demo Opus file and write it out as a WAV file.

    Fix: the ``wave`` writer is now closed via a context manager, so the WAV
    header (frame count) is finalized deterministically instead of relying on
    garbage collection to call ``close()``.
    """
    # Load the demonstration file that is exactly 5 seconds long
    filename = str(
        pyogg_config.rootdir
        / "examples/left-right-demo-5s.opus"
    )
    opus_file = pyogg.OpusFile(filename)
    import wave
    out_filename = str(
        pyogg_config.outdir
        / "test_opus_file__test_output_via_wav.wav"
    )
    # Wave_write supports the context-manager protocol; __exit__ closes the
    # file and patches up the header.
    with wave.open(out_filename, "wb") as wave_out:
        wave_out.setnchannels(opus_file.channels)
        wave_out.setsampwidth(opus_file.bytes_per_sample)
        wave_out.setframerate(opus_file.frequency)
        wave_out.writeframes(opus_file.buffer)
| 2,148 | 0 | 104 |
3de94996b12f40568baf4ce8f866cee063f92f7c | 1,753 | py | Python | src/packet.py | Kushagra-0801/SIR-Is-Reliable | c553fe7ea78c33652451665d45ac8acf475ee869 | [
"MIT"
] | null | null | null | src/packet.py | Kushagra-0801/SIR-Is-Reliable | c553fe7ea78c33652451665d45ac8acf475ee869 | [
"MIT"
] | null | null | null | src/packet.py | Kushagra-0801/SIR-Is-Reliable | c553fe7ea78c33652451665d45ac8acf475ee869 | [
"MIT"
] | 1 | 2021-08-23T03:56:26.000Z | 2021-08-23T03:56:26.000Z | from dataclasses import dataclass
from hashlib import md5
MAX_PACKET_SIZE: int = 64
DATA_SIZE: int = 43
CONTINUATION_PREFIX: bytes = b"\xFE\xFD"
SEQ_LIM = 2**32
FINISHER_DATA = b"TEKCAP TSAL"
@dataclass
| 26.164179 | 65 | 0.564176 | from dataclasses import dataclass
from hashlib import md5
MAX_PACKET_SIZE: int = 64
DATA_SIZE: int = 43
CONTINUATION_PREFIX: bytes = b"\xFE\xFD"
SEQ_LIM = 2**32
FINISHER_DATA = b"TEKCAP TSAL"
def hexdigest_to_bytes(digest: str) -> bytes:
    """Convert a hex digest string (as produced by hashlib's ``hexdigest()``)
    into the corresponding raw bytes.

    Idiom fix: the manual two-nibble parsing loop is replaced by the built-in
    ``bytes.fromhex``, which performs the identical conversion in C.  The
    even-length precondition is kept as an assert to preserve the original
    failure mode for odd-length input.
    """
    assert len(digest) % 2 == 0
    return bytes.fromhex(digest)
@dataclass
class Packet:
    """A fixed-size (64-byte) reliable-transfer packet.

    Wire layout: 4-byte big-endian sequence number, 16-byte MD5 checksum of
    the payload, one flags+length byte (bit 7 = ACK, bit 6 = NAK, low 6 bits
    = payload length), the payload itself, zero-padded to 64 bytes total.
    """
    seq_no: int    # wraps at SEQ_LIM (2**32); serialized as 4 bytes
    ack: bool      # acknowledgment flag (bit 7 of the flags byte)
    nak: bool      # negative-acknowledgment flag (bit 6 of the flags byte)
    data: bytes    # payload; must fit in DATA_SIZE bytes to keep the packet at 64

    def into_buf(self) -> bytes:
        """Serialize this packet into its 64-byte wire representation."""
        buf = self.seq_no.to_bytes(4, 'big', signed=False)
        # md5(...).digest() yields the same 16 raw bytes the original produced
        # by round-tripping hexdigest() through hexdigest_to_bytes().
        buf += md5(self.data).digest()
        flags_and_len = len(self.data)
        if self.ack:
            flags_and_len |= 0b10_00_00_00
        if self.nak:
            flags_and_len |= 0b01_00_00_00
        buf += flags_and_len.to_bytes(1, 'big', signed=False)
        buf += self.data
        buf += b'\0' * (64 - len(buf))
        return buf

    @staticmethod
    def from_buf(buf: bytes):
        """Parse a 64-byte buffer back into a Packet.

        Validation intentionally keeps ``assert`` (AssertionError) so the
        caller-visible failure mode is unchanged; note asserts are stripped
        under ``python -O``.
        """
        assert len(buf) == 64
        seq_no = int.from_bytes(buf[:4], 'big', signed=False)
        chksm = buf[4:20]
        flags_and_len = buf[20]
        ack = (flags_and_len & 0b10_00_00_00) != 0
        nak = (flags_and_len & 0b01_00_00_00) != 0
        length = flags_and_len & 0b00_11_11_11
        data = buf[21:21 + length]
        # Integrity check: stored checksum must match the payload's MD5.
        assert chksm == md5(data).digest()
        return Packet(seq_no, ack, nak, data)

    @staticmethod
    def finisher(seq_no: int):
        """Build the sentinel final packet (ACK+NAK with FINISHER_DATA)."""
        return Packet(seq_no, True, True, FINISHER_DATA)

    @staticmethod
    def acknowledgment(seq_no: int):
        """Build a pure-ACK packet with an empty payload."""
        return Packet(seq_no, True, False, b"")
| 1,286 | 214 | 45 |
81bb068987c339b84afdb0f2068e3df134ec4cdc | 7,251 | py | Python | models/test.py | hu-zhiyu/ARCM | ccd2205446fc22786852cc1a1c12bbc5448e8c0a | [
"MIT"
] | 3 | 2020-10-04T02:43:33.000Z | 2021-03-28T16:57:30.000Z | models/test.py | dongbo-BUAA-VR/ARCM | ccd2205446fc22786852cc1a1c12bbc5448e8c0a | [
"MIT"
] | 3 | 2020-09-25T22:39:02.000Z | 2022-02-09T23:40:51.000Z | models/test.py | dongbo-BUAA-VR/ARCM | ccd2205446fc22786852cc1a1c12bbc5448e8c0a | [
"MIT"
] | 1 | 2020-09-21T06:32:46.000Z | 2020-09-21T06:32:46.000Z | import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
from scipy import stats
from IPython import embed
import provider
from model import *
# from test_utils import *
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import indoor3d_util
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--verbose', action='store_true', help='if specified, output color-coded seg obj files')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--bandwidth', type=float, default=1., help='Bandwidth for meanshift clustering [default: 1.]')
parser.add_argument('--input_list', type=str, default='data/test_hdf5_file_list_Area5.txt', help='Input data list file')
parser.add_argument('--model_path', type=str, default='log/model.ckpt', help='Path of model')
FLAGS = parser.parse_args()
BATCH_SIZE = 1
NUM_POINT = FLAGS.num_point
GPU_INDEX = FLAGS.gpu
MODEL_PATH = FLAGS.model_path
TEST_FILE_LIST = FLAGS.input_list
BANDWIDTH = FLAGS.bandwidth
output_verbose = FLAGS.verbose
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
OUTPUT_DIR = os.path.join(LOG_DIR, 'test_results')
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
os.system('cp inference_merge.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_inference.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 4096
NUM_CLASSES = 13
NEW_NUM_CLASSES = 13
HOSTNAME = socket.gethostname()
print("ROOTDIR", ROOT_DIR)
ROOM_PATH_LIST = [os.path.join(ROOT_DIR,line.rstrip()) for line in open(os.path.join(ROOT_DIR, FLAGS.input_list))]
len_pts_files = len(ROOM_PATH_LIST)
if __name__ == "__main__":
test()
LOG_FOUT.close()
| 38.983871 | 130 | 0.607089 | import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
from scipy import stats
from IPython import embed
import provider
from model import *
# from test_utils import *
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import indoor3d_util
# Command-line configuration for the inference script; the resulting FLAGS
# values are exposed as module-level globals consumed by test() below.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--verbose', action='store_true', help='if specified, output color-coded seg obj files')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--bandwidth', type=float, default=1., help='Bandwidth for meanshift clustering [default: 1.]')
parser.add_argument('--input_list', type=str, default='data/test_hdf5_file_list_Area5.txt', help='Input data list file')
parser.add_argument('--model_path', type=str, default='log/model.ckpt', help='Path of model')
FLAGS = parser.parse_args()
BATCH_SIZE = 1
NUM_POINT = FLAGS.num_point
GPU_INDEX = FLAGS.gpu
MODEL_PATH = FLAGS.model_path
# NOTE(review): TEST_FILE_LIST and BANDWIDTH are assigned here but not used
# anywhere in this file's visible code — confirm whether they are dead.
TEST_FILE_LIST = FLAGS.input_list
BANDWIDTH = FLAGS.bandwidth
output_verbose = FLAGS.verbose
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
OUTPUT_DIR = os.path.join(LOG_DIR, 'test_results')
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
os.system('cp inference_merge.py %s' % (LOG_DIR)) # bkp of train procedure
# Log file handle kept open for the whole run; closed in the __main__ guard.
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_inference.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 4096
NUM_CLASSES = 13
NEW_NUM_CLASSES = 13
HOSTNAME = socket.gethostname()
print("ROOTDIR", ROOT_DIR)
# One room file path per line in the input list, resolved against ROOT_DIR.
ROOM_PATH_LIST = [os.path.join(ROOT_DIR,line.rstrip()) for line in open(os.path.join(ROOT_DIR, FLAGS.input_list))]
len_pts_files = len(ROOM_PATH_LIST)
def log_string(out_str):
    """Append *out_str* (plus newline) to the shared log file, flush it,
    and echo the same line to stdout."""
    print(out_str, file=LOG_FOUT)
    LOG_FOUT.flush()
    print(out_str)
def test():
    """Run semantic-segmentation inference over every room in ROOM_PATH_LIST,
    writing per-point predictions (and ground truth) under OUTPUT_DIR.

    Relies on the module-level globals built from FLAGS (BATCH_SIZE,
    NUM_POINT, GPU_INDEX, MODEL_PATH, LOG_DIR, OUTPUT_DIR, output_verbose).
    TF1-style: builds a static graph, restores a checkpoint, then feeds
    blocks one at a time (batch size 1).
    """
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            pointclouds_pl, sem_labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
            is_training_pl = tf.placeholder(tf.bool, shape=())
            # Get model and loss
            pred_sem, end_points = get_model(pointclouds_pl, NUM_CLASSES, is_training_pl, extra_constraint=False)
            pred_sem_softmax = tf.nn.softmax(pred_sem)
            pred_sem_label = tf.argmax(pred_sem_softmax, axis=2)
            loss = get_loss(pred_sem, sem_labels_pl, end_points, False)
            loader = tf.train.Saver()
        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = True
        sess = tf.Session(config=config)
        is_training = False
        # Restore variables from disk.
        loader.restore(sess, MODEL_PATH)
        log_string("Model restored.")
        ops = {'pointclouds_pl': pointclouds_pl,
               'sem_labels_pl': sem_labels_pl,
               'is_training_pl': is_training_pl,
               'pred_sem_label': pred_sem_label,
               'pred_sem_softmax': pred_sem_softmax,
               'loss': loss}
        # NOTE(review): total_acc/total_seen are accumulated but never
        # reported or returned — confirm whether a summary print was intended.
        total_acc = 0.0
        total_seen = 0
        output_filelist_f = os.path.join(LOG_DIR, 'output_filelist.txt')
        fout_out_filelist = open(output_filelist_f, 'w')
        for shape_idx in range(len_pts_files):
            room_path = ROOM_PATH_LIST[shape_idx]
            log_string('%d / %d ...' % (shape_idx, len_pts_files))
            log_string('Loading train file ' + room_path)
            # Per-room output files: predictions and ground-truth labels.
            out_data_label_filename = os.path.basename(room_path)[:-4] + '_pred.txt'
            out_data_label_filename = os.path.join(OUTPUT_DIR, out_data_label_filename)
            out_gt_label_filename = os.path.basename(room_path)[:-4] + '_gt.txt'
            out_gt_label_filename = os.path.join(OUTPUT_DIR, out_gt_label_filename)
            fout_data_label = open(out_data_label_filename, 'w')
            fout_gt_label = open(out_gt_label_filename, 'w')
            fout_out_filelist.write(out_data_label_filename+'\n')
            # Split the room into overlapping 1m blocks of NUM_POINT points.
            cur_data, cur_sem, _ = indoor3d_util.room2blocks_wrapper_normalized(room_path, NUM_POINT, block_size=1.0, stride=0.5,
                                                                                random_sample=False, sample_num=None)
            cur_data = cur_data[:, 0:NUM_POINT, :]
            cur_sem = np.squeeze(cur_sem)
            # Get room dimension..
            data_label = np.load(room_path)
            data = data_label[:, 0:6]
            max_room_x = max(data[:, 0])
            max_room_y = max(data[:, 1])
            max_room_z = max(data[:, 2])
            cur_pred_sem = np.zeros_like(cur_sem)
            cur_pred_sem_softmax = np.zeros([cur_sem.shape[0], cur_sem.shape[1], NUM_CLASSES])
            num_data = cur_data.shape[0]
            # Run the network block-by-block (batch size 1).
            for j in range(num_data):
                log_string("Processsing: Shape [%d] Block[%d]"%(shape_idx, j))
                pts = cur_data[j,...]
                sem = cur_sem[j]
                feed_dict = {ops['pointclouds_pl']: np.expand_dims(pts, 0),
                             ops['sem_labels_pl']: np.expand_dims(sem, 0),
                             ops['is_training_pl']: is_training}
                _, pred_sem_label_val, pred_sem_softmax_val = sess.run(
                    [ops['loss'], ops['pred_sem_label'], ops['pred_sem_softmax']],
                    feed_dict=feed_dict)
                pred_sem = np.squeeze(pred_sem_label_val, axis=0)
                pred_sem_softmax = np.squeeze(pred_sem_softmax_val, axis=0)
                cur_pred_sem[j, :] = pred_sem
                cur_pred_sem_softmax[j, ...] = pred_sem_softmax
                total_acc += float(np.sum(pred_sem==sem))/pred_sem.shape[0]
                total_seen += 1
            # Flatten block-wise results back to a per-point view of the room.
            seg_pred = cur_pred_sem.reshape(-1)
            seg_pred_softmax = cur_pred_sem_softmax.reshape([-1, NUM_CLASSES])
            pts = cur_data.reshape([-1, 9])
            seg_gt = cur_sem.reshape(-1)
            if output_verbose:
                # Undo normalization: columns 6:9 hold normalized coordinates,
                # 3:6 hold colors in [0, 1].
                pts[:, 6] *= max_room_x
                pts[:, 7] *= max_room_y
                pts[:, 8] *= max_room_z
                pts[:, 3:6] *= 255.0
                sem = seg_pred.astype(np.int32)
                sem_softmax = seg_pred_softmax
                sem_gt = seg_gt
                for i in range(pts.shape[0]):
                    fout_data_label.write('%f %f %f %d %d %d %f %d\n' % (
                        pts[i, 6], pts[i, 7], pts[i, 8], pts[i, 3], pts[i, 4], pts[i, 5], sem_softmax[i, sem[i]], sem[i]))
                    fout_gt_label.write('%d\n' % (sem_gt[i]))
            fout_data_label.close()
            fout_gt_label.close()
        fout_out_filelist.close()
# Script entry point: run inference over all rooms, then close the log file.
if __name__ == "__main__":
    test()
    LOG_FOUT.close()
| 5,042 | 0 | 50 |
ae1f499ef7f85b5c877640c22a2c55d2bd1d9e38 | 3,629 | py | Python | optimal_number_of_jabs_required_for_each_vaccine_company.py | codemasterady/Analyzing_Vaccines | dbe61d1918b27e53c2441da39b139413a4b62799 | [
"MIT"
] | 1 | 2021-04-28T05:15:50.000Z | 2021-04-28T05:15:50.000Z | optimal_number_of_jabs_required_for_each_vaccine_company.py | codemasterady/Analyzing_Vaccines | dbe61d1918b27e53c2441da39b139413a4b62799 | [
"MIT"
] | null | null | null | optimal_number_of_jabs_required_for_each_vaccine_company.py | codemasterady/Analyzing_Vaccines | dbe61d1918b27e53c2441da39b139413a4b62799 | [
"MIT"
] | null | null | null | # Importing the libraries
# NOTE(review): flat analysis script — joins two VAERS CSV exports on the
# patient ID, label-encodes the categorical columns, trains a small Keras
# regressor, and prints per-vaccine predictions.
import numpy as np
import pandas as pd
import keras
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import time
# Getting the dataset
X = pd.read_csv("2021VAERSDATA.csv").iloc[:5000, 0: 18].values
Y = pd.read_csv("2021VAERSVAX.csv").iloc[:5000, 0: 5].values
# Ultimate Array (Initialization)
total_data = []
# Matching the patients with the paramaters
# NOTE(review): O(n^2) nested join on the shared primary key; a dict index or
# pandas merge would be linear — confirm before changing behavior.
for i in range(0, len(X)): # Length of X as X is shorter
    primary_key_X = X[i, 0]
    selected_data = []
    for j in range(0, len(Y)):
        primary_key_Y = Y[j, 0]
        if primary_key_X == primary_key_Y:
            selected_data = [primary_key_Y, Y[j, 2], Y[j, 4], X[i, 17]]
            total_data.append(selected_data)
# # Preprocessing the input dataset
# total_data = np.array(total_data, dtype=object)
# Splitting independant to dependant
indep_value = []
dep_value = []
# NOTE(review): hard-coded 4930 instead of len(total_data) — verify it still
# matches the joined row count for this input slice.
for i in range(0, 4930): # len(total_data)):
    curr_element = total_data[i]
    if len(curr_element) <= 3:
        continue
    else:
        indep_value.append(curr_element[3])
        dep_value.append(curr_element[0:3])
# Applying the Machine Learning Model
indep_value = np.array(indep_value).reshape(-1, 1)
dep_value = np.array(dep_value)
training_set = np.concatenate((dep_value, indep_value), axis=1)
# Further Preprocessing of data
# Removing the nan and U
new_training_set = []
for setv in training_set:
    if setv[3] == "Y" or setv[3] == "N":
        new_training_set.append(setv)
    else:
        continue
new_training_set = np.array(new_training_set)
training_set = new_training_set
# Processing the Vaccine Types
vac_type_le = LabelEncoder()
training_set[:, 1] = vac_type_le.fit_transform(training_set[:, 1])
re_covid_le = LabelEncoder()
training_set[:, 3] = re_covid_le.fit_transform(training_set[:, 3])
training_set = np.array(training_set)
le = LabelEncoder()
training_set[:, 2] = le.fit_transform(training_set[:, 2])
# Removing the patient ID
training_set = training_set[:, 1:]
# Extracting the labels
vac_type_label_mapping = dict(
    zip(vac_type_le.classes_, vac_type_le.transform(vac_type_le.classes_)))
re_covid_label_mapping = dict(
    zip(re_covid_le.classes_, re_covid_le.transform(re_covid_le.classes_)))
le_mapping = dict(
    zip(le.classes_, le.transform(le.classes_)))
# Converting values to dep and indep
dep_training = training_set[:, 1].reshape(-1, 1) # !Y
indep_training = np.concatenate(
    (training_set[:, 0].reshape(-1, 1), training_set[:, 2].reshape(-1, 1)), axis=1) # !X
# Scaling the indep data
# NOTE(review): fit_transform's return value is discarded, so indep_training
# is NOT actually scaled — confirm whether scaling was intended.
sc = StandardScaler()
sc.fit_transform(indep_training)
print(indep_training.shape)
# Applying the Machine Learning model
nn = Sequential()
# input_shape=(indep_training.shape)))
nn.add(Dense(units=120, activation='relu'))
nn.add(Dense(units=60, activation='relu'))
# nn.add(Dense(units=15, activation='relu'))
nn.add(Dense(units=1))
nn.compile(optimizer="adam", loss="mse",
           metrics=[tf.keras.metrics.MeanSquaredError()])
nn.fit(indep_training, dep_training, batch_size=100, epochs=4000)
# # Printing the Labels
print(le_mapping)
print(vac_type_label_mapping)
print(re_covid_label_mapping)
#! Moderna (2)
# NOTE(review): Sequential.predict_classes was removed in TF >= 2.6, and the
# model is a single-unit mse regressor, so class prediction here is suspect.
moderna_optim = nn.predict_classes(np.array(([2, 0], [2, 1])))
print(moderna_optim)
#! Pfizer (2)
pfizer_optim = nn.predict_classes(np.array(([3, 0], [3, 1])))
print(pfizer_optim)
| 31.284483 | 90 | 0.702397 | # Importing the libraries
import numpy as np
import pandas as pd
import keras
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import time
# Getting the dataset
X = pd.read_csv("2021VAERSDATA.csv").iloc[:5000, 0: 18].values
Y = pd.read_csv("2021VAERSVAX.csv").iloc[:5000, 0: 5].values
# Ultimate Array (Initialization)
total_data = []
# Matching the patients with the paramaters
for i in range(0, len(X)): # Length of X as X is shorter
primary_key_X = X[i, 0]
selected_data = []
for j in range(0, len(Y)):
primary_key_Y = Y[j, 0]
if primary_key_X == primary_key_Y:
selected_data = [primary_key_Y, Y[j, 2], Y[j, 4], X[i, 17]]
total_data.append(selected_data)
# # Preprocessing the input dataset
# total_data = np.array(total_data, dtype=object)
# Splitting independant to dependant
indep_value = []
dep_value = []
for i in range(0, 4930): # len(total_data)):
curr_element = total_data[i]
if len(curr_element) <= 3:
continue
else:
indep_value.append(curr_element[3])
dep_value.append(curr_element[0:3])
# Applying the Machine Learning Model
indep_value = np.array(indep_value).reshape(-1, 1)
dep_value = np.array(dep_value)
training_set = np.concatenate((dep_value, indep_value), axis=1)
# Further Preprocessing of data
# Removing the nan and U
new_training_set = []
for setv in training_set:
if setv[3] == "Y" or setv[3] == "N":
new_training_set.append(setv)
else:
continue
new_training_set = np.array(new_training_set)
training_set = new_training_set
# Processing the Vaccine Types
vac_type_le = LabelEncoder()
training_set[:, 1] = vac_type_le.fit_transform(training_set[:, 1])
re_covid_le = LabelEncoder()
training_set[:, 3] = re_covid_le.fit_transform(training_set[:, 3])
training_set = np.array(training_set)
le = LabelEncoder()
training_set[:, 2] = le.fit_transform(training_set[:, 2])
# Removing the patient ID
training_set = training_set[:, 1:]
# Extracting the labels
vac_type_label_mapping = dict(
zip(vac_type_le.classes_, vac_type_le.transform(vac_type_le.classes_)))
re_covid_label_mapping = dict(
zip(re_covid_le.classes_, re_covid_le.transform(re_covid_le.classes_)))
le_mapping = dict(
zip(le.classes_, le.transform(le.classes_)))
# Converting values to dep and indep
dep_training = training_set[:, 1].reshape(-1, 1) # !Y
indep_training = np.concatenate(
(training_set[:, 0].reshape(-1, 1), training_set[:, 2].reshape(-1, 1)), axis=1) # !X
# Scaling the indep data
sc = StandardScaler()
sc.fit_transform(indep_training)
print(indep_training.shape)
# Applying the Machine Learning model
nn = Sequential()
# input_shape=(indep_training.shape)))
nn.add(Dense(units=120, activation='relu'))
nn.add(Dense(units=60, activation='relu'))
# nn.add(Dense(units=15, activation='relu'))
nn.add(Dense(units=1))
nn.compile(optimizer="adam", loss="mse",
metrics=[tf.keras.metrics.MeanSquaredError()])
nn.fit(indep_training, dep_training, batch_size=100, epochs=4000)
# # Printing the Labels
print(le_mapping)
print(vac_type_label_mapping)
print(re_covid_label_mapping)
#! Moderna (2)
moderna_optim = nn.predict_classes(np.array(([2, 0], [2, 1])))
print(moderna_optim)
#! Pfizer (2)
pfizer_optim = nn.predict_classes(np.array(([3, 0], [3, 1])))
print(pfizer_optim)
| 0 | 0 | 0 |
0df3a66e7fb88409a0e1ed68254b7aa79122a6e9 | 442 | py | Python | okie/tasks/urls.py | immortal-zeus/Django-task_dis | 10e99997893d4aabc2305969894e49f2873c17b7 | [
"BSD-3-Clause"
] | 1 | 2021-06-22T06:31:56.000Z | 2021-06-22T06:31:56.000Z | okie/tasks/urls.py | immortal-zeus/Django-task_dis | 10e99997893d4aabc2305969894e49f2873c17b7 | [
"BSD-3-Clause"
] | null | null | null | okie/tasks/urls.py | immortal-zeus/Django-task_dis | 10e99997893d4aabc2305969894e49f2873c17b7 | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path, include
from . import views
urlpatterns=[
path("" , views.index, name="index"),
path("signin", views.signin , name="signin" ),
path("login", views.login , name="login"),
path("logout", views.logout , name="logout"),
path("taskcreation", views.task_creation , name="creation"),
path("taskdisplay", views.task_display , name="display"),
path("zzincrezz", views.increase , name="inc")
] | 36.833333 | 64 | 0.660633 | from django.urls import path, include
from . import views
urlpatterns=[
path("" , views.index, name="index"),
path("signin", views.signin , name="signin" ),
path("login", views.login , name="login"),
path("logout", views.logout , name="logout"),
path("taskcreation", views.task_creation , name="creation"),
path("taskdisplay", views.task_display , name="display"),
path("zzincrezz", views.increase , name="inc")
] | 0 | 0 | 0 |
0443ca866f0b8931321991052bcaad04e1392794 | 1,923 | py | Python | setup.py | WillFr/restlax | ec47617d915094137077f641427976f04acd8d47 | [
"Apache-2.0"
] | 1 | 2019-07-03T16:29:05.000Z | 2019-07-03T16:29:05.000Z | setup.py | WillFr/restlax | ec47617d915094137077f641427976f04acd8d47 | [
"Apache-2.0"
] | null | null | null | setup.py | WillFr/restlax | ec47617d915094137077f641427976f04acd8d47 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
import logging
import sys
try:
import multiprocessing
except:
pass
# nose requires multiprocessing and logging to be initialized before the setup
# call, or we'll get a spurious crash on exit.
from setuptools import setup, find_packages
from setuptools.dist import Distribution
is_release = False
if "--release" in sys.argv:
is_release = True
sys.argv.remove("--release")
base = os.path.dirname(os.path.abspath(__file__))
def read(fname):
'''Utility function to read the README file.'''
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# figure out what the install will need
install_requires = [
"setuptools >=0.5",
"flask",
"pyyaml",
"requests",
"schematics",
"python-dateutil",
"requests-futures",
"httpretty",
"aiohttp>=2.3.0",
"aiotask_context",
"pytest-aiohttp",
"pytest-asyncio",
'cryptography',
'python-jose[cryptography]',
'jinja2'
]
setup(
name="rest-helpers",
setup_requires=["vcver"],
vcver={
"is_release": is_release,
"path": base
},
url="https://github.com/WillFr/rest-helpers",
author="Guillaume Grosbois",
author_email="grosbois.guillaume@gmail.com",
description="A set of method to help creating rest services",
packages=find_packages(),
long_description=read('README.md'),
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3.5",
"Operating System :: OS Independent"],
package_data={"rest_helpers": ["templates/swagger-ui.html"]},
install_requires=install_requires,
include_package_data=True,
tests_require=[ "mock >=0.7.2",
"coverage",
"httpretty",
"httmock",
"pytest-aiohttp",
"pytest-cov"] + install_requires
)
| 26.342466 | 78 | 0.637546 | #!/usr/bin/env python3
import os
import logging
import sys
try:
import multiprocessing
except:
pass
# nose requires multiprocessing and logging to be initialized before the setup
# call, or we'll get a spurious crash on exit.
from setuptools import setup, find_packages
from setuptools.dist import Distribution
is_release = False
if "--release" in sys.argv:
is_release = True
sys.argv.remove("--release")
base = os.path.dirname(os.path.abspath(__file__))
def read(fname):
'''Utility function to read the README file.'''
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# figure out what the install will need
install_requires = [
"setuptools >=0.5",
"flask",
"pyyaml",
"requests",
"schematics",
"python-dateutil",
"requests-futures",
"httpretty",
"aiohttp>=2.3.0",
"aiotask_context",
"pytest-aiohttp",
"pytest-asyncio",
'cryptography',
'python-jose[cryptography]',
'jinja2'
]
setup(
name="rest-helpers",
setup_requires=["vcver"],
vcver={
"is_release": is_release,
"path": base
},
url="https://github.com/WillFr/rest-helpers",
author="Guillaume Grosbois",
author_email="grosbois.guillaume@gmail.com",
description="A set of method to help creating rest services",
packages=find_packages(),
long_description=read('README.md'),
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3.5",
"Operating System :: OS Independent"],
package_data={"rest_helpers": ["templates/swagger-ui.html"]},
install_requires=install_requires,
include_package_data=True,
tests_require=[ "mock >=0.7.2",
"coverage",
"httpretty",
"httmock",
"pytest-aiohttp",
"pytest-cov"] + install_requires
)
| 0 | 0 | 0 |
a6d0b99583a05222631b580cf92a0e31f2a809a1 | 930 | py | Python | gbvision/gui/recording_window.py | GreenBlitz/GBVision | 7b6f1dfc09e28ea1e5e771af9cb222412d71c7bb | [
"Apache-2.0"
] | 16 | 2019-04-15T18:52:58.000Z | 2022-02-13T23:00:46.000Z | gbvision/gui/recording_window.py | GreenBlitz/GBVision | 7b6f1dfc09e28ea1e5e771af9cb222412d71c7bb | [
"Apache-2.0"
] | 2 | 2019-04-15T19:00:05.000Z | 2019-04-19T15:47:21.000Z | gbvision/gui/recording_window.py | GreenBlitz/GBVision | 7b6f1dfc09e28ea1e5e771af9cb222412d71c7bb | [
"Apache-2.0"
] | 3 | 2019-05-03T13:48:25.000Z | 2019-09-22T14:03:49.000Z | import abc
from .window import Window
from gbvision.models.system import EMPTY_PIPELINE
from gbvision.utils.recorders.recorder import Recorder
class RecordingWindow(Window, abc.ABC):
"""
A basic window that records the stream it receives
:param recording_pipeline: a drawing pipeline to run on the recorded frame, usually you will want this to be the
same as the drawing pipeline
"""
| 33.214286 | 116 | 0.730108 | import abc
from .window import Window
from gbvision.models.system import EMPTY_PIPELINE
from gbvision.utils.recorders.recorder import Recorder
class RecordingWindow(Window, abc.ABC):
"""
A basic window that records the stream it receives
:param recording_pipeline: a drawing pipeline to run on the recorded frame, usually you will want this to be the
same as the drawing pipeline
"""
def __init__(self, window_name: str, recorder: Recorder, drawing_pipeline=EMPTY_PIPELINE,
recording_pipeline=EMPTY_PIPELINE):
Window.__init__(self, window_name=window_name, drawing_pipeline=drawing_pipeline)
self.recording_pipeline = recording_pipeline
self.recorder = recorder
def show_frame(self, frame):
self.recorder.record(self.recording_pipeline(frame))
return Window.show_frame(self, frame)
def _close(self):
self.recorder.close()
| 437 | 0 | 81 |
13dd397cee2f4d60fd9c511b4a2de5d07bae427e | 419 | py | Python | PaycomUz/admin.py | zuojilei/PaycomUz | 347d679d636130c7067bda559cc6e68b8e530bac | [
"MIT"
] | null | null | null | PaycomUz/admin.py | zuojilei/PaycomUz | 347d679d636130c7067bda559cc6e68b8e530bac | [
"MIT"
] | null | null | null | PaycomUz/admin.py | zuojilei/PaycomUz | 347d679d636130c7067bda559cc6e68b8e530bac | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Transaction
# Register your models here.
admin.site.register(Transaction,TransactionAdmin) | 34.916667 | 93 | 0.704057 | from django.contrib import admin
from .models import Transaction
# Register your models here.
class TransactionAdmin(admin.ModelAdmin):
list_display = ('id', '_id', 'order_id', 'order_type', 'amount', 'status','paid' ,'date')
list_display_links = ('id',)
list_filter = ('status', 'paid')
search_fields = ['order_id', 'status','order_type','id','_id']
admin.site.register(Transaction,TransactionAdmin) | 0 | 251 | 23 |
4c4066c2941dc5990d31373c681a20210815fa33 | 403 | py | Python | tests/app/template_arg_test.py | Zsailer/voila | f523a7e5bacfe9f5757b5d50c64289774f84b96f | [
"BSD-3-Clause"
] | null | null | null | tests/app/template_arg_test.py | Zsailer/voila | f523a7e5bacfe9f5757b5d50c64289774f84b96f | [
"BSD-3-Clause"
] | 2 | 2021-05-10T09:49:48.000Z | 2022-01-22T08:48:42.000Z | tests/app/template_arg_test.py | Zsailer/voila | f523a7e5bacfe9f5757b5d50c64289774f84b96f | [
"BSD-3-Clause"
] | 1 | 2019-09-09T03:30:53.000Z | 2019-09-09T03:30:53.000Z | # tests the --template argument of voila
import pytest
@pytest.fixture
@pytest.mark.gen_test
| 25.1875 | 63 | 0.734491 | # tests the --template argument of voila
import pytest
@pytest.fixture
def voila_args_extra():
return ['--template=test_template']
@pytest.mark.gen_test
def test_template(http_client, base_url):
response = yield http_client.fetch(base_url)
assert response.code == 200
assert 'test_template.css' in response.body.decode('utf-8')
assert 'Hi Voila' in response.body.decode('utf-8')
| 262 | 0 | 44 |
3044c9450f5d3f2c21d0bcc60f6c5651d8a4a9a9 | 163 | py | Python | python/testData/inspections/PyAbstractClassInspection/quickFix/SetImportedABCMetaAsMetaclassPy3/main_after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/inspections/PyAbstractClassInspection/quickFix/SetImportedABCMetaAsMetaclassPy3/main_after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/inspections/PyAbstractClassInspection/quickFix/SetImportedABCMetaAsMetaclassPy3/main_after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | from abc import ABCMeta, abstractmethod | 18.111111 | 39 | 0.693252 | from abc import ABCMeta, abstractmethod
class A1(metaclass=ABCMeta):
@abstractmethod
def m1(self):
pass
class A2(A1, metaclass=ABCMeta):
pass | 5 | 73 | 46 |
81f54154e807884331a884413b5531c2cc3181f7 | 8,750 | py | Python | lib/taurus/qt/qtgui/display/qpixmapwidget.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | null | null | null | lib/taurus/qt/qtgui/display/qpixmapwidget.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | 1 | 2020-02-28T16:36:04.000Z | 2020-03-02T07:51:12.000Z | lib/taurus/qt/qtgui/display/qpixmapwidget.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
"""This module contains a pure Qt widget that displays an image"""
from __future__ import absolute_import
from taurus.external.qt import Qt
__all__ = ["QPixmapWidget"]
__docformat__ = 'restructuredtext'
class QPixmapWidget(Qt.QWidget):
"""This widget displays an image (pixmap). By default the pixmap is
scaled to the widget size and the aspect ratio is kept.
The default alignment of the pixmap inside the widget space is horizontal
left, vertical center."""
DefaultAlignment = Qt.Qt.AlignLeft | Qt.Qt.AlignVCenter
DefaultAspectRatioMode = Qt.Qt.KeepAspectRatio
DefaultTransformationMode = Qt.Qt.SmoothTransformation
def paintEvent(self, paintEvent):
"""Overwrite the paintEvent from QWidget to draw the pixmap"""
pixmap = self._getPixmap()
w, h = self.width(), self.height()
painter = Qt.QPainter(self)
painter.setRenderHint(Qt.QPainter.Antialiasing)
pw, ph = pixmap.width(), pixmap.height()
align = self._alignment
hAlign = align & Qt.Qt.AlignHorizontal_Mask
vAlign = align & Qt.Qt.AlignVertical_Mask
x, y = 0, 0
if hAlign & Qt.Qt.AlignHCenter:
x = (w - pw) // 2
elif hAlign & Qt.Qt.AlignRight:
x = w - pw
if vAlign & Qt.Qt.AlignVCenter:
y = (h - ph) // 2
elif vAlign & Qt.Qt.AlignBottom:
y = h - ph
x, y = max(0, x), max(0, y)
painter.drawPixmap(x, y, pixmap)
@classmethod
#-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# QT property definition
#-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def getPixmap(self):
"""Returns the pixmap.Returns None if no pixmap is set.
:return: the current pixmap
:rtype: PyQt4.Qt.QPixmap"""
return self._pixmap
def setPixmap(self, pixmap):
"""Sets the pixmap for this widget. Setting it to None disables pixmap
:param pixmap: the new pixmap
:type pixmap: PyQt4.Qt.QPixmap"""
# make sure to make a copy because of bug in PyQt 4.4. This is actually
# copying the internal bitmap, just the qpixmap, so there is no performance
# penalty here
self._pixmap = Qt.QPixmap(pixmap)
self._setDirty()
self.update()
def resetPixmap(self):
"""Resets the pixmap for this widget."""
self.setPixmap(Qt.QPixmap())
def getAspectRatioMode(self):
"""Returns the aspect ratio to apply when drawing the pixmap.
:return: the current aspect ratio
:rtype: PyQt4.Qt.AspectRatioMode"""
return self._pixmapAspectRatioMode
def setAspectRatioMode(self, aspect):
"""Sets the aspect ratio mode to apply when drawing the pixmap.
:param pixmap: the new aspect ratio mode
:type pixmap: PyQt4.Qt.AspectRatioMode"""
self._pixmapAspectRatioMode = aspect
self._setDirty()
self.update()
def resetAspectRatioMode(self):
"""Resets the aspect ratio mode to KeepAspectRatio"""
self.setAspectRatioMode(self.DefaultAspectRatioMode)
def getTransformationMode(self):
"""Returns the transformation mode to apply when drawing the pixmap.
:return: the current transformation mode
:rtype: PyQt4.Qt.TransformationMode"""
return self._pixmapTransformationMode
def setTransformationMode(self, transformation):
"""Sets the transformation mode to apply when drawing the pixmap.
:param pixmap: the new transformation mode
:type pixmap: PyQt4.Qt.TransformationMode"""
self._pixmapTransformationMode = transformation
self._setDirty()
self.update()
def resetTransformationMode(self):
"""Resets the transformation mode to SmoothTransformation"""
self.setTransformationMode(self.DefaultTransformationMode)
def getAlignment(self):
"""Returns the alignment to apply when drawing the pixmap.
:return: the current alignment
:rtype: PyQt4.Qt.Alignment"""
return self._alignment
def setAlignment(self, alignment):
"""Sets the alignment to apply when drawing the pixmap.
:param pixmap: the new alignment
:type pixmap: PyQt4.Qt.Alignment"""
self._alignment = Qt.Qt.Alignment(alignment)
self.update()
def resetAlignment(self):
"""Resets the transformation mode to Qt.Qt.AlignLeft | Qt.Qt.AlignVCenter"""
self.setAlignment(self.DefaultAlignment)
#: This property holds the widget's pixmap
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getPixmap`
#: * :meth:`QPixmapWidget.setPixmap`
#: * :meth:`QPixmapWidget.resetLedStatus`
pixmap = Qt.pyqtProperty("QPixmap", getPixmap, setPixmap,
resetPixmap, doc="the widget's pixmap")
#: This property holds the widget's pixmap aspect ratio mode
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getAspectRatioMode`
#: * :meth:`QPixmapWidget.setAspectRatioMode`
#: * :meth:`QPixmapWidget.resetAspectRatioMode`
aspectRatioMode = Qt.pyqtProperty("Qt::AspectRatioMode", getAspectRatioMode,
setAspectRatioMode, resetAspectRatioMode,
doc="the widget's pixmap aspect ratio mode")
#: This property holds the widget's pixmap transformation mode
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getTransformationMode`
#: * :meth:`QPixmapWidget.setTransformationMode`
#: * :meth:`QPixmapWidget.resetTransformationMode`
transformationMode = Qt.pyqtProperty("Qt::TransformationMode", getTransformationMode,
setTransformationMode, resetTransformationMode,
doc="the widget's pixmap transformation mode")
#: This property holds the widget's pixmap alignment
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getAlignment`
#: * :meth:`QPixmapWidget.setAlignment`
#: * :meth:`QPixmapWidget.resetAlignment`
alignment = Qt.pyqtProperty("Qt::Alignment", getAlignment, setAlignment,
resetAlignment, doc="the widget's pixmap alignment")
def demo():
"QPixmap Widget"
from .demo import qpixmapwidgetdemo # after futurize stage1
return qpixmapwidgetdemo.main()
if __name__ == "__main__":
main()
| 36.307054 | 89 | 0.623771 | #!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
"""This module contains a pure Qt widget that displays an image"""
from __future__ import absolute_import
from taurus.external.qt import Qt
__all__ = ["QPixmapWidget"]
__docformat__ = 'restructuredtext'
class QPixmapWidget(Qt.QWidget):
"""This widget displays an image (pixmap). By default the pixmap is
scaled to the widget size and the aspect ratio is kept.
The default alignment of the pixmap inside the widget space is horizontal
left, vertical center."""
DefaultAlignment = Qt.Qt.AlignLeft | Qt.Qt.AlignVCenter
DefaultAspectRatioMode = Qt.Qt.KeepAspectRatio
DefaultTransformationMode = Qt.Qt.SmoothTransformation
def __init__(self, parent=None, designMode=False):
self._pixmap = Qt.QPixmap()
self._pixmapDrawn = None
self._alignment = self.DefaultAlignment
self._pixmapAspectRatioMode = self.DefaultAspectRatioMode
self._pixmapTransformationMode = self.DefaultTransformationMode
Qt.QWidget.__init__(self, parent)
def _getPixmap(self):
if self._pixmapDrawn is None:
self._pixmapDrawn = self.recalculatePixmap()
return self._pixmapDrawn
def recalculatePixmap(self):
origPixmap = self._pixmap
if origPixmap.isNull():
return origPixmap
return origPixmap.scaled(self.size(), self._pixmapAspectRatioMode,
self._pixmapTransformationMode)
def _setDirty(self):
self._pixmapDrawn = None
def paintEvent(self, paintEvent):
"""Overwrite the paintEvent from QWidget to draw the pixmap"""
pixmap = self._getPixmap()
w, h = self.width(), self.height()
painter = Qt.QPainter(self)
painter.setRenderHint(Qt.QPainter.Antialiasing)
pw, ph = pixmap.width(), pixmap.height()
align = self._alignment
hAlign = align & Qt.Qt.AlignHorizontal_Mask
vAlign = align & Qt.Qt.AlignVertical_Mask
x, y = 0, 0
if hAlign & Qt.Qt.AlignHCenter:
x = (w - pw) // 2
elif hAlign & Qt.Qt.AlignRight:
x = w - pw
if vAlign & Qt.Qt.AlignVCenter:
y = (h - ph) // 2
elif vAlign & Qt.Qt.AlignBottom:
y = h - ph
x, y = max(0, x), max(0, y)
painter.drawPixmap(x, y, pixmap)
def resizeEvent(self, event):
self._setDirty()
return Qt.QWidget.resizeEvent(self, event)
@classmethod
def getQtDesignerPluginInfo(cls):
return {
'module': 'taurus.qt.qtgui.display',
'group': 'Taurus Display',
'icon': "designer:graphicsview.png",
'container': False,
}
#-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# QT property definition
#-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def getPixmap(self):
"""Returns the pixmap.Returns None if no pixmap is set.
:return: the current pixmap
:rtype: PyQt4.Qt.QPixmap"""
return self._pixmap
def setPixmap(self, pixmap):
"""Sets the pixmap for this widget. Setting it to None disables pixmap
:param pixmap: the new pixmap
:type pixmap: PyQt4.Qt.QPixmap"""
# make sure to make a copy because of bug in PyQt 4.4. This is actually
# copying the internal bitmap, just the qpixmap, so there is no performance
# penalty here
self._pixmap = Qt.QPixmap(pixmap)
self._setDirty()
self.update()
def resetPixmap(self):
"""Resets the pixmap for this widget."""
self.setPixmap(Qt.QPixmap())
def getAspectRatioMode(self):
"""Returns the aspect ratio to apply when drawing the pixmap.
:return: the current aspect ratio
:rtype: PyQt4.Qt.AspectRatioMode"""
return self._pixmapAspectRatioMode
def setAspectRatioMode(self, aspect):
"""Sets the aspect ratio mode to apply when drawing the pixmap.
:param pixmap: the new aspect ratio mode
:type pixmap: PyQt4.Qt.AspectRatioMode"""
self._pixmapAspectRatioMode = aspect
self._setDirty()
self.update()
def resetAspectRatioMode(self):
"""Resets the aspect ratio mode to KeepAspectRatio"""
self.setAspectRatioMode(self.DefaultAspectRatioMode)
def getTransformationMode(self):
"""Returns the transformation mode to apply when drawing the pixmap.
:return: the current transformation mode
:rtype: PyQt4.Qt.TransformationMode"""
return self._pixmapTransformationMode
def setTransformationMode(self, transformation):
"""Sets the transformation mode to apply when drawing the pixmap.
:param pixmap: the new transformation mode
:type pixmap: PyQt4.Qt.TransformationMode"""
self._pixmapTransformationMode = transformation
self._setDirty()
self.update()
def resetTransformationMode(self):
"""Resets the transformation mode to SmoothTransformation"""
self.setTransformationMode(self.DefaultTransformationMode)
def getAlignment(self):
"""Returns the alignment to apply when drawing the pixmap.
:return: the current alignment
:rtype: PyQt4.Qt.Alignment"""
return self._alignment
def setAlignment(self, alignment):
"""Sets the alignment to apply when drawing the pixmap.
:param pixmap: the new alignment
:type pixmap: PyQt4.Qt.Alignment"""
self._alignment = Qt.Qt.Alignment(alignment)
self.update()
def resetAlignment(self):
"""Resets the transformation mode to Qt.Qt.AlignLeft | Qt.Qt.AlignVCenter"""
self.setAlignment(self.DefaultAlignment)
#: This property holds the widget's pixmap
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getPixmap`
#: * :meth:`QPixmapWidget.setPixmap`
#: * :meth:`QPixmapWidget.resetLedStatus`
pixmap = Qt.pyqtProperty("QPixmap", getPixmap, setPixmap,
resetPixmap, doc="the widget's pixmap")
#: This property holds the widget's pixmap aspect ratio mode
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getAspectRatioMode`
#: * :meth:`QPixmapWidget.setAspectRatioMode`
#: * :meth:`QPixmapWidget.resetAspectRatioMode`
aspectRatioMode = Qt.pyqtProperty("Qt::AspectRatioMode", getAspectRatioMode,
setAspectRatioMode, resetAspectRatioMode,
doc="the widget's pixmap aspect ratio mode")
#: This property holds the widget's pixmap transformation mode
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getTransformationMode`
#: * :meth:`QPixmapWidget.setTransformationMode`
#: * :meth:`QPixmapWidget.resetTransformationMode`
transformationMode = Qt.pyqtProperty("Qt::TransformationMode", getTransformationMode,
setTransformationMode, resetTransformationMode,
doc="the widget's pixmap transformation mode")
#: This property holds the widget's pixmap alignment
#:
#: **Access functions:**
#:
#: * :meth:`QPixmapWidget.getAlignment`
#: * :meth:`QPixmapWidget.setAlignment`
#: * :meth:`QPixmapWidget.resetAlignment`
alignment = Qt.pyqtProperty("Qt::Alignment", getAlignment, setAlignment,
resetAlignment, doc="the widget's pixmap alignment")
def demo():
"QPixmap Widget"
from .demo import qpixmapwidgetdemo # after futurize stage1
return qpixmapwidgetdemo.main()
def main():
return demo()
if __name__ == "__main__":
main()
| 1,030 | 0 | 184 |
4f39c735bd9317835a4ebc227e9c56bb001995cb | 10,516 | py | Python | tests/xpath/test_abbreviations.py | gabrielfalcao/dominic | a42f418fc288f3b70cb95847b405eaf7b83bb3a0 | [
"MIT"
] | 2 | 2021-01-28T10:21:31.000Z | 2021-04-22T08:39:49.000Z | tests/xpath/test_abbreviations.py | gabrielfalcao/dominic | a42f418fc288f3b70cb95847b405eaf7b83bb3a0 | [
"MIT"
] | null | null | null | tests/xpath/test_abbreviations.py | gabrielfalcao/dominic | a42f418fc288f3b70cb95847b405eaf7b83bb3a0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import unittest
import xml.dom.minidom
from dominic import xpath
class TestAbbreviations(unittest.TestCase):
"""Section 2.5: Abbreviated Syntax"""
if __name__ == '__main__':
unittest.main()
| 36.641115 | 78 | 0.451788 | #!/usr/bin/env python
import unittest
import xml.dom.minidom
from dominic import xpath
class TestAbbreviations(unittest.TestCase):
"""Section 2.5: Abbreviated Syntax"""
def test_para_children(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" />
<div id="2" />
<para id="3" />
</doc>
""").documentElement
result = xpath.find('para', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1", "3"])
def test_all_children(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" />
<div id="2" />
<para id="3" />
</doc>
""").documentElement
result = xpath.find('*', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1", "2", "3"])
def test_text_children(self):
doc = xml.dom.minidom.parseString("""
<doc>This is <i>some</i> text.</doc>
""").documentElement
result = xpath.find('text()', doc)
self.failUnlessEqual([x.data for x in result],
["This is ", " text."])
def test_named_attribute(self):
doc = xml.dom.minidom.parseString("""
<doc name="foo" value="bar" />
""").documentElement
result = xpath.find('@name', doc)
self.failUnlessEqual([(x.name, x.value) for x in result],
[('name', 'foo')])
def test_all_attributes(self):
doc = xml.dom.minidom.parseString("""
<doc name="foo" value="bar" />
""").documentElement
result = xpath.find('@*', doc)
self.failUnlessEqual([(x.name, x.value) for x in result],
[('name', 'foo'), ('value', 'bar')])
def test_first_child(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" /><para id="2" /><para id="3" />
</doc>
""").documentElement
result = xpath.find('para[1]', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1"])
def test_last_child(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" /><para id="2" /><para id="3" />
</doc>
""").documentElement
result = xpath.find('para[last()]', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["3"])
def test_grandchildren(self):
doc = xml.dom.minidom.parseString("""
<doc>
<chapter><para id="1" /><para id="2" /></chapter>
<section><para id="3" /><sub><para id="4" /></sub></section>
<para id="4" />
</doc>
""").documentElement
result = xpath.find('*/para', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1", "2", "3"])
def test_section_5_2(self):
doc = xml.dom.minidom.parseString("""
<doc>
<chapter id="1" /><chapter id="2" /><chapter id="3" />
<chapter id="4">
<section id="4.1" /><section id="4.2" /><section id="4.3" />
</chapter>
<chapter id="5">
<section id="5.1" /><section id="5.2" /><section id="5.3" />
</chapter>
</doc>
""").documentElement
result = xpath.find('/doc/chapter[5]/section[2]', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["5.2"])
def test_child_descendant(self):
doc = xml.dom.minidom.parseString("""
<doc>
<chapter><para id="1" /><para id="2" /></chapter>
<chapter><section><para id="3" /></section></chapter>
<para id="4" />
</doc>
""").documentElement
result = xpath.find('chapter//para', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1", "2", "3"])
def test_absolute_descendant_or_self(self):
doc = xml.dom.minidom.parseString("""
<para id="0">
<div id="1" />
<para id="2">
<para id="3" />
</para>
</para>
""").documentElement
node = xpath.findnode('//para[@id="2"]', doc)
result = xpath.find('//para', node)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["0", "2", "3"])
def test_olist_item(self):
doc = xml.dom.minidom.parseString("""
<doc>
<item id="1">
<context />
<olist><item id="2" /></olist>
</item>
<olist><item id="3" /></olist>
</doc>
""").documentElement
node = xpath.findnode('//context', doc)
result = xpath.find('//olist/item', node)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["2", "3"])
def test_self(self):
doc = xml.dom.minidom.parseString("""
<doc id="0">
<para id="1"/>
</doc>
""").documentElement
result = xpath.find('.', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["0"])
def test_relative_descendant_or_self(self):
doc = xml.dom.minidom.parseString("""
<para id="0">
<div id="1" />
<para id="2">
<para id="3" />
</para>
</para>
""").documentElement
node = xpath.findnode('//para[@id="2"]', doc)
result = xpath.find('.//para', node)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["3"])
def test_parent(self):
doc = xml.dom.minidom.parseString("""
<doc id="0">
<chapter id="1">
<item id="2" />
<item id="3"><subitem id="4" /></item>
</chapter>
</doc>
""").documentElement
node = xpath.findnode('//item[@id="3"]', doc)
result = xpath.find('..', node)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1"])
def test_parent_attr(self):
doc = xml.dom.minidom.parseString("""
<doc id="0">
<chapter id="1" lang="en">
<item id="2" />
<item id="3"><subitem id="4" /></item>
</chapter>
</doc>
""").documentElement
node = xpath.findnode('//item[@id="3"]', doc)
result = xpath.find('../@lang', node)
self.failUnlessEqual([x.value for x in result],
["en"])
def test_attr_equal(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" type="info" />
<para id="2" type="warning" />
<para id="3" type="warning" />
<para id="4" type="error" />
</doc>
""").documentElement
result = xpath.find('para[@type="warning"]', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["2", "3"])
def test_fifth_warning(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" type="info" />
<para id="2" type="warning" />
<para id="3" type="warning" />
<para id="4" type="warning" />
<para id="5" type="error" />
<para id="6" type="warning" />
<para id="7" type="warning" />
</doc>
""").documentElement
result = xpath.find(
'para[@type="warning"][5]', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["7"])
def test_fifth_if_warning(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" type="info" />
<para id="2" type="warning" />
<para id="3" type="warning" />
<para id="4" type="warning" />
<para id="5" type="error" />
<para id="6" type="warning" />
<para id="7" type="warning" />
</doc>
""").documentElement
result = xpath.find(
'para[5][@type="warning"]', doc)
self.failUnlessEqual(result, [])
def test_introductions(self):
doc = xml.dom.minidom.parseString("""
<doc>
<chapter id="1" />
<chapter id="2"><title>Introduction</title></chapter>
<chapter id="3"><title>Body</title></chapter>
<chapter id="4">
<title>Another</title>
<title>Introduction</title>
</chapter>
</doc>
""").documentElement
result = xpath.find("chapter[title='Introduction']", doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["2", "4"])
def test_titles(self):
doc = xml.dom.minidom.parseString("""
<doc>
<chapter id="1" />
<chapter id="2"><title /></chapter>
<chapter id="3"><title /><title /></chapter>
</doc>
""").documentElement
result = xpath.find("chapter[title]", doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["2", "3"])
def test_secretary_and_assistant(self):
doc = xml.dom.minidom.parseString("""
<doc>
<employee name="Alice" />
<employee name="Bob" secretary="Cathy" />
<employee name="Dianne" secretary="Edward" assistant="Fran" />
</doc>
""").documentElement
result = xpath.find("employee[@secretary and @assistant]", doc)
self.failUnlessEqual([x.getAttribute("name") for x in result],
["Dianne"])
# Run the whole XPath test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 9,699 | 0 | 594 |
160c18131c3c7476a6e988df71b5c034c08dae47 | 1,162 | py | Python | rdict/meldict.py | danse-macabre/rdict | 78f4a74e82b725063793e0c580245bcaa8355da4 | [
"MIT"
] | null | null | null | rdict/meldict.py | danse-macabre/rdict | 78f4a74e82b725063793e0c580245bcaa8355da4 | [
"MIT"
] | null | null | null | rdict/meldict.py | danse-macabre/rdict | 78f4a74e82b725063793e0c580245bcaa8355da4 | [
"MIT"
] | null | null | null | from itertools import tee, zip_longest
from .rangedict import RangeDict
def pairwise(iterable, longest=True):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    left, right = tee(iterable)
    next(right, None)  # advance the second iterator by one element
    zipper = zip_longest if longest else zip
    return zipper(left, right)
| 27.023256 | 85 | 0.576592 | from itertools import tee, zip_longest
from .rangedict import RangeDict
class MeldDict(RangeDict):
    """RangeDict variant that *melds* new values into already-occupied ranges.

    Where ``occupy`` overlaps an existing region, the stored value becomes
    ``self._meld(old, new)`` instead of being overwritten outright.
    """

    def occupy(self, start, end, value, **meld_opts):
        # Existing regions intersecting [start, end); presumably sorted by
        # start (depends on RangeDict.irange -- TODO confirm).
        overlapped = list(self.irange(start, end))
        if not overlapped:
            self._occupy(start, end, value)
            return
        # Fill the uncovered prefix before the first existing region.
        if start < overlapped[0].start:
            self._occupy(start, overlapped[0].start, value)
        _end = None
        for reg, next_reg in pairwise(overlapped):
            if next_reg is not None:
                # Fill the gap between two consecutive existing regions.
                self._occupy(reg.end, next_reg.start, value)
                _end = min(end, reg.end)
                _start = max(start, reg.start)
                if _start < _end:
                    # Overlapping portion: combine old and new values.
                    self._occupy(_start, _end, self._meld(reg.value, value, **meld_opts))
                continue
            break
        # Fill the uncovered suffix past the last processed region.
        # NOTE(review): when exactly one region overlaps, pairwise yields
        # (reg, None) immediately, leaving _end = None; `end > None` raises
        # TypeError on Python 3 -- confirm intended control flow upstream.
        if end > _end:
            self._occupy(_end, end, value)

    def _meld(self, old_val, new_val, **kwargs):  # pylint:disable=unused-argument
        """Default meld policy: the new value simply replaces the old one."""
        return new_val
def pairwise(iterable, longest=True):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    a, b = tee(iterable)
    next(b, None)
    if longest:
        return zip_longest(a, b)
    return zip(a, b)
| 829 | 5 | 77 |
72af7e53f67960340cc1540cc37442a4bd36cd3d | 8,344 | py | Python | waifustream/index.py | stmobo/waifustream | 04415f5143454458417a589f12c45df533d275c7 | [
"MIT"
] | 1 | 2021-09-16T22:37:56.000Z | 2021-09-16T22:37:56.000Z | waifustream/index.py | stmobo/waifustream | 04415f5143454458417a589f12c45df533d275c7 | [
"MIT"
] | null | null | null | waifustream/index.py | stmobo/waifustream | 04415f5143454458417a589f12c45df533d275c7 | [
"MIT"
] | 1 | 2021-09-16T22:37:57.000Z | 2021-09-16T22:37:57.000Z | import asyncio
import io
import sys
import aiohttp
import aioredis
import attr
from PIL import Image
import imagehash
import numpy as np
"""Posts with these tags will be excluded from indexing.
"""
exclude_tags = [
"loli",
"shota",
"bestiality",
"guro",
"shadman"
]
"""A helper dictionary to convert from single-character ratings to more human-friendly names.
"""
friendly_ratings = {
's': 'Safe',
'q': 'Questionable',
'e': 'Explicit'
}
async def search_index(redis, imhash, min_threshold=64):
    """Search the index for images with nearby hashes.

    Note: a stray ``@attr.s(frozen=True)`` decorator (residue of a removed
    class) used to precede this coroutine; ``attr.s`` expects a class and
    breaks the function, so it has been removed.

    Args:
        redis (aioredis.Redis): A Redis interface.
        imhash (ndarray): An image hash to look up. Must be of type `uint8`.
        min_threshold (int): A minimum distance threshold for filtering results.
            The result list will only contain images with a result less than
            this value.

    Returns:
        A list of (hash, distance) tuples, sorted by increasing distance.
    """
    h_bytes = imhash.tobytes()

    # One bucket key per (byte position, byte value); the union of those
    # sets is the candidate pool of stored hashes sharing >= 1 byte.
    keys = [construct_hash_idx_key(idx, val) for idx, val in enumerate(h_bytes)]
    hashes = await redis.sunion(*keys)

    candidates = []
    for h in hashes:
        arr = np.frombuffer(h, dtype=np.uint8)
        dist = hamming_dist(arr, imhash)

        if dist < min_threshold:
            candidates.append((h, dist))

    return sorted(candidates, key=lambda pair: pair[1])
async def get_indexed_tags(redis):
    """Get all tags monitored for indexing.

    Args:
        redis (aioredis.Redis): a Redis interface.

    Returns:
        A list of monitored tags as `str` objects.
    """
    tags = await redis.lrange('indexed_tags', 0, -1, encoding='utf-8')
    return tags
async def add_indexed_tag(redis, tag):
    """Add a new tag to be monitored for indexing.

    Args:
        redis (aioredis.Redis): A Redis interface.
        tag (str or bytes): The tag to monitor.

    Returns:
        The total number of indexed tags (incl. the added tag).
    """
    new_length = await redis.lpush('indexed_tags', tag)
    return new_length
async def get_tag_queue_length(redis, tag):
    """Get the current fetch queue length for a given indexed tag.

    Args:
        redis (aioredis.Redis): A Redis interface.
        tag (str or bytes): The indexed tag to inspect.

    Returns:
        The total number of images awaiting indexing for the tag.
    """
    queue_key = 'index_queue:' + tag
    return await redis.llen(queue_key)
def diff_hash(img):
    """Compute the difference hash of an image.

    Returns:
        A `uint8` ndarray.
    """
    bits = imagehash.dhash(img).hash.flatten()
    return np.packbits(np.where(bits, 1, 0))
def avg_hash(img):
    """Compute the average hash of an image.

    Returns:
        A `uint8` ndarray.
    """
    bits = imagehash.average_hash(img).hash.flatten()
    return np.packbits(np.where(bits, 1, 0))
def combined_hash(img):
    """Compute a combined perceptual hash for an image.

    Currently, this is just the concatenation of the dHash and the avgHash.

    Returns:
        A `uint8` ndarray.
    """
    d_bits = imagehash.dhash(img).hash.flatten()
    a_bits = imagehash.average_hash(img).hash.flatten()
    packed_d = np.packbits(np.where(d_bits, 1, 0))
    packed_a = np.packbits(np.where(a_bits, 1, 0))
    return np.concatenate((packed_d, packed_a))
def hamming_dist(h1, h2):
    """Compute the Hamming distance between two uint8 arrays.
    """
    differing_bits = np.unpackbits(np.bitwise_xor(h1, h2))
    return np.count_nonzero(differing_bits)
| 28.094276 | 93 | 0.565916 | import asyncio
import io
import sys
import aiohttp
import aioredis
import attr
from PIL import Image
import imagehash
import numpy as np
"""Posts with these tags will be excluded from indexing.
"""
exclude_tags = [
"loli",
"shota",
"bestiality",
"guro",
"shadman"
]
"""A helper dictionary to convert from single-character ratings to more human-friendly names.
"""
friendly_ratings = {
's': 'Safe',
'q': 'Questionable',
'e': 'Explicit'
}
def construct_hash_idx_key(idx, val):
    """Build the Redis key for the (byte position, byte value) hash bucket."""
    key = 'hash_idx:{:02d}:{:02x}'.format(idx, val)
    return key.encode('utf-8')
@attr.s(frozen=True)
class IndexEntry(object):
    """Immutable record of one indexed image: its perceptual hash plus
    source metadata (site, post id, URL, characters, rating)."""

    def _cvt_imhash(h):
        # attrs converter (invoked without ``self``): normalise an ndarray,
        # bytes-like object or None to raw ``bytes``.
        if isinstance(h, np.ndarray):
            return h.tobytes()
        elif h is None:
            return None
        else:
            return bytes(h)

    imhash: bytes = attr.ib(converter=_cvt_imhash)  # raw perceptual hash
    src: str = attr.ib(converter=str)               # source site, e.g. 'danbooru'
    src_id: str = attr.ib(converter=str)            # post id on the source site
    src_url: str = attr.ib(converter=str)           # direct image URL
    characters: tuple = attr.ib(converter=tuple)    # tagged character names
    rating: str = attr.ib(converter=str)            # 's'/'q'/'e' (see friendly_ratings)

    async def fetch_bytesio(self, http_sess):
        """Download the source image for this entry.

        Returns:
            A `BytesIO` containing the raw image file data.
        """
        bio = io.BytesIO()
        async with http_sess.get(self.src_url) as resp:
            # Stream the body in 8 KiB chunks instead of buffering it at once.
            while True:
                chunk = await resp.content.read(8*1024)
                if not chunk:
                    break
                bio.write(chunk)
        # Rewind so callers can read from the start.
        bio.seek(0)
        return bio

    async def fetch(self, http_sess):
        """Fetch and open the source image for this entry.

        Returns:
            An Image.
        """
        bio = await self.fetch_bytesio(http_sess)
        img = Image.open(bio)
        # Force a full decode while the buffer is still available.
        img.load()
        return img

    @property
    def imhash_array(self):
        """ndarray: The image hash for this entry, as a uint8 `ndarray`.
        """
        return np.frombuffer(self.imhash, dtype=np.uint8)

    @classmethod
    def from_danbooru_post(cls, imhash, post):
        """Create an IndexEntry from an image hash and a DanbooruPost.

        Args:
            imhash (bytes or ndarray): An image hash for this entry.
            post (DanbooruPost): A DanbooruPost to create this entry from.

        Returns:
            An IndexEntry.
        """
        return cls(
            imhash=imhash,
            src_id=post.id,
            src_url=post.url,
            src='danbooru',
            characters=post.characters,
            rating=post.rating
        )

    @classmethod
    async def load_from_index(cls, redis, imhash):
        """Load the entry for a given image hash from the index.

        Args:
            redis (aioredis.Redis): A Redis instance.
            imhash (bytes or ndarray): An image hash to lookup.

        Raises:
            KeyError: If the given image hash is not in the index.

        Returns:
            An IndexEntry.
        """
        imhash = cls._cvt_imhash(imhash)
        ex = await redis.exists(b'hash:'+imhash+b':src')
        if not ex:
            raise KeyError("Image with hash "+imhash.hex()+" not found in index")
        # Fetch all per-hash fields concurrently.
        src, src_id, src_url, rating, characters = await asyncio.gather(
            redis.get(b'hash:'+imhash+b':src'),
            redis.get(b'hash:'+imhash+b':src_id'),
            redis.get(b'hash:'+imhash+b':src_url'),
            redis.get(b'hash:'+imhash+b':rating'),
            redis.smembers(b'hash:'+imhash+b':characters')
        )
        return cls(
            imhash=imhash,
            src=src.decode('utf-8'),
            src_id=src_id.decode('utf-8'),
            src_url=src_url.decode('utf-8'),
            characters=map(lambda c: c.decode('utf-8'), characters),
            rating=rating.decode('utf-8')
        )

    async def add_to_index(self, redis):
        """Add this entry to the index.

        Args:
            redis (aioredis.Redis): A Redis instance.

        Returns:
            bool: True if the entry was added, False if it already exists.
        """
        # Mark the source post as indexed even when the hash already exists.
        await redis.sadd('indexed:'+self.src, self.src_id)
        ex = await redis.get(b'hash:'+self.imhash+b':src_id')
        if ex is not None:
            return False
        # Write all fields and bucket-index memberships atomically.
        tr = redis.multi_exec()
        tr.set(b'hash:'+self.imhash+b':src', self.src)
        tr.set(b'hash:'+self.imhash+b':src_id', self.src_id)
        tr.set(b'hash:'+self.imhash+b':src_url', self.src_url)
        tr.set(b'hash:'+self.imhash+b':rating', self.rating)
        for idx, val in enumerate(self.imhash):
            tr.sadd(construct_hash_idx_key(idx, val), self.imhash)
        if len(self.characters) > 0:
            tr.sadd(b'hash:'+self.imhash+b':characters', *self.characters)
        for character in self.characters:
            b_char = character.encode('utf-8')
            tr.sadd(b'character:'+b_char, self.imhash)
        # NOTE(review): `res` is unused; transaction errors are not inspected.
        res = await tr.execute()
        return True
async def search_index(redis, imhash, min_threshold=64):
    """Search the index for stored hashes close to ``imhash``.

    Args:
        redis (aioredis.Redis): A Redis interface.
        imhash (ndarray): An image hash to look up. Must be of type `uint8`.
        min_threshold (int): Only matches whose Hamming distance is strictly
            below this value are returned.

    Returns:
        A list of (hash, distance) tuples, sorted by increasing distance.
    """
    raw_hash = imhash.tobytes()
    # One bucket key per (byte position, byte value); the union of those
    # sets yields every stored hash sharing at least one byte with ours.
    bucket_keys = [construct_hash_idx_key(pos, byte)
                   for pos, byte in enumerate(raw_hash)]
    candidates = await redis.sunion(*bucket_keys)

    scored = []
    for raw_candidate in candidates:
        candidate = np.frombuffer(raw_candidate, dtype=np.uint8)
        distance = hamming_dist(candidate, imhash)
        if distance < min_threshold:
            scored.append((raw_candidate, distance))

    scored.sort(key=lambda pair: pair[1])
    return scored
async def get_indexed_tags(redis):
    """Get all tags monitored for indexing.

    Args:
        redis (aioredis.Redis): a Redis interface.

    Returns:
        A list of monitored tags as `str` objects.
    """
    tags = await redis.lrange('indexed_tags', 0, -1, encoding='utf-8')
    return tags
async def add_indexed_tag(redis, tag):
    """Add a new tag to be monitored for indexing.

    Args:
        redis (aioredis.Redis): A Redis interface.
        tag (str or bytes): The tag to monitor.

    Returns:
        The total number of indexed tags (incl. the added tag).
    """
    new_length = await redis.lpush('indexed_tags', tag)
    return new_length
async def get_tag_queue_length(redis, tag):
    """Get the current fetch queue length for a given indexed tag.

    Args:
        redis (aioredis.Redis): A Redis interface.
        tag (str or bytes): The indexed tag to inspect.

    Returns:
        The total number of images awaiting indexing for the tag.
    """
    queue_key = 'index_queue:' + tag
    return await redis.llen(queue_key)
def diff_hash(img):
    """Compute the difference hash of an image.

    Returns:
        A `uint8` ndarray.
    """
    bits = imagehash.dhash(img).hash.flatten()
    return np.packbits(np.where(bits, 1, 0))
def avg_hash(img):
    """Compute the average hash of an image.

    Returns:
        A `uint8` ndarray.
    """
    bits = imagehash.average_hash(img).hash.flatten()
    return np.packbits(np.where(bits, 1, 0))
def combined_hash(img):
    """Compute a combined perceptual hash for an image.

    Currently, this is just the concatenation of the dHash and the avgHash.

    Returns:
        A `uint8` ndarray.
    """
    d_bits = imagehash.dhash(img).hash.flatten()
    a_bits = imagehash.average_hash(img).hash.flatten()
    packed_d = np.packbits(np.where(d_bits, 1, 0))
    packed_a = np.packbits(np.where(a_bits, 1, 0))
    return np.concatenate((packed_d, packed_a))
def hamming_dist(h1, h2):
    """Compute the Hamming distance between two uint8 arrays.
    """
    differing_bits = np.unpackbits(np.bitwise_xor(h1, h2))
    return np.count_nonzero(differing_bits)
| 242 | 4,443 | 45 |
df21c05717c0b8a60809bac83ad0b26927761081 | 481 | py | Python | psqlextra/partitioning/config.py | adamchainz/django-postgres-extra | c11dbb5b75e16f7bd8fd336cc051806cf587269f | [
"MIT"
] | 529 | 2017-03-20T08:16:30.000Z | 2022-03-31T13:23:09.000Z | psqlextra/partitioning/config.py | adamchainz/django-postgres-extra | c11dbb5b75e16f7bd8fd336cc051806cf587269f | [
"MIT"
] | 137 | 2017-06-08T07:59:22.000Z | 2022-02-07T08:34:38.000Z | psqlextra/partitioning/config.py | adamchainz/django-postgres-extra | c11dbb5b75e16f7bd8fd336cc051806cf587269f | [
"MIT"
] | 67 | 2017-06-21T10:01:13.000Z | 2022-02-24T21:23:24.000Z | from psqlextra.models import PostgresPartitionedModel
from .strategy import PostgresPartitioningStrategy
class PostgresPartitioningConfig:
    """Configuration for partitioning a specific model according to the
    specified strategy."""
    # NOTE(review): the class body was elided upstream -- only the docstring
    # remains in this copy; see the full definition for __init__.
__all__ = ["PostgresPartitioningConfig"]
| 24.05 | 71 | 0.727651 | from psqlextra.models import PostgresPartitionedModel
from .strategy import PostgresPartitioningStrategy
class PostgresPartitioningConfig:
    """Configuration for partitioning a specific model according to the
    specified strategy."""

    def __init__(
        self,
        model: PostgresPartitionedModel,
        strategy: PostgresPartitioningStrategy,
    ) -> None:
        # Pair the partitioned model with the strategy that decides how
        # its partitions are created and deleted.
        self.model = model
        self.strategy = strategy
__all__ = ["PostgresPartitioningConfig"]
| 170 | 0 | 27 |
7fd603243547da6a03ff3876c283cfae624595d7 | 3,645 | py | Python | examples/faq_matcher_retrieve.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 9 | 2017-10-10T12:24:23.000Z | 2021-08-18T14:07:51.000Z | examples/faq_matcher_retrieve.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 1 | 2020-12-06T11:03:25.000Z | 2021-04-14T05:21:23.000Z | examples/faq_matcher_retrieve.py | praekelt/feersum-nlu-api-wrappers | 6580e2bab2c8a764fe868a505330b3fee6029074 | [
"BSD-3-Clause"
] | 2 | 2019-02-12T08:26:06.000Z | 2022-02-01T09:39:47.000Z | """ Example: Shows how to create, train and use an FAQ matcher. """
import urllib3
import csv
import feersum_nlu
from feersum_nlu.rest import ApiException
from examples import feersumnlu_host, feersum_nlu_auth_token
# Configure API key authorization: APIKeyHeader
configuration = feersum_nlu.Configuration()
# configuration.api_key['AUTH_TOKEN'] = feersum_nlu_auth_token
configuration.api_key['X-Auth-Token'] = feersum_nlu_auth_token # Alternative auth key header!
configuration.host = feersumnlu_host
api_instance = feersum_nlu.FaqMatchersApi(feersum_nlu.ApiClient(configuration))
instance_name = 'test_faq'
# text_input_0 = feersum_nlu.TextInput("Can I give my baby tea?",
# lang_code=None) # optional language hint.
caller_name = 'example_caller'
print()
# The testing samples.
text_sample_list = []
with open('testing_samples.csv',
'r', newline='') as csvfile:
csv_reader = csv.reader(csvfile,
delimiter=',',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
for row in csv_reader:
if len(row) >= 3:
lang_code = row[2] if row[2] != '' else None
else:
lang_code = None
text_sample_list.append(feersum_nlu.LabelledTextSample(text=row[1],
label=row[0],
lang_code=lang_code))
try:
# print("Get the parameters:")
# api_response = api_instance.faq_matcher_get_params(instance_name)
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# print()
#
# print("Update the parameters:")
# model_params = \
# feersum_nlu.ModelParams(threshold=1.1)
# api_response = api_instance.faq_matcher_set_params(instance_name, model_params)
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# print()
#
# print("Get the details of specific named loaded FAQ matcher:")
# api_response = api_instance.faq_matcher_get_details(instance_name)
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# cm_labels = api_response.cm_labels
# print()
print("Match a question:")
correct = 0
total = 0
for sample in text_sample_list:
if True: # sample.lang_code == 'eng':
text_input = feersum_nlu.TextInput(sample.text,
lang_code=None) # optional language hint.
api_response = api_instance.faq_matcher_retrieve(instance_name, text_input, x_caller=caller_name)
top_k = 2
response_label_set = set()
for i in range(min(top_k, len(api_response))):
response_label_set.add(api_response[i].label[:20])
if sample.label[:20] in response_label_set:
correct = correct + 1
print('.', sample.text, ", ", sample.label)
else:
print('x', sample.text, ", ", sample.label)
total = total + 1
# print(" type(api_response)", type(api_response))
# print(" api_response", api_response)
# print()
print("accuracy =", correct / total)
except ApiException as e:
print("Exception when calling an FAQ matcher operation: %s\n" % e)
except urllib3.exceptions.HTTPError as e:
print("Connection HTTPError! %s\n" % e)
| 35.735294 | 110 | 0.598903 | """ Example: Shows how to create, train and use an FAQ matcher. """
import urllib3
import csv
import feersum_nlu
from feersum_nlu.rest import ApiException
from examples import feersumnlu_host, feersum_nlu_auth_token
# Configure API key authorization: APIKeyHeader
configuration = feersum_nlu.Configuration()
# configuration.api_key['AUTH_TOKEN'] = feersum_nlu_auth_token
configuration.api_key['X-Auth-Token'] = feersum_nlu_auth_token  # Alternative auth key header!
configuration.host = feersumnlu_host
api_instance = feersum_nlu.FaqMatchersApi(feersum_nlu.ApiClient(configuration))
# Name of a pre-existing FAQ matcher instance on the service.
instance_name = 'test_faq'
# text_input_0 = feersum_nlu.TextInput("Can I give my baby tea?",
#                                      lang_code=None)  # optional language hint.
caller_name = 'example_caller'
print()
# The testing samples.
# Each CSV row is expected as: label, question text[, language code].
text_sample_list = []
with open('testing_samples.csv',
          'r', newline='') as csvfile:
    csv_reader = csv.reader(csvfile,
                            delimiter=',',
                            quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
    for row in csv_reader:
        if len(row) >= 3:
            lang_code = row[2] if row[2] != '' else None
        else:
            lang_code = None
        text_sample_list.append(feersum_nlu.LabelledTextSample(text=row[1],
                                                               label=row[0],
                                                               lang_code=lang_code))
try:
    # print("Get the parameters:")
    # api_response = api_instance.faq_matcher_get_params(instance_name)
    # print(" type(api_response)", type(api_response))
    # print(" api_response", api_response)
    # print()
    #
    # print("Update the parameters:")
    # model_params = \
    #     feersum_nlu.ModelParams(threshold=1.1)
    # api_response = api_instance.faq_matcher_set_params(instance_name, model_params)
    # print(" type(api_response)", type(api_response))
    # print(" api_response", api_response)
    # print()
    #
    # print("Get the details of specific named loaded FAQ matcher:")
    # api_response = api_instance.faq_matcher_get_details(instance_name)
    # print(" type(api_response)", type(api_response))
    # print(" api_response", api_response)
    # cm_labels = api_response.cm_labels
    # print()
    print("Match a question:")
    # Top-k retrieval accuracy: a sample counts as correct when its label
    # (truncated to 20 chars) appears among the first top_k matches.
    correct = 0
    total = 0
    for sample in text_sample_list:
        if True:  # sample.lang_code == 'eng':
            text_input = feersum_nlu.TextInput(sample.text,
                                               lang_code=None)  # optional language hint.
            api_response = api_instance.faq_matcher_retrieve(instance_name, text_input, x_caller=caller_name)
            top_k = 2
            response_label_set = set()
            for i in range(min(top_k, len(api_response))):
                response_label_set.add(api_response[i].label[:20])
            if sample.label[:20] in response_label_set:
                correct = correct + 1
                print('.', sample.text, ", ", sample.label)
            else:
                print('x', sample.text, ", ", sample.label)
            total = total + 1
            # print(" type(api_response)", type(api_response))
            # print(" api_response", api_response)
            # print()
    print("accuracy =", correct / total)
except ApiException as e:
    print("Exception when calling an FAQ matcher operation: %s\n" % e)
except urllib3.exceptions.HTTPError as e:
    print("Connection HTTPError! %s\n" % e)
| 0 | 0 | 0 |
c84bf19d4ed25d9e86c50530c08f285cdf06b330 | 158 | py | Python | langsea/models/category.py | blancheta/langsea | e268b43fb94e3234ac161f2e5d9600d51360e4b3 | [
"MIT"
] | 1 | 2016-10-20T19:30:40.000Z | 2016-10-20T19:30:40.000Z | langsea/models/category.py | blancheta/langsea | e268b43fb94e3234ac161f2e5d9600d51360e4b3 | [
"MIT"
] | null | null | null | langsea/models/category.py | blancheta/langsea | e268b43fb94e3234ac161f2e5d9600d51360e4b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
| 14.363636 | 37 | 0.620253 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Category:
    """Simple value holder for a language-learning category."""

    def __init__(self, id, name, image):
        # Plain attribute storage; no validation is performed.
        self.id, self.name, self.image = id, name, image
| 70 | -6 | 47 |
d3c476e5772a7748aedc645e1e0993831ec32adc | 1,024 | py | Python | Pages/LoginPage.py | bwheel/home-router | c166d0a61136c81ced5cb999cb13396dc00a915f | [
"MIT"
] | null | null | null | Pages/LoginPage.py | bwheel/home-router | c166d0a61136c81ced5cb999cb13396dc00a915f | [
"MIT"
] | null | null | null | Pages/LoginPage.py | bwheel/home-router | c166d0a61136c81ced5cb999cb13396dc00a915f | [
"MIT"
] | null | null | null | from time import sleep
from selenium.webdriver import Chrome
from selenium.webdriver.remote.webelement import WebElement
from Pages.BasicHomePage import BasicHomePage
| 36.571429 | 82 | 0.612305 | from time import sleep
from selenium.webdriver import Chrome
from selenium.webdriver.remote.webelement import WebElement
from Pages.BasicHomePage import BasicHomePage
class LoginPage(object):
    """Page object wrapping the router's login form."""

    def __init__(self, driver: Chrome):
        self.driver = driver

    def Login(self, password: str ) -> (BasicHomePage, str):
        """Submit the password; return (home_page, None) or (None, error)."""
        try:
            # Locate the login form and its widgets.
            login_form: WebElement = self.driver.find_element_by_id('form-login')
            wrapper: WebElement = login_form.find_element_by_class_name('widget-wrap')
            password_field: WebElement = wrapper.find_element_by_tag_name('input')
            submit_button: WebElement = login_form.find_element_by_id('login-btn')
            # Drive the UI: type the password and submit.
            password_field.send_keys(password)
            submit_button.click()
            print("Logging in...")
            sleep(6.5)
            return (BasicHomePage(self.driver), None)
        except Exception as ex:
            return (None, str(ex))
| 748 | 3 | 83 |
f58d5e780787d129767fd53c4074fb43a80bc09a | 3,560 | py | Python | src/ovirtcli/command/console.py | oVirt/ovirt-engine-cli | 422d70e1dc422f0ca248abea47a472e3605caa4b | [
"Apache-2.0"
] | 4 | 2015-11-29T08:53:03.000Z | 2022-02-05T14:10:24.000Z | src/ovirtcli/command/console.py | oVirt/ovirt-engine-cli | 422d70e1dc422f0ca248abea47a472e3605caa4b | [
"Apache-2.0"
] | null | null | null | src/ovirtcli/command/console.py | oVirt/ovirt-engine-cli | 422d70e1dc422f0ca248abea47a472e3605caa4b | [
"Apache-2.0"
] | 4 | 2015-02-06T02:06:53.000Z | 2020-03-24T07:13:05.000Z | #
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ovirtcli.platform import vnc, spice
from ovirtcli.command.command import OvirtCommand
from cli.messages import Messages
| 33.584906 | 87 | 0.544101 | #
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ovirtcli.platform import vnc, spice
from ovirtcli.command.command import OvirtCommand
from cli.messages import Messages
class ConsoleCommand(OvirtCommand):
    """CLI command that opens a graphical (VNC or SPICE) console to a VM."""

    name = 'console'
    description = 'open a console to a VM'
    args_check = 1  # presumably the required positional-arg count -- confirm in OvirtCommand

    helptext = """\
        == Usage ==
        console <vm>
        == Description ==
        This opens up a graphical console to a virtual machine. Depending on
        the virtual machine display, this will fire up an external VNC or
        SPICE client.
        """

    def execute(self):
        """Resolve the VM, obtain a console ticket and launch the matching
        external client.

        NOTE(review): control flow assumes self.error() does not return;
        confirm against the OvirtCommand base class.
        """
        self.check_connection()
        args = self.arguments
        # A console is only reachable while the guest is (coming) up.
        CONSOLE_STATES = ['powering_up', 'up', 'reboot_in_progress']
        host_subject = ''
        vm = self.get_object('vm', args[0])
        if vm is None:
            self.error(
                Messages.Error.NO_SUCH_OBJECT % ('vm', args[0])
            )
        if vm.status.state not in CONSOLE_STATES:
            self.error(
                Messages.Error.CANNOT_CONNECT_TO_VM_DUE_TO_INVALID_STATE +
                Messages.Info.POSSIBLE_VM_STATES_FOR_CONSOLE % str(CONSOLE_STATES)
            )
        host_addr = vm.display.address
        proto = vm.display.type_
        port = vm.display.port
        secport = vm.display.secure_port
        # Request a one-time console ticket (password) from the engine.
        action = vm.ticket()
        if action.status.state != 'complete':
            self.error(
                Messages.Error.CANNOT_SET_VM_TICKET
            )
        ticket = action.ticket.value
        debug = self.context.settings['cli:debug']
        if proto == 'vnc':
            vnc.launch_vnc_client(host_addr, port, ticket, vm.name, debug)
        elif proto == 'spice':
            # For SPICE, try to fetch the host certificate subject so the
            # client can validate the host identity; warn when unavailable.
            if vm.host and vm.host.id:
                host = self.get_object('host', vm.host.id)
                if host:
                    if hasattr(host, 'certificate') and host.certificate:
                        if host.certificate.subject:
                            host_subject = host.certificate.subject
                        else:
                            self.warning(
                                Messages.Warning.CANNOT_FETCH_HOST_CERT_SUBJECT
                            )
                    else:
                        self.warning(
                            Messages.Warning.CANNOT_FETCH_HOST_CERT_SUBJECT_LEGACY_SDK
                        )
            if host_subject == '':
                self.warning(
                    Messages.Warning.HOST_IDENTITY_WILL_NOT_BE_VALIDATED
                )
            spice.launch_spice_client(
                host_addr,
                host_subject,
                port, secport,
                ticket,
                self.context.url,
                vm.name,
                vm.display.proxy,
                debug
            )
        else:
            self.error(
                Messages.Error.INVALID_DISPLAY_PROTOCOL % proto
            )
3062f8313fc3aca6daae7181a5eebdc1a0b1cf74 | 1,626 | py | Python | sample/api/helpdesk.py | VXenomac/oapi-sdk-python | 156b789b3d20653802f64842c9a26229dd9252d7 | [
"Apache-2.0"
] | 50 | 2021-04-11T05:24:10.000Z | 2022-03-29T10:14:13.000Z | sample/api/helpdesk.py | larksuite/oapi-sdk-python | 70fda5b1ccf765938bf207dff0117c0c03a93605 | [
"Apache-2.0"
] | 20 | 2021-04-07T15:17:44.000Z | 2022-03-23T06:27:12.000Z | sample/api/helpdesk.py | VXenomac/oapi-sdk-python | 156b789b3d20653802f64842c9a26229dd9252d7 | [
"Apache-2.0"
] | 8 | 2021-04-25T15:02:17.000Z | 2022-03-13T15:00:59.000Z | # -*- coding: UTF-8 -*-
from larksuiteoapi.api import Request, FormData, FormDataFile, set_timeout, set_path_params, set_query_params, \
set_is_response_stream, set_response_stream, set_tenant_key, set_need_help_desk_auth
from larksuiteoapi import Config, ACCESS_TOKEN_TYPE_TENANT, ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_APP, \
APP_TICKET_KEY_PREFIX, DOMAIN_FEISHU, LEVEL_ERROR, LEVEL_DEBUG
# for Cutome APP(企业自建应用)
app_settings = Config.new_internal_app_settings(app_id='cli_a04677****8d01b',
app_secret='XcplX2QLU7X******VJKHd6Yzvt',
verification_token='', encrypt_key='',
help_desk_id='696874*****390932',
help_desk_token='ht-c82db92*******f5cf6e569aa')
# for redis store and logger(level=debug)
# conf = test_config_with_redis_store(DOMAIN_FEISHU, app_settings)
# for memory store and logger(level=debug)
conf = Config("https://open.feishu-boe.cn", app_settings, log_level=LEVEL_DEBUG)
# Demo entry point: fetch and print one helpdesk ticket.
if __name__ == '__main__':
    test_ticket_detail()
| 41.692308 | 112 | 0.647601 | # -*- coding: UTF-8 -*-
from larksuiteoapi.api import Request, FormData, FormDataFile, set_timeout, set_path_params, set_query_params, \
set_is_response_stream, set_response_stream, set_tenant_key, set_need_help_desk_auth
from larksuiteoapi import Config, ACCESS_TOKEN_TYPE_TENANT, ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_APP, \
APP_TICKET_KEY_PREFIX, DOMAIN_FEISHU, LEVEL_ERROR, LEVEL_DEBUG
# for Cutome APP(企业自建应用)
app_settings = Config.new_internal_app_settings(app_id='cli_a04677****8d01b',
app_secret='XcplX2QLU7X******VJKHd6Yzvt',
verification_token='', encrypt_key='',
help_desk_id='696874*****390932',
help_desk_token='ht-c82db92*******f5cf6e569aa')
# for redis store and logger(level=debug)
# conf = test_config_with_redis_store(DOMAIN_FEISHU, app_settings)
# for memory store and logger(level=debug)
conf = Config("https://open.feishu-boe.cn", app_settings, log_level=LEVEL_DEBUG)
def test_ticket_detail():
    """Fetch one helpdesk ticket by id and print the raw API response."""
    request = Request('/open-apis/helpdesk/v1/tickets/6971250929135779860', 'GET', ACCESS_TOKEN_TYPE_TENANT, None,
                      request_opts=[set_timeout(3), set_need_help_desk_auth()])
    response = request.do(conf)
    print('header = %s' % response.get_header().items())
    print('request id = %s' % response.get_request_id())
    print(response.code)
    if response.code != 0:
        print(response.msg)
        print(response.error)
    else:
        print(response.data)
# Demo entry point: fetch and print one helpdesk ticket.
if __name__ == '__main__':
    test_ticket_detail()
| 450 | 0 | 23 |
31d60ea4aa57f5282fce5df0164dd3a9d3eb8ce8 | 2,475 | py | Python | rc_class/model_rnn_ridge.py | E-dog91/Reservoir-Reinforcement-LEarning-for-Trading- | 3e91ee67f899555b2b02affcdf69d510671ad387 | [
"MIT"
] | null | null | null | rc_class/model_rnn_ridge.py | E-dog91/Reservoir-Reinforcement-LEarning-for-Trading- | 3e91ee67f899555b2b02affcdf69d510671ad387 | [
"MIT"
] | null | null | null | rc_class/model_rnn_ridge.py | E-dog91/Reservoir-Reinforcement-LEarning-for-Trading- | 3e91ee67f899555b2b02affcdf69d510671ad387 | [
"MIT"
] | null | null | null | from collections import OrderedDict
import torch
import torch.nn as nn
from rc_class.base_model_rc_ridge import Base_model_rc_ridge
| 49.5 | 120 | 0.633131 | from collections import OrderedDict
import torch
import torch.nn as nn
from rc_class.base_model_rc_ridge import Base_model_rc_ridge
class Model_rnn_ridge(Base_model_rc_ridge):
    """Reservoir model: a frozen single-layer ``nn.RNN`` cell whose weights
    come from user-supplied generators, trained only through the ridge
    readout provided by the base class."""

    def __init__(self, input_dim, hidden_dim, output_dim,
                 w_generator, win_generator, wbias_generator, h0_Generator, h0_params,
                 learning_algo='inv', ridge_param=0.0, washout=0, dtype=torch.float32):
        """Create the frozen recurrent cell and load its generated weights."""
        super().__init__(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim,
                         w_generator=w_generator, win_generator=win_generator,
                         wbias_generator=wbias_generator,
                         h0_Generator=h0_Generator, h0_params=h0_params,
                         learning_algo=learning_algo, ridge_param=ridge_param, washout=washout,
                         dtype=dtype)  # net.nn_predict will return only the output.
        # Build the recurrent cell; inputs are batch-first.
        self.rec_cell = nn.RNN(input_size=self._input_dim, hidden_size=self._hidden_dim,
                               batch_first=True, nonlinearity='relu')
        # The reservoir is fixed: no gradient flows through the cell.
        for weight in self.rec_cell.parameters():
            weight.requires_grad = False
        # Generate the four matrices an RNN layer needs (input weights/bias,
        # recurrent weights/bias); they are all initialized the same way, so
        # one generator call covers them.
        in_weight, rec_weight, in_bias, rec_bias = self._generate_matrices(
            self._w_generator, self._win_generator, self._wbias_generator,
            self.dtype, self._input_dim, self._hidden_dim, self._hidden_dim)
        # Push the generated tensors into the cell under torch's l0 naming.
        generated_state = OrderedDict([('weight_ih_l0', in_weight),
                                       ('weight_hh_l0', rec_weight),
                                       ('bias_ih_l0', in_bias),
                                       ('bias_hh_l0', rec_bias)])
        self.rec_cell.load_state_dict(generated_state, strict=False)

    def _hidden_state_init(self, batch_size):
        """Sample the initial hidden state, shaped (1, batch_size, hidden_dim)."""
        h0_generator = self._h0_Generator(**self._h0_params)
        return h0_generator.generate(size=(1, batch_size, self._hidden_dim),
                                     dtype=self.dtype)

    @property
    def rec_cell(self):
        """The (frozen) recurrent cell backing this reservoir."""
        return self._rec_cell

    @rec_cell.setter
    def rec_cell(self, new_rec_cell):
        self._rec_cell = new_rec_cell
| 2,135 | 174 | 24 |
92b77eca6d6b86ce6af489641a1decc43d8b4cc6 | 1,529 | py | Python | segmentron/data/dataloader/trans10k_extra.py | xtudbxk/fanet | 7bdea393166e7a2873ed8a82ce31c0ac14e8b69c | [
"Apache-2.0"
] | 1 | 2022-03-30T13:13:35.000Z | 2022-03-30T13:13:35.000Z | segmentron/data/dataloader/trans10k_extra.py | xtudbxk/fanet | 7bdea393166e7a2873ed8a82ce31c0ac14e8b69c | [
"Apache-2.0"
] | null | null | null | segmentron/data/dataloader/trans10k_extra.py | xtudbxk/fanet | 7bdea393166e7a2873ed8a82ce31c0ac14e8b69c | [
"Apache-2.0"
] | 1 | 2022-03-31T06:40:58.000Z | 2022-03-31T06:40:58.000Z | """Prepare Trans10K dataset"""
import os
import torch
import numpy as np
import logging
from PIL import Image
from IPython import embed
from ...config import cfg
from .seg_data_base import SegmentationDataset
from .trans10k_with_fakemix import TransSegmentationWithFakeMix
| 31.204082 | 99 | 0.646828 | """Prepare Trans10K dataset"""
import os
import torch
import numpy as np
import logging
from PIL import Image
from IPython import embed
from ...config import cfg
from .seg_data_base import SegmentationDataset
from .trans10k_with_fakemix import TransSegmentationWithFakeMix
class TransExtraSegmentation(TransSegmentationWithFakeMix):
    """Trans10K "extra" split: unlabeled images enumerated from a flat folder.

    Because the split has no annotations, ``__getitem__`` synthesizes an
    all-zero (background) mask for every image.
    """

    def get_pairs(self, folder, split):
        """Collect image file paths from *folder*.

        Returns ``(image_paths, None)`` — the second slot (masks) is always
        ``None`` for this split.
        """
        def get_path_pairs(image_folder):
            # Keep only regular files; anything else is logged and skipped.
            image_paths = []
            images = os.listdir(image_folder)
            for imagename in images:
                imagepath = os.path.join(image_folder, imagename)
                if os.path.isfile(imagepath):
                    image_paths.append(imagepath)
                else:
                    # BUG FIX: the message previously had no %s placeholder,
                    # so passing imagepath as a lazy argument made the logging
                    # module raise an internal formatting error instead of
                    # printing the path.
                    logging.info('cannot find the image: %s', imagepath)
            logging.info('Found {} images in the folder {}'.format(len(image_paths), image_folder))
            return image_paths

        image_folder = folder
        image_paths = get_path_pairs(image_folder)
        return image_paths, None

    def __getitem__(self, index):
        """Return ``(image, mask, path)`` for the *index*-th image."""
        img = Image.open(self.image_paths[index]).convert('RGB')
        # No ground truth available: build an all-zero single-channel mask
        # with the image's spatial shape.
        mask = np.zeros_like(np.array(img))[:, :, 0]
        assert mask.max() <= 2, mask.max()  # trivially holds for a zero mask
        mask = Image.fromarray(mask)
        # synchrosized transform
        img, mask = self._val_sync_transform(img, mask)
        # general resize, normalize and toTensor
        if self.transform is not None:
            img = self.transform(img)
        return img, mask, self.image_paths[index]
| 1,139 | 38 | 76 |
965990a507923f1aca77234ed7ed3f647fc5edb4 | 10,520 | py | Python | doxy_db/db.py | abathur/doxy_db | fb7767dbdd0b41614d8a551609d9d6ff064b3e9c | [
"MIT"
] | null | null | null | doxy_db/db.py | abathur/doxy_db | fb7767dbdd0b41614d8a551609d9d6ff064b3e9c | [
"MIT"
] | null | null | null | doxy_db/db.py | abathur/doxy_db | fb7767dbdd0b41614d8a551609d9d6ff064b3e9c | [
"MIT"
] | null | null | null | import sqlite3
from . import sql
from . import views
from . import exceptions
from . import makes
class DoxygenSQLite3(object):
    """Query/view facade over a Doxygen-generated SQLite3 database.

    FIX: this copy of the class was missing its ``__init__``, the
    ``compound_tree`` closure (``make_compound_tree`` returned an undefined
    name, a guaranteed NameError), and ``root_page``; they are restored here.
    """

    # Class-level defaults; real values are set in __init__.
    indexpage = None
    connection = None
    types = None

    def __init__(
        self,
        uri,
        type_factory=makes.default_types,
        atom_factory=makes.default_atoms,
        relation_factory=makes.default_relations,
    ):
        """Open the database at *uri* and build the base query helpers.

        Any factory argument may be falsy to skip building that component.
        """
        self.types = type_factory and type_factory()
        self.atoms = atom_factory and atom_factory()
        self.relations = relation_factory and relation_factory()
        # use URI so that a missing file will error, not implicitly create
        connection = sqlite3.connect("file:{}?mode=rw".format(uri), uri=True)
        connection.row_factory = self.types.row_factory()
        connection.execute("PRAGMA temp_store = MEMORY;")
        self.connection = connection
        # _def is a stepping stone to bigger queries
        self._def = sql.Statement(self).table("def", "rowid")._from("def base")
        self.relview = views.RelationView(
            sql.Statement(self, self._def)._select("*")._where("base.rowid=?")
        )

    # extend and compile are the same logic, but are separate for API semantics/readability.
    def extend(self, func):
        """
        Extend this object's API via a user-specified function.
        def add_api_methods(api):
            api.class_doc = api.make_compound_tree(
                ["class"],
                api.relations.get("methods")
            )
        manual.extend(add_api_methods)
        """
        return func(self)

    def compile(self, func):
        """
        Extend this object's API via a user-specified function.
        def add_sections(man):
            man.mount(
                "example",
                man.class_doc(name="Example_Test")
            )
        manual.compile(add_sections)
        """
        return func(self)

    # ---------------------------------- #
    # View factories; used to extend the API and generate manual sections.
    def topmost(self, kinds, brief_description, search_relation=None):
        """
        Generate a view that will find compounds of 'kinds' that have no parent.
        Note: I thought I could build something similar to the default HTML manual by throwing the page, class, and group kinds through topmost; I was profoundly wrong. There a number of small caveats regarding what appears in those lists and which relations dictate the hierarchy it encodes.
        """
        return views.ListView(
            sql.Statement(self, self._def)._where(
                "base.kind in ('{kinds}') and base.rowid not in (select distinct rowid from inner_outer where [kind:1] in ('{kinds}'))".format(
                    kinds="','".join(kinds)
                )
            ),
            brief_description,
            search_relation=search_relation,
        )

    def kinds(self, kinds, brief_description, search_relation=None):
        """Generate a view that will find elements of 'kinds' """
        return views.ListView(
            sql.Statement(self, self._def)._where(
                "base.kind in ('{kinds}')".format(kinds="','".join(kinds))
            ),
            brief_description,
            search_relation=search_relation,
        )

    def make_compound_tree(self, kinds, search_relation):
        """
        Generate a factory that itself generates views locked on a certain compound.
        Easier to grasp with an example of how it can be used at a higher layer.
        man.class_doc = man.make_compound_tree(
            ["class"],
            man.relations.get("methods")
        )
        man.mount(
            "example",
            man.class_doc(name="Example_Test")
        )
        First, this creates a view-factory named 'class_doc'. It generates views that will search class compounddefs for one matching a consumer-specified property, lock onto the matched class doc, and support enumerating that class's methods.
        Second, it uses the new view factory to generate a view that targets the Example_Test class, and mounts it as a manual section named 'example'.
        """
        def compound_tree(**kwarg):
            # Lock onto a compound either by refid or by name.
            if "refid" in kwarg:
                # TODO: I need to button up the SQL injection attacks present against this module; I'm not sure what import that has for structures like this. They're fine if the caller is in charge of them, but they could prove to be a big footgun if you route a user-provided 'kind' in here...
                return views.DocView(
                    sql.Statement(self, self._def)
                    ._select("base.*")
                    ._where(
                        "base.kind in ('{}') and base.refid='{}'".format(
                            "','".join(kinds), kwarg["refid"]
                        )
                    ),
                    search_relation=search_relation,
                )
            elif "name" in kwarg:
                return views.DocView(
                    sql.Statement(self, self._def)
                    ._select("base.*")
                    ._where(
                        "base.kind in ('{}') and base.name='{}'".format(
                            "','".join(kinds), kwarg["name"]
                        )
                    ),
                    search_relation=search_relation,
                )
            else:
                raise exceptions.InvalidUsage(
                    "compound_tree missing required 'refid' or 'name' argument"
                )
        return compound_tree

    # TODO: I'm skipping a potentially useful method for scaffolding a doc section based on searching a directory for compounds. I took 4 quick swings at this functionality that all ran into intractable problems. I don't want to force a solution, and I don't want to get bogged down in another big segment of functionality before launching.
    #
    # That said, I do want to preserve progress ths far in case it is useful.
    #
    # Here's a basic query that lets you search a directory by name and enumerate its compounds:
    # select def.* from contains join def on contains.inner_rowid=def.rowid where contains.outer_rowid in (select file.rowid from contains join compounddef on contains.outer_rowid=compounddef.rowid join def file on file.rowid=contains.inner_rowid where compounddef.name='obj');
    #
    # This list-view-based model kinda works for the first couple steps of a deeper search:
    # >>> x.doc_search("std")
    # >>> x.doc_search("std obj_armour")
    #
    # But the last part falls flat on its face:
    #
    # >>> x.doc_search("std obj_armour query_ego")
    # doxy_db.exceptions.MalformedQuery: ('Malformed query', "SELECT [innercompounds].* FROM def base JOIN contains as relative ON base.rowid=relative.outer_rowid JOIN def [innercompounds] ON [innercompounds].rowid=relative.inner_rowid WHERE contains.outer_rowid in (select file.rowid from contains join compounddef on contains.outer_rowid=compounddef.rowid join def as file on file.rowid=contains.inner_rowid where compounddef.name='obj') AND base.rowid=? AND innercompounds.name=?", (381, 'query_ego'))
    #
    # This generated query is broken in like 3 places:
    #
    # - the outer contains.outer_rowid would need to be 'relative.outer_rowid'
    # - the first join contains as relative needs to be on relative.inner_rowid
    # - and, most critically, the query needs additional layers to even begin to actually query members of the intended compound.
    #
    # At a conceptual level, I think this approach runs into a few problems (there might be a less-disruptive approach...):
    # - We need to extend the search_relation concept to an additional layer of depth in order to support first jumping from the directory compound to the appropriate sub-compound and then again to its members
    #     - we could in theory just make the feature a little more rigid, and don't allow a restrictable list? or, use inner_compound, but add a custom 'where kind=?' to the query and let people specify a text kind--but we'll also need to overload the parts of the search/find process that lean on relations for depth search.
    # - I'm not entirely sure if the minimal query wrapper API I built is actually capable of handling queries nesting through this many joins or nested selects very robustly
    #     - and it's probably a fool's-errand to try to develop it to that level of edge-case support
    #     - one potential out might be a more basic raw-query mode. The point of the wrapper is to make easier to build up queries that reference each other's parts, but if this task really is an edge case, that scaffolding isn't essential.
    # - It probably needs its own view or view abstraction; there's just too much edge-case stuff.
    #
    # def directory(self, name, search_relation=None):
    #     # TODO Not quite sure where, but somewhere I need to test and/or doc this behavior.
    #     if search_relation is None:
    #         search_relation = self.relations.get("innercompounds")
    #     return views.ListView(
    #         sql.Statement(self, self._def)
    #         ._select("def.*")
    #         ._from("contains")
    #         ._where("contains.outer_rowid in (select file.rowid from contains join compounddef on contains.outer_rowid=compounddef.rowid join def as file on file.rowid=contains.inner_rowid where compounddef.name='{}')".format(name))
    #         ._join(conditions="def on contains.inner_rowid=def.rowid"),
    #         "Contents of directory {}".format(name),
    #         search_relation=search_relation
    #     )
    #
    # Some other notes I had on this concept elsewhere:
    # - we could cheat and substring fns:
    #   select * from compounddef where kind='class' and id_file in (select rowid from files where name like 'obj/doxy_guide%');
    #   man.mount("std", struct().where(id_file=file().where(name like path%)))
    # - I could imagine a chainable API like: classes(actions).members(**optionally kind="function")

    def root_page(self, field, name):
        """Return a DocView for the page compound whose *field* equals *name*."""
        return views.DocView(
            sql.Statement(self, self._def)
            ._select(
                ", ".join(
                    ["compounddef.{}".format(x) for x in self.types.cols("compound")]
                )
            )
            ._where("base.kind='page' and base.{}='{}'".format(field, name))
            ._join(conditions="compounddef on compounddef.rowid=base.rowid")
        )
| 50.334928 | 504 | 0.630608 | import sqlite3
from . import sql
from . import views
from . import exceptions
from . import makes
class DoxygenSQLite3(object):
    """Query/view facade over a Doxygen-generated SQLite3 database."""
    # Class-level defaults; real values are set in __init__.
    indexpage = None
    connection = None
    types = None
    def __init__(
        self,
        uri,
        type_factory=makes.default_types,
        atom_factory=makes.default_atoms,
        relation_factory=makes.default_relations,
    ):
        """Open the database at *uri* and build the base query helpers.

        Any factory argument may be falsy to skip building that component.
        """
        self.types = type_factory and type_factory()
        self.atoms = atom_factory and atom_factory()
        self.relations = relation_factory and relation_factory()
        # use URI so that a missing file will error, not implicitly create
        connection = sqlite3.connect("file:{}?mode=rw".format(uri), uri=True)
        connection.row_factory = self.types.row_factory()
        connection.execute("PRAGMA temp_store = MEMORY;")
        self.connection = connection
        # _def is a stepping stone to bigger queries
        self._def = sql.Statement(self).table("def", "rowid")._from("def base")
        self.relview = views.RelationView(
            sql.Statement(self, self._def)._select("*")._where("base.rowid=?")
        )
    # extend and compile are the same logic, but are separate for API semantics/readability.
    def extend(self, func):
        """
        Extend this object's API via a user-specified function.
        def add_api_methods(api):
            api.class_doc = api.make_compound_tree(
                ["class"],
                api.relations.get("methods")
            )
        manual.extend(add_api_methods)
        """
        return func(self)
    def compile(self, func):
        """
        Extend this object's API via a user-specified function.
        def add_sections(man):
            man.mount(
                "example",
                man.class_doc(name="Example_Test")
            )
        manual.compile(add_sections)
        """
        return func(self)
    # ---------------------------------- #
    # View factories; used to extend the API and generate manual sections.
    def topmost(self, kinds, brief_description, search_relation=None):
        """
        Generate a view that will find compounds of 'kinds' that have no parent.
        Note: I thought I could build something similar to the default HTML manual by throwing the page, class, and group kinds through topmost; I was profoundly wrong. There a number of small caveats regarding what appears in those lists and which relations dictate the hierarchy it encodes.
        """
        return views.ListView(
            sql.Statement(self, self._def)._where(
                "base.kind in ('{kinds}') and base.rowid not in (select distinct rowid from inner_outer where [kind:1] in ('{kinds}'))".format(
                    kinds="','".join(kinds)
                )
            ),
            brief_description,
            search_relation=search_relation,
        )
    def kinds(self, kinds, brief_description, search_relation=None):
        """Generate a view that will find elements of 'kinds' """
        return views.ListView(
            sql.Statement(self, self._def)._where(
                "base.kind in ('{kinds}')".format(kinds="','".join(kinds))
            ),
            brief_description,
            search_relation=search_relation,
        )
    def make_compound_tree(self, kinds, search_relation):
        """
        Generate a factory that itself generates views locked on a certain compound.
        Easier to grasp with an example of how it can be used at a higher layer.
        man.class_doc = man.make_compound_tree(
            ["class"],
            man.relations.get("methods")
        )
        man.mount(
            "example",
            man.class_doc(name="Example_Test")
        )
        First, this creates a view-factory named 'class_doc'. It generates views that will search class compounddefs for one matching a consumer-specified property, lock onto the matched class doc, and support enumerating that class's methods.
        Second, it uses the new view factory to generate a view that targets the Example_Test class, and mounts it as a manual section named 'example'.
        """
        def compound_tree(**kwarg):
            # Lock onto a compound either by refid or by name.
            if "refid" in kwarg:
                # TODO: I need to button up the SQL injection attacks present against this module; I'm not sure what import that has for structures like this. They're fine if the caller is in charge of them, but they could prove to be a big footgun if you route a user-provided 'kind' in here...
                return views.DocView(
                    sql.Statement(self, self._def)
                    ._select("base.*")
                    ._where(
                        "base.kind in ('{}') and base.refid='{}'".format(
                            "','".join(kinds), kwarg["refid"]
                        )
                    ),
                    search_relation=search_relation,
                )
            elif "name" in kwarg:
                return views.DocView(
                    sql.Statement(self, self._def)
                    ._select("base.*")
                    ._where(
                        "base.kind in ('{}') and base.name='{}'".format(
                            "','".join(kinds), kwarg["name"]
                        )
                    ),
                    search_relation=search_relation,
                )
            else:
                raise exceptions.InvalidUsage(
                    "compound_tree missing required 'refid' or 'name' argument"
                )
        return compound_tree
    # TODO: I'm skipping a potentially useful method for scaffolding a doc section based on searching a directory for compounds. I took 4 quick swings at this functionality that all ran into intractable problems. I don't want to force a solution, and I don't want to get bogged down in another big segment of functionality before launching.
    #
    # That said, I do want to preserve progress ths far in case it is useful.
    #
    # Here's a basic query that lets you search a directory by name and enumerate its compounds:
    # select def.* from contains join def on contains.inner_rowid=def.rowid where contains.outer_rowid in (select file.rowid from contains join compounddef on contains.outer_rowid=compounddef.rowid join def file on file.rowid=contains.inner_rowid where compounddef.name='obj');
    #
    # This list-view-based model kinda works for the first couple steps of a deeper search:
    # >>> x.doc_search("std")
    # >>> x.doc_search("std obj_armour")
    #
    # But the last part falls flat on its face:
    #
    # >>> x.doc_search("std obj_armour query_ego")
    # doxy_db.exceptions.MalformedQuery: ('Malformed query', "SELECT [innercompounds].* FROM def base JOIN contains as relative ON base.rowid=relative.outer_rowid JOIN def [innercompounds] ON [innercompounds].rowid=relative.inner_rowid WHERE contains.outer_rowid in (select file.rowid from contains join compounddef on contains.outer_rowid=compounddef.rowid join def as file on file.rowid=contains.inner_rowid where compounddef.name='obj') AND base.rowid=? AND innercompounds.name=?", (381, 'query_ego'))
    #
    # This generated query is broken in like 3 places:
    #
    # - the outer contains.outer_rowid would need to be 'relative.outer_rowid'
    # - the first join contains as relative needs to be on relative.inner_rowid
    # - and, most critically, the query needs additional layers to even begin to actually query members of the intended compound.
    #
    # At a conceptual level, I think this approach runs into a few problems (there might be a less-disruptive approach...):
    # - We need to extend the search_relation concept to an additional layer of depth in order to support first jumping from the directory compound to the appropriate sub-compound and then again to its members
    #     - we could in theory just make the feature a little more rigid, and don't allow a restrictable list? or, use inner_compound, but add a custom 'where kind=?' to the query and let people specify a text kind--but we'll also need to overload the parts of the search/find process that lean on relations for depth search.
    # - I'm not entirely sure if the minimal query wrapper API I built is actually capable of handling queries nesting through this many joins or nested selects very robustly
    #     - and it's probably a fool's-errand to try to develop it to that level of edge-case support
    #     - one potential out might be a more basic raw-query mode. The point of the wrapper is to make easier to build up queries that reference each other's parts, but if this task really is an edge case, that scaffolding isn't essential.
    # - It probably needs its own view or view abstraction; there's just too much edge-case stuff.
    #
    # def directory(self, name, search_relation=None):
    #     # TODO Not quite sure where, but somewhere I need to test and/or doc this behavior.
    #     if search_relation is None:
    #         search_relation = self.relations.get("innercompounds")
    #     return views.ListView(
    #         sql.Statement(self, self._def)
    #         ._select("def.*")
    #         ._from("contains")
    #         ._where("contains.outer_rowid in (select file.rowid from contains join compounddef on contains.outer_rowid=compounddef.rowid join def as file on file.rowid=contains.inner_rowid where compounddef.name='{}')".format(name))
    #         ._join(conditions="def on contains.inner_rowid=def.rowid"),
    #         "Contents of directory {}".format(name),
    #         search_relation=search_relation
    #     )
    #
    # Some other notes I had on this concept elsewhere:
    # - we could cheat and substring fns:
    #   select * from compounddef where kind='class' and id_file in (select rowid from files where name like 'obj/doxy_guide%');
    #   man.mount("std", struct().where(id_file=file().where(name like path%)))
    # - I could imagine a chainable API like: classes(actions).members(**optionally kind="function")
    def root_page(self, field, name):
        """Return a DocView for the page compound whose *field* equals *name*."""
        return views.DocView(
            sql.Statement(self, self._def)
            ._select(
                ", ".join(
                    ["compounddef.{}".format(x) for x in self.types.cols("compound")]
                )
            )
            ._where("base.kind='page' and base.{}='{}'".format(field, name))
            ._join(conditions="compounddef on compounddef.rowid=base.rowid")
        )
| 2,677 | 0 | 85 |
19dad37d6dd89a264bef50d3320be7195d214332 | 1,357 | py | Python | setup.py | batflyer/awesome-bayes-nets | e11ec9d98acb72bb90928e212021fed55fd1b233 | [
"CC0-1.0"
] | 8 | 2019-06-15T09:15:20.000Z | 2022-02-20T03:08:13.000Z | setup.py | batflyer/awesome-bayes-nets | e11ec9d98acb72bb90928e212021fed55fd1b233 | [
"CC0-1.0"
] | null | null | null | setup.py | batflyer/awesome-bayes-nets | e11ec9d98acb72bb90928e212021fed55fd1b233 | [
"CC0-1.0"
] | 3 | 2020-01-28T20:16:41.000Z | 2021-08-17T18:56:21.000Z | """
Setup file for awesome-bib-builder
"""
import codecs
import os
from setuptools import find_packages, setup
# Get __version__ from _meta.py
# NOTE: exec() injects __version__/__author__/__email__/__license__ into this
# module's namespace; they are referenced below.
_meta_file = os.path.join("src", "_meta.py")
with open(_meta_file) as f:
    exec(f.read())
DISTNAME = "awesome-bib-builder"
DESCRIPTION = "Tool for generating an awesome README from bib files."
# Long description comes straight from the package README.
with codecs.open("src/README.md", encoding="utf-8") as f:
    LONG_DESCRIPTION = f.read()
MAINTAINER = __author__
MAINTAINER_EMAIL = __email__
URL = "https://github.com/batflyer/awesome-bayes-net"
LICENSE = __license__
DOWNLOAD_URL = "https://github.com/batflyer/awesome-bayes-net"
VERSION = __version__
CLASSIFIERS = [
    "Intended Audience :: Science/Research",
    "Topic :: Scientific/Engineering",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3.7",
]
INSTALL_REQUIRES = ["liquidpy==0.0.6", "bibtexparser==1.1.0"]
EXTRAS_REQUIRE = {"tests": ["pytest", "pytest-cov"], "docs": ["sphinx"]}
setup(
    name=DISTNAME,
    maintainer=MAINTAINER,
    maintainer_email=MAINTAINER_EMAIL,
    description=DESCRIPTION,
    license=LICENSE,
    url=URL,
    version=VERSION,
    download_url=DOWNLOAD_URL,
    long_description=LONG_DESCRIPTION,
    classifiers=CLASSIFIERS,
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRE,
)
| 27.14 | 72 | 0.719234 | """
Setup file for awesome-bib-builder
"""
import codecs
import os
from setuptools import find_packages, setup
# Get __version__ from _meta.py
# NOTE: exec() injects __version__/__author__/__email__/__license__ into this
# module's namespace; they are referenced below.
_meta_file = os.path.join("src", "_meta.py")
with open(_meta_file) as f:
    exec(f.read())
DISTNAME = "awesome-bib-builder"
DESCRIPTION = "Tool for generating an awesome README from bib files."
# Long description comes straight from the package README.
with codecs.open("src/README.md", encoding="utf-8") as f:
    LONG_DESCRIPTION = f.read()
MAINTAINER = __author__
MAINTAINER_EMAIL = __email__
URL = "https://github.com/batflyer/awesome-bayes-net"
LICENSE = __license__
DOWNLOAD_URL = "https://github.com/batflyer/awesome-bayes-net"
VERSION = __version__
CLASSIFIERS = [
    "Intended Audience :: Science/Research",
    "Topic :: Scientific/Engineering",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3.7",
]
INSTALL_REQUIRES = ["liquidpy==0.0.6", "bibtexparser==1.1.0"]
EXTRAS_REQUIRE = {"tests": ["pytest", "pytest-cov"], "docs": ["sphinx"]}
setup(
    name=DISTNAME,
    maintainer=MAINTAINER,
    maintainer_email=MAINTAINER_EMAIL,
    description=DESCRIPTION,
    license=LICENSE,
    url=URL,
    version=VERSION,
    download_url=DOWNLOAD_URL,
    long_description=LONG_DESCRIPTION,
    classifiers=CLASSIFIERS,
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRE,
)
| 0 | 0 | 0 |
e221cebb8f8dd6e0b6178559f588ab9dff739c06 | 149 | py | Python | catalog/bindings/csw/anim_add_accum_attrs_accumulate.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/anim_add_accum_attrs_accumulate.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/csw/anim_add_accum_attrs_accumulate.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from enum import Enum
__NAMESPACE__ = "http://www.w3.org/2001/SMIL20/"
| 16.555556 | 48 | 0.691275 | from enum import Enum
__NAMESPACE__ = "http://www.w3.org/2001/SMIL20/"
class AnimAddAccumAttrsAccumulate(Enum):
    """Allowed values of the SMIL 2.0 ``accumulate`` animation attribute."""
    NONE = "none"
    SUM = "sum"
| 0 | 53 | 23 |
6393b534607e1cae4cba8a92d28f3191f483d6b3 | 338 | py | Python | snmpagent_unity/commands/__init__.py | emc-openstack/snmp-agent | 466cd3ac072cd3a67c85ae9a4d3c7da3ae7ec9fc | [
"Apache-2.0"
] | 2 | 2019-03-01T11:14:59.000Z | 2019-10-02T17:47:59.000Z | snmpagent_unity/commands/__init__.py | emc-openstack/snmp-agent | 466cd3ac072cd3a67c85ae9a4d3c7da3ae7ec9fc | [
"Apache-2.0"
] | 2 | 2019-03-01T11:26:29.000Z | 2019-10-11T18:56:54.000Z | snmpagent_unity/commands/__init__.py | emc-openstack/snmp-agent | 466cd3ac072cd3a67c85ae9a4d3c7da3ae7ec9fc | [
"Apache-2.0"
] | 1 | 2019-10-03T21:09:17.000Z | 2019-10-03T21:09:17.000Z | from snmpagent_unity.commands import user, community, crypto, service
# Every CLI command class exposed by the agent, grouped by module.
CMD = [user.AddUser, user.UpdateUser, user.DeleteUser, user.ListUsers,
       community.CreateCommunity, community.DeleteCommunity,
       crypto.Encrypt, crypto.Decrypt,
       service.Start, service.Stop, service.Restart]
# Name -> command-class lookup table (each command class exposes a .name).
CMD_DICT = {cmd.name: cmd for cmd in CMD}
| 37.555556 | 70 | 0.751479 | from snmpagent_unity.commands import user, community, crypto, service
# Every CLI command class exposed by the agent, grouped by module.
CMD = [user.AddUser, user.UpdateUser, user.DeleteUser, user.ListUsers,
       community.CreateCommunity, community.DeleteCommunity,
       crypto.Encrypt, crypto.Decrypt,
       service.Start, service.Stop, service.Restart]
# Name -> command-class lookup table (each command class exposes a .name).
CMD_DICT = {cmd.name: cmd for cmd in CMD}
| 0 | 0 | 0 |
f2270f05625ebb16bc66a83c2b28a35118f9b92a | 11,918 | py | Python | utils.py | duanwenbo/Dynamic-Programming | f3c6dd5ac88fc961879ef3b150a61a8d05484773 | [
"MIT"
] | null | null | null | utils.py | duanwenbo/Dynamic-Programming | f3c6dd5ac88fc961879ef3b150a61a8d05484773 | [
"MIT"
] | null | null | null | utils.py | duanwenbo/Dynamic-Programming | f3c6dd5ac88fc961879ef3b150a61a8d05484773 | [
"MIT"
] | null | null | null | #!/user/bin/env python
# Author: Wenbo Duan
# Email: pv19120@bristol.ac.uk
# Time: 09/12/2021
# File: utils.py
# Supplementary codes for recording results into .txt files for both network and capital budget problems in dynamic programming.
import pandas as pd
import itertools
import re
from copy import deepcopy
def record_process(value_table, decision_table, n_stage, current_stage):
    """This is used for displaying and storing the result from dynamic programming.

    Pads the value/decision tables into rectangular n_stage-wide tables,
    pretty-prints them to ``lognetwork.txt``, and — once the backward
    recursion reaches stage 1 — writes the full route analysis to
    ``solutionnetwork.txt``.

    NOTE(review): both list arguments are padded *in place*, so the caller
    sees the mutated tables after this returns — confirm that is intended.
    """
    # beautify the format
    # fill the list by negative element
    for i, value_list in enumerate(value_table[:-1]):
        value_table[i] = [
            *value_table[i],
            *[-1] * (n_stage - len(value_list)),
        ]
    for i, decision_list in enumerate(decision_table):
        decision_table[i] = [
            *decision_table[i], *[-1] * (n_stage - len(decision_list))
        ]
    # filling the table by negative element
    # NOTE(review): the same `null_fill` list object is inserted repeatedly,
    # so those rows alias each other — safe only while they are never mutated.
    null_fill = [-1] * n_stage
    for _ in range(n_stage - len(value_table)):
        value_table.insert(0, null_fill)
    for _ in range(n_stage - len(decision_table) - 1):
        decision_table.insert(0, null_fill)
    decision_table.append([0] * n_stage)
    # sythesis decision and value as a whole table
    display_table = []
    for i in range(len(decision_table)):
        display_table.append(list(zip(decision_table[i], value_table[i])))
    # format the display table
    for i in display_table:
        i.reverse()
    df = pd.DataFrame(display_table)
    df = df.transpose()
    # rename column
    header = [
        "(d{0}(S), V{0}(S))".format(i) for i in range(len(display_table))
    ]
    df.columns = header
    # rename row
    index_mapper = {}
    for i in range(df.shape[0]):
        index_mapper[i] = str(df.shape[0] - i - 1)
    df = df.rename(index_mapper, axis='index')
    df.loc["stage"] = ["{}".format(i) for i in range(df.shape[0])]
    # Replace padding cells with a blank bar and add column separators.
    txt = str(df).replace(
        "(-1, -1)",
        "________|")  # .replace("))","*").replace(")",")|").replace("*","))|")
    txt = re.sub(r'(?<=\d)\)', ")|", txt)
    txt = re.sub(r'\)\)', "))|", txt)
    txt = txt.replace("(0, 0)", "      0")
    with open("lognetwork.txt", 'a+') as f:
        f.write(
            "____________________________________________________________________________________\n"
        )
        f.write(txt)
        f.write(
            "\n____________________________________________________________________________________"
        )
        f.write("\n\n\n")
    print(
        "\n\n\n____________________________________________________________________________________"
    )
    print(txt)
    # Each "or" in the rendered table marks a tie between UP and DOWN.
    # NOTE(review): this counts every occurrence of the substring "or" —
    # presumably only tie markers render it; verify no header/word collides.
    routes_n = len(re.findall(r'or', txt))
    if current_stage == 1:
        # when the back recursion is finished, create the output file
        with open("solutionnetwork.txt", "a+") as f:
            f.write(
                "The optimal decisions and associated values table is:\n\n ")
            f.write(txt)
            f.write("\n\nAs illustrated from the table:\n")
            if routes_n == 0:
                f.write("- There is 1 optimal route\n")
            else:
                f.write("- There are {} optimal routes\n".format(routes_n + 1))
            f.write("- The optimal cost is {}\n".format(value_table[0][0]))
        # extra text clearing indicating the nature of the solution
        if routes_n > 0:
            # Enumerate every distinct way of resolving the tied decisions.
            route_map = list(
                set(
                    itertools.permutations(
                        [*["U"] * routes_n, *["D"] * routes_n], routes_n)))
            assert len(route_map) == 2**routes_n, "check your routmap!"
            for i, route in enumerate(route_map):
                with open("solutionnetwork.txt", "a+") as f:
                    f.write("\n- Optimal route {}\n".format(i + 1))
                multi_decision_count = 0
                stage = 0
                state = 0
                # Walk the network from the origin, substituting this route's
                # choice wherever the table recorded a tie ("U or D").
                for _ in range(len(decision_table) - 1):
                    next_decision = decision_table[stage][state]
                    if "or" in next_decision:
                        next_decision = route[multi_decision_count]
                        multi_decision_count += 1
                    if next_decision == "U":
                        with open("solutionnetwork.txt", "a+") as f:
                            f.write(
                                "Turning {} from node {} to node {}\n".format(
                                    "UP", (stage, state),
                                    (stage + 1, state + 1)))
                        state += 1
                    else:
                        with open("solutionnetwork.txt", "a+") as f:
                            f.write(
                                "Turning {} from node {} to node {}\n".format(
                                    "DOWN", (stage, state),
                                    (stage + 1, state)))
                    stage += 1
                with open("solutionnetwork.txt", "a+") as f:
                    f.write("At a total cost of {}\n".format(
                        value_table[0][0]))
        else:
            # Single optimal route: follow the recorded decisions directly.
            stage = 0
            state = 0
            with open("solutionnetwork.txt", "a+") as f:
                f.write("\n- Optimal route:\n")
            for _ in range(len(decision_table) - 1):
                next_decision = decision_table[stage][state]
                if next_decision == "U":
                    with open("solutionnetwork.txt", "a+") as f:
                        f.write("Turning {} from node {} to node {}\n".format(
                            "UP", (stage, state), (stage + 1, state + 1)))
                    state += 1
                else:
                    with open("solutionnetwork.txt", "a+") as f:
                        f.write("Turning {} from node {} to node {}\n".format(
                            "DOWN", (stage, state), (stage + 1, state)))
                stage += 1
            with open("solutionnetwork.txt", "a+") as f:
                f.write("At a total cost of {}\n".format(value_table[0][0]))
        # Echo the finished solution file to stdout.
        with open("solutionnetwork.txt", "r") as f:
            contents = f.read()
        print(
            "\n\n\n Analyzing the final result...\n####################################################"
        )
        print(contents)
def record_process_bud(value_table: pd.DataFrame, stage: int):
    """Append a snapshot of the capital-budget DP table to ``logcapbud.txt``.

    Works on a copy of *value_table*: columns are relabelled as paired
    d_k(S)/V_k(S) headers, zero cells are masked with a blank bar, and
    all-NaN rows are dropped before the snapshot is written. The input
    frame itself is left untouched.
    """
    snapshot = value_table.copy(deep=True)
    n_pairs = snapshot.shape[1] // 2
    # Interleave decision/value labels: d_1(S), V_1(S), d_2(S), V_2(S), ...
    snapshot.columns = [
        label
        for k in range(1, n_pairs + 1)
        for label in ("d_{}(S)".format(k), "V_{}(S)".format(k))
    ]
    # Mask zero cells and drop rows that carry no information at all.
    snapshot = snapshot.replace(0, "______").dropna(how="all")
    with open("logcapbud.txt", "a+") as log:
        log.write("Stage {} completed\n".format(stage // 2 + 1))
        log.write(str(snapshot))
        log.write("\n\n\n")
def record_result_bud(value_table: pd.DataFrame, plan: pd.DataFrame,
budget: int):
"""This is used for displaying and storing the result of captial budget problem"""
table = deepcopy(value_table)
_budget = deepcopy(budget)
length = table.shape[1] // 2
head = list(
zip(["d_{}(S)".format(i + 1) for i in range(length)],
["V_{}(S)".format(i + 1) for i in range(length)]))
head = [head_name for tup in head for head_name in tup]
table.columns = head
table = table.replace(0, "______")
stages = value_table.shape[1] // 2
with open("solutioncapbud.txt", "a+") as f:
f.write("The optimal decisions and associated values table is:\n\n")
f.write(str(table))
f.write("\n\n")
f.write(
" \nwe can find the solution to the original problem by working backwards through the table.\n\n"
)
f.write(
"Since the capital available for the {} stages are {}m Pounds, we can:\n"
.format(stages, _budget))
# Analysis result:
multi_path_list = [] # [(stage, another path), (stage, another path)]
decision_route = []
for i in range(stages * 2, 0, -2):
stage_dispaly = i // 2 # i.e. 1,2,3
stage_index = i - 2 # i.e. 0,2,4
cell_value = table.loc[_budget, head[stage_index]]
last_plan = _detect_multiple(cell_value, stage_index)
decision_route.append(last_plan + 1)
last_cost = int(plan.loc[last_plan, stage_index])
last_buget = _budget - last_cost
with open("solutioncapbud.txt", "a+") as f:
f.write("- Looking up d_{}({}) and find the optimal decision {}\n".
format(stage_dispaly, _budget, last_plan + 1))
if stage_dispaly != 1:
f.write(
"- Implementing plan {} for subsdiray {}, leaving state {}-{}={} for subsdiray {}\n"
.format(last_plan + 1, stage_dispaly, _budget, last_cost,
last_buget, stage_dispaly - 1))
_budget = last_buget
with open("solutioncapbud.txt", "a+") as f:
f.write("- This is gives decision sequence d = {}".format(
list(reversed(decision_route))))
f.write("\n- The expected returns would be {}m Pounds\n\n".format(
value_table.iloc[-1, -1]))
# Multi paths
if len(multi_path_list) != 0:
with open("solutioncapbud.txt", "a+") as f:
f.write("\n\nAlternatively:\n\n")
__budget = deepcopy(budget)
for index, flag in enumerate(multi_path_list):
_stage_index, _decision = flag
decision_route = []
for i in range(stages * 2, 0, -2):
stage_dispaly = i // 2 # i.e. 1,2,3
stage_index = i - 2 # i.e. 0,2,4
cell_value = table.loc[__budget, head[stage_index]]
if _stage_index == stage_index:
last_plan = _decision
multi_path_list.pop(index)
else:
last_plan = _detect_multiple(cell_value, stage_index)
decision_route.append(last_plan + 1)
last_cost = int(plan.loc[last_plan, stage_index])
last_buget = __budget - last_cost
with open("solutioncapbud.txt", "a+") as f:
f.write(
"- Looking up d_{}({}) and find the optimal decision {}\n"
.format(stage_dispaly, __budget, last_plan + 1))
if stage_dispaly != 1:
f.write(
"- Implementing plan {} for subsdiray {}, leaving state {}-{}={} for subsdiray {}\n"
.format(last_plan + 1, stage_dispaly, __budget,
last_cost, last_buget, stage_dispaly - 1))
__budget = last_buget
with open("solutioncapbud.txt", "a+") as f:
f.write("- This is gives decision sequence d = {}".format(
list(reversed(decision_route))))
f.write(
"\n- The expected returns would be {}m Pounds\n\n".format(
value_table.iloc[-1, -1]))
if __name__ == "__main__":
pass | 43.816176 | 129 | 0.514264 | #!/user/bin/env python
# Author: Wenbo Duan
# Email: pv19120@bristol.ac.uk
# Time: 09/12/2021
# File: utils.py
# Supplementary codes for recording results into .txt files for both network and capital budget problems in dynamic programming.
import pandas as pd
import itertools
import re
from copy import deepcopy
def record_process(value_table, decision_table, n_stage, current_stage):
"""This is used for displaying and storing the result from dynamic programming"""
# beautify the format
# fill the list by negative element
for i, value_list in enumerate(value_table[:-1]):
value_table[i] = [
*value_table[i],
*[-1] * (n_stage - len(value_list)),
]
for i, decision_list in enumerate(decision_table):
decision_table[i] = [
*decision_table[i], *[-1] * (n_stage - len(decision_list))
]
# filling the table by negative element
null_fill = [-1] * n_stage
for _ in range(n_stage - len(value_table)):
value_table.insert(0, null_fill)
for _ in range(n_stage - len(decision_table) - 1):
decision_table.insert(0, null_fill)
decision_table.append([0] * n_stage)
# sythesis decision and value as a whole table
display_table = []
for i in range(len(decision_table)):
display_table.append(list(zip(decision_table[i], value_table[i])))
# format the display table
for i in display_table:
i.reverse()
df = pd.DataFrame(display_table)
df = df.transpose()
# rename column
header = [
"(d{0}(S), V{0}(S))".format(i) for i in range(len(display_table))
]
df.columns = header
# rename row
index_mapper = {}
for i in range(df.shape[0]):
index_mapper[i] = str(df.shape[0] - i - 1)
df = df.rename(index_mapper, axis='index')
df.loc["stage"] = ["{}".format(i) for i in range(df.shape[0])]
txt = str(df).replace(
"(-1, -1)",
"________|") # .replace("))","*").replace(")",")|").replace("*","))|")
txt = re.sub(r'(?<=\d)\)', ")|", txt)
txt = re.sub(r'\)\)', "))|", txt)
txt = txt.replace("(0, 0)", " 0")
with open("lognetwork.txt", 'a+') as f:
f.write(
"____________________________________________________________________________________\n"
)
f.write(txt)
f.write(
"\n____________________________________________________________________________________"
)
f.write("\n\n\n")
print(
"\n\n\n____________________________________________________________________________________"
)
print(txt)
routes_n = len(re.findall(r'or', txt))
if current_stage == 1:
# when the back recursion is finished, create the output file
with open("solutionnetwork.txt", "a+") as f:
f.write(
"The optimal decisions and associated values table is:\n\n ")
f.write(txt)
f.write("\n\nAs illustrated from the table:\n")
if routes_n == 0:
f.write("- There is 1 optimal route\n")
else:
f.write("- There are {} optimal routes\n".format(routes_n + 1))
f.write("- The optimal cost is {}\n".format(value_table[0][0]))
# extra text clearing indicating the nature of the solution
if routes_n > 0:
route_map = list(
set(
itertools.permutations(
[*["U"] * routes_n, *["D"] * routes_n], routes_n)))
assert len(route_map) == 2**routes_n, "check your routmap!"
for i, route in enumerate(route_map):
with open("solutionnetwork.txt", "a+") as f:
f.write("\n- Optimal route {}\n".format(i + 1))
multi_decision_count = 0
stage = 0
state = 0
for _ in range(len(decision_table) - 1):
next_decision = decision_table[stage][state]
if "or" in next_decision:
next_decision = route[multi_decision_count]
multi_decision_count += 1
if next_decision == "U":
with open("solutionnetwork.txt", "a+") as f:
f.write(
"Turning {} from node {} to node {}\n".format(
"UP", (stage, state),
(stage + 1, state + 1)))
state += 1
else:
with open("solutionnetwork.txt", "a+") as f:
f.write(
"Turning {} from node {} to node {}\n".format(
"DOWN", (stage, state),
(stage + 1, state)))
stage += 1
with open("solutionnetwork.txt", "a+") as f:
f.write("At a total cost of {}\n".format(
value_table[0][0]))
else:
stage = 0
state = 0
with open("solutionnetwork.txt", "a+") as f:
f.write("\n- Optimal route:\n")
for _ in range(len(decision_table) - 1):
next_decision = decision_table[stage][state]
if next_decision == "U":
with open("solutionnetwork.txt", "a+") as f:
f.write("Turning {} from node {} to node {}\n".format(
"UP", (stage, state), (stage + 1, state + 1)))
state += 1
else:
with open("solutionnetwork.txt", "a+") as f:
f.write("Turning {} from node {} to node {}\n".format(
"DOWN", (stage, state), (stage + 1, state)))
stage += 1
with open("solutionnetwork.txt", "a+") as f:
f.write("At a total cost of {}\n".format(value_table[0][0]))
with open("solutionnetwork.txt", "r") as f:
contents = f.read()
print(
"\n\n\n Analyzing the final result...\n####################################################"
)
print(contents)
def record_process_bud(value_table: pd.DataFrame, stage: int):
"""This is used for displaying and storing the process of captial budget problem"""
# created a head
table = deepcopy(value_table)
length = table.shape[1] // 2
head = list(
zip(["d_{}(S)".format(i + 1) for i in range(length)],
["V_{}(S)".format(i + 1) for i in range(length)]))
head = [head_name for tup in head for head_name in tup]
table.columns = head
table = table.replace(0, "______")
table = table.dropna(how="all")
with open("logcapbud.txt", "a+") as f:
f.write("Stage {} completed\n".format(stage // 2 + 1))
f.write(str(table))
f.write("\n\n\n")
def record_result_bud(value_table: pd.DataFrame, plan: pd.DataFrame,
budget: int):
"""This is used for displaying and storing the result of captial budget problem"""
table = deepcopy(value_table)
_budget = deepcopy(budget)
length = table.shape[1] // 2
head = list(
zip(["d_{}(S)".format(i + 1) for i in range(length)],
["V_{}(S)".format(i + 1) for i in range(length)]))
head = [head_name for tup in head for head_name in tup]
table.columns = head
table = table.replace(0, "______")
stages = value_table.shape[1] // 2
with open("solutioncapbud.txt", "a+") as f:
f.write("The optimal decisions and associated values table is:\n\n")
f.write(str(table))
f.write("\n\n")
f.write(
" \nwe can find the solution to the original problem by working backwards through the table.\n\n"
)
f.write(
"Since the capital available for the {} stages are {}m Pounds, we can:\n"
.format(stages, _budget))
def _detect_multiple(cell_value, stage_index):
if type(cell_value) == str:
cell_value = cell_value.replace(' ', '').split('or')
cell_value = [int(i) for i in cell_value]
multi_path_list.append((stage_index, cell_value[1] - 1))
return cell_value[0] - 1
else:
return cell_value - 1
# Analysis result:
multi_path_list = [] # [(stage, another path), (stage, another path)]
decision_route = []
for i in range(stages * 2, 0, -2):
stage_dispaly = i // 2 # i.e. 1,2,3
stage_index = i - 2 # i.e. 0,2,4
cell_value = table.loc[_budget, head[stage_index]]
last_plan = _detect_multiple(cell_value, stage_index)
decision_route.append(last_plan + 1)
last_cost = int(plan.loc[last_plan, stage_index])
last_buget = _budget - last_cost
with open("solutioncapbud.txt", "a+") as f:
f.write("- Looking up d_{}({}) and find the optimal decision {}\n".
format(stage_dispaly, _budget, last_plan + 1))
if stage_dispaly != 1:
f.write(
"- Implementing plan {} for subsdiray {}, leaving state {}-{}={} for subsdiray {}\n"
.format(last_plan + 1, stage_dispaly, _budget, last_cost,
last_buget, stage_dispaly - 1))
_budget = last_buget
with open("solutioncapbud.txt", "a+") as f:
f.write("- This is gives decision sequence d = {}".format(
list(reversed(decision_route))))
f.write("\n- The expected returns would be {}m Pounds\n\n".format(
value_table.iloc[-1, -1]))
# Multi paths
if len(multi_path_list) != 0:
with open("solutioncapbud.txt", "a+") as f:
f.write("\n\nAlternatively:\n\n")
__budget = deepcopy(budget)
for index, flag in enumerate(multi_path_list):
_stage_index, _decision = flag
decision_route = []
for i in range(stages * 2, 0, -2):
stage_dispaly = i // 2 # i.e. 1,2,3
stage_index = i - 2 # i.e. 0,2,4
cell_value = table.loc[__budget, head[stage_index]]
if _stage_index == stage_index:
last_plan = _decision
multi_path_list.pop(index)
else:
last_plan = _detect_multiple(cell_value, stage_index)
decision_route.append(last_plan + 1)
last_cost = int(plan.loc[last_plan, stage_index])
last_buget = __budget - last_cost
with open("solutioncapbud.txt", "a+") as f:
f.write(
"- Looking up d_{}({}) and find the optimal decision {}\n"
.format(stage_dispaly, __budget, last_plan + 1))
if stage_dispaly != 1:
f.write(
"- Implementing plan {} for subsdiray {}, leaving state {}-{}={} for subsdiray {}\n"
.format(last_plan + 1, stage_dispaly, __budget,
last_cost, last_buget, stage_dispaly - 1))
__budget = last_buget
with open("solutioncapbud.txt", "a+") as f:
f.write("- This is gives decision sequence d = {}".format(
list(reversed(decision_route))))
f.write(
"\n- The expected returns would be {}m Pounds\n\n".format(
value_table.iloc[-1, -1]))
if __name__ == "__main__":
pass | 341 | 0 | 29 |
498fa1348e6d550566bfb2df46f3e39c700d5459 | 4,893 | py | Python | data_processing/sliding_window.py | mariusbock/dl-for-har | d331e4234b5c204848d1fc852cb7a5221414b509 | [
"MIT"
] | 21 | 2021-08-16T17:09:17.000Z | 2022-03-31T20:36:56.000Z | data_processing/sliding_window.py | mariusbock/dl-for-har | d331e4234b5c204848d1fc852cb7a5221414b509 | [
"MIT"
] | 2 | 2021-09-20T10:50:27.000Z | 2021-12-03T11:55:07.000Z | data_processing/sliding_window.py | mariusbock/dl-for-har | d331e4234b5c204848d1fc852cb7a5221414b509 | [
"MIT"
] | 13 | 2021-08-05T05:33:11.000Z | 2022-03-30T13:33:26.000Z | ##################################################
# All functions related to applying sliding window on a dataset
##################################################
# Author: Marius Bock
# Email: marius.bock(at)uni-siegen.de
##################################################
import numpy as np
def sliding_window_seconds(data, length_in_seconds=1, sampling_rate=50, overlap_ratio=None):
"""
Return a sliding window measured in seconds over a data array.
:param data: dataframe
Input array, can be numpy or pandas dataframe
:param length_in_seconds: int, default: 1
Window length as seconds
:param sampling_rate: int, default: 50
Sampling rate in hertz as integer value
:param overlap_ratio: int, default: None
Overlap is meant as percentage and should be an integer value
:return: tuple of windows and indices
"""
windows = []
indices = []
curr = 0
overlapping_elements = 0
win_len = int(length_in_seconds * sampling_rate)
if overlap_ratio is not None:
overlapping_elements = int((overlap_ratio / 100) * win_len)
if overlapping_elements >= win_len:
print('Number of overlapping elements exceeds window size.')
return
while curr < len(data) - win_len:
windows.append(data[curr:curr + win_len])
indices.append([curr, curr + win_len])
curr = curr + win_len - overlapping_elements
return np.array(windows), np.array(indices)
def sliding_window_samples(data, samples_per_window, overlap_ratio):
"""
Return a sliding window measured in number of samples over a data array.
:param data: dataframe
Input array, can be numpy or pandas dataframe
:param samples_per_window: int
Window length as number of samples per window
:param overlap_ratio: int
Overlap is meant as percentage and should be an integer value
:return: dataframe, list
Tuple of windows and indices
"""
windows = []
indices = []
curr = 0
win_len = int(samples_per_window)
if overlap_ratio is not None:
overlapping_elements = int((overlap_ratio / 100) * (win_len))
if overlapping_elements >= win_len:
print('Number of overlapping elements exceeds window size.')
return
while curr < len(data) - win_len:
windows.append(data[curr:curr + win_len])
indices.append([curr, curr + win_len])
curr = curr + win_len - overlapping_elements
try:
result_windows = np.array(windows)
result_indices = np.array(indices)
except:
result_windows = np.empty(shape=(len(windows), win_len, data.shape[1]), dtype=object)
result_indices = np.array(indices)
for i in range(0, len(windows)):
result_windows[i] = windows[i]
result_indices[i] = indices[i]
return result_windows, result_indices
def apply_sliding_window(data_x, data_y, sliding_window_size, unit, sampling_rate, sliding_window_overlap):
"""
Function which transforms a dataset into windows of a specific size and overlap.
:param data_x: numpy float array
Array containing the features (can be 2D)
:param data_y: numpy float array
Array containing the corresponding labels to the dataset (is 1D)
:param sliding_window_size: integer or float
Size of each window (either in seconds or units)
:param unit: string, ['units', 'seconds']
Unit in which the sliding window is measured
:param sampling_rate: integer
Number of hertz in which the dataset is sampled
:param sliding_window_overlap: integer
Amount of overlap between the sliding windows (measured in percentage, e.g. 20 is 20%)
:return:
"""
full_data = np.concatenate((data_x, data_y[:, None]), axis=1)
output_x = None
output_y = None
for i, subject in enumerate(np.unique(full_data[:, 0])):
subject_data = full_data[full_data[:, 0] == subject]
subject_x, subject_y = subject_data[:, :-1], subject_data[:, -1]
if unit == 'units':
tmp_x, _ = sliding_window_samples(subject_x, sliding_window_size, sliding_window_overlap)
tmp_y, _ = sliding_window_samples(subject_y, sliding_window_size, sliding_window_overlap)
elif unit == 'seconds':
tmp_x, _ = sliding_window_seconds(subject_x, sliding_window_size, sampling_rate, sliding_window_overlap)
tmp_y, _ = sliding_window_seconds(subject_y, sliding_window_size, sampling_rate, sliding_window_overlap)
if output_x is None:
output_x = tmp_x
output_y = tmp_y
else:
output_x = np.concatenate((output_x, tmp_x), axis=0)
output_y = np.concatenate((output_y, tmp_y), axis=0)
output_y = [[i[-1]] for i in output_y]
return output_x, np.array(output_y).flatten()
| 41.117647 | 116 | 0.651747 | ##################################################
# All functions related to applying sliding window on a dataset
##################################################
# Author: Marius Bock
# Email: marius.bock(at)uni-siegen.de
##################################################
import numpy as np
def sliding_window_seconds(data, length_in_seconds=1, sampling_rate=50, overlap_ratio=None):
"""
Return a sliding window measured in seconds over a data array.
:param data: dataframe
Input array, can be numpy or pandas dataframe
:param length_in_seconds: int, default: 1
Window length as seconds
:param sampling_rate: int, default: 50
Sampling rate in hertz as integer value
:param overlap_ratio: int, default: None
Overlap is meant as percentage and should be an integer value
:return: tuple of windows and indices
"""
windows = []
indices = []
curr = 0
overlapping_elements = 0
win_len = int(length_in_seconds * sampling_rate)
if overlap_ratio is not None:
overlapping_elements = int((overlap_ratio / 100) * win_len)
if overlapping_elements >= win_len:
print('Number of overlapping elements exceeds window size.')
return
while curr < len(data) - win_len:
windows.append(data[curr:curr + win_len])
indices.append([curr, curr + win_len])
curr = curr + win_len - overlapping_elements
return np.array(windows), np.array(indices)
def sliding_window_samples(data, samples_per_window, overlap_ratio):
"""
Return a sliding window measured in number of samples over a data array.
:param data: dataframe
Input array, can be numpy or pandas dataframe
:param samples_per_window: int
Window length as number of samples per window
:param overlap_ratio: int
Overlap is meant as percentage and should be an integer value
:return: dataframe, list
Tuple of windows and indices
"""
windows = []
indices = []
curr = 0
win_len = int(samples_per_window)
if overlap_ratio is not None:
overlapping_elements = int((overlap_ratio / 100) * (win_len))
if overlapping_elements >= win_len:
print('Number of overlapping elements exceeds window size.')
return
while curr < len(data) - win_len:
windows.append(data[curr:curr + win_len])
indices.append([curr, curr + win_len])
curr = curr + win_len - overlapping_elements
try:
result_windows = np.array(windows)
result_indices = np.array(indices)
except:
result_windows = np.empty(shape=(len(windows), win_len, data.shape[1]), dtype=object)
result_indices = np.array(indices)
for i in range(0, len(windows)):
result_windows[i] = windows[i]
result_indices[i] = indices[i]
return result_windows, result_indices
def apply_sliding_window(data_x, data_y, sliding_window_size, unit, sampling_rate, sliding_window_overlap):
"""
Function which transforms a dataset into windows of a specific size and overlap.
:param data_x: numpy float array
Array containing the features (can be 2D)
:param data_y: numpy float array
Array containing the corresponding labels to the dataset (is 1D)
:param sliding_window_size: integer or float
Size of each window (either in seconds or units)
:param unit: string, ['units', 'seconds']
Unit in which the sliding window is measured
:param sampling_rate: integer
Number of hertz in which the dataset is sampled
:param sliding_window_overlap: integer
Amount of overlap between the sliding windows (measured in percentage, e.g. 20 is 20%)
:return:
"""
full_data = np.concatenate((data_x, data_y[:, None]), axis=1)
output_x = None
output_y = None
for i, subject in enumerate(np.unique(full_data[:, 0])):
subject_data = full_data[full_data[:, 0] == subject]
subject_x, subject_y = subject_data[:, :-1], subject_data[:, -1]
if unit == 'units':
tmp_x, _ = sliding_window_samples(subject_x, sliding_window_size, sliding_window_overlap)
tmp_y, _ = sliding_window_samples(subject_y, sliding_window_size, sliding_window_overlap)
elif unit == 'seconds':
tmp_x, _ = sliding_window_seconds(subject_x, sliding_window_size, sampling_rate, sliding_window_overlap)
tmp_y, _ = sliding_window_seconds(subject_y, sliding_window_size, sampling_rate, sliding_window_overlap)
if output_x is None:
output_x = tmp_x
output_y = tmp_y
else:
output_x = np.concatenate((output_x, tmp_x), axis=0)
output_y = np.concatenate((output_y, tmp_y), axis=0)
output_y = [[i[-1]] for i in output_y]
return output_x, np.array(output_y).flatten()
| 0 | 0 | 0 |
f8c0ad691c5eb36ea710c21dd6c6b785b3407eb5 | 19,464 | py | Python | nbs/10_tutorial.pets.py | huangyingw/fastai_fastai | 68ff76a21b2f70f44fb91885abcb73c1c213ec1a | [
"Apache-2.0"
] | 1 | 2022-02-06T22:11:10.000Z | 2022-02-06T22:11:10.000Z | nbs/10_tutorial.pets.py | huangyingw/fastai_fastai | 68ff76a21b2f70f44fb91885abcb73c1c213ec1a | [
"Apache-2.0"
] | null | null | null | nbs/10_tutorial.pets.py | huangyingw/fastai_fastai | 68ff76a21b2f70f44fb91885abcb73c1c213ec1a | [
"Apache-2.0"
] | null | null | null | # ---
# jupyter:
# jupytext:
# formats: ipynb,py
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# hide
# skip
from fastai.vision.all import *
! [-e / content] & & pip install - Uqq fastai # upgrade fastai on colab
# # Tutorial - Assemble the data on the pets dataset
#
# > Using `Datasets`, `Pipeline`, `TfmdLists` and `Transform` in computer vision
# ## Overview
# In this tutorial, we look in depth at the middle level API for collecting data in computer vision. First we will see how to use:
#
# - `Transform` to process the data
# - `Pipeline` to composes transforms
#
# Those are just functions with added functionality. For dataset processing, we will look in a second part at
#
# - `TfmdLists` to apply one `Pipeline` of `Tranform`s on a collection of items
# - `Datasets` to apply several `Pipeline` of `Transform`s on a collection of items in parallel and produce tuples
#
# The general rule is to use `TfmdLists` when your transforms will output the tuple (input,target) and `Datasets` when you build separate `Pipeline`s for each of your input(s)/target(s).
#
# After this tutorial, you might be interested by the [siamese tutorial](http://docs.fast.ai/tutorial.siamese) that goes even more in depth in the data APIs, showing you how to write your custom types and how to customize the behavior of `show_batch` and `show_results`.
# ## Processing data
# Cleaning and processing data is one of the most time-consuming things in machine learning, which is why fastai tries to help you as much as it can. At its core, preparing the data for your model can be formalized as a sequence of transformations you apply to some raw items. For instance, in a classic image classification problem, we start with filenames. We have to open the corresponding images, resize them, convert them to tensors, maybe apply some kind of data augmentation, before we are ready to batch them. And that's just for the inputs of our model, for the targets, we need to extract the label of our filename and convert it to an integer.
#
# This process needs to be somewhat reversible, because we often want to inspect our data to double check what we feed the model actually makes sense. That's why fastai represents all those operations by `Transform`s, which you can sometimes undo with a `decode` method.
# ### Transform
# First we'll have a look at the basic steps using a single MNIST image. We'll start with a filename, and see step by step how it can be converted in to a labelled image that can be displayed and used for modeling. We use the usual `untar_data` to download our dataset (if necessary) and get all the image files:
source = untar_data(URLs.MNIST_TINY) / 'train'
items = get_image_files(source)
fn = items[0]
fn
# We'll look at each `Transform` needed in turn. Here's how we can open an image file:
img = PILImage.create(fn)
img
# Then we can convert it to a `C*H*W` tensor (for channel x height x width, which is the convention in PyTorch):
tconv = ToTensor()
img = tconv(img)
img.shape, type(img)
# Now that's done, we can create our labels. First extracting the text label:
lbl = parent_label(fn)
lbl
# And then converting to an int for modeling:
tcat = Categorize(vocab=['3', '7'])
lbl = tcat(lbl)
lbl
# We use `decode` to reverse transforms for display. Reversing the `Categorize` transform result in a class name we can display:
lbld = tcat.decode(lbl)
lbld
# ### Pipeline
# We can compose our image steps using `Pipeline`:
pipe = Pipeline([PILImage.create, tconv])
img = pipe(fn)
img.shape
# A `Pipeline` can decode and show an item.
pipe.show(img, figsize=(1, 1), cmap='Greys')
# The show method works behind the scenes with types. Transforms will make sure the type of an element they receive is preserved. Here `PILImage.create` returns a `PILImage`, which knows how to show itself. `tconv` converts it to a `TensorImage`, which also knows how to show itself.
type(img)
# Those types are also used to enable different behaviors depending on the input received (for instance you don't do data augmentation the same way on an image, a segmentation mask or a bounding box).
# ## Loading the pets dataset using only `Transform`
# Let's see how to use `fastai.data` to process the Pets dataset. If you are used to writing your own PyTorch `Dataset`s, what will feel more natural is to write everything in one `Transform`. We use *source* to refer to the underlying source of our data (e.g. a directory on disk, a database connection, a network connection, etc). Then we grab the items.
source = untar_data(URLs.PETS) / "images"
items = get_image_files(source)
# We'll use this function to create consistently sized tensors from image files:
# Before we can create a `Transform`, we need a type that knows how to show itself (if we want to use the show method). Here we define a `TitledImage`:
# Let's check it works:
img = resized_image(items[0])
TitledImage(img, 'test title').show()
# ### Using decodes for showing processed data
# To decode data for showing purposes (like de-normalizing an image or converting back an index to its corresponding class), we implement a <code>decodes</code> method inside a `Transform`.
# The `Transform` opens and resizes the images on one side, label it and convert that label to an index using `o2i` on the other side. Inside the <code>decodes</code> method, we decode the index using the `vocab`. The image is left as is (we can't really show a filename!).
#
# To use this `Transform`, we need a label function. Here we use a regex on the `name` attribute of our filenames:
labeller = using_attr(RegexLabeller(pat=r'^(.*)_\d+.jpg$'), 'name')
# Then we gather all the possible labels, uniqueify them and ask for the two correspondences (vocab and o2i) using `bidir=True`. We can then use them to build our pet transform.
vals = list(map(labeller, items))
vocab, o2i = uniqueify(vals, sort=True, bidir=True)
pets = PetTfm(vocab, o2i, labeller)
# We can check how it's applied to a filename:
x, y = pets(items[0])
x.shape, y
# And we can decode our transformed version and show it:
dec = pets.decode([x, y])
dec.show()
# Note that like `__call__ ` and <code>encodes</code>, we implemented a <code>decodes</code> method but we actually call `decode` on our `Transform`.
#
# Also note that our <code>decodes</code> method received the two objects (x and y). We said in the previous section `Transform` dispatch over tuples (for the encoding as well as the decoding) but here it took our two elements as a whole and did not try to decode x and y separately. Why is that? It's because we pass a list `[x,y]` to decodes. `Transform`s dispatch over tuples, but tuples only. And as we saw as well, to prevent a `Transform` from dispatching over a tuple, we just have to make it an `ItemTransform`:
dec = pets.decode(pets(items[0]))
dec.show()
# ### Setting up the internal state with a setups
# Now let's make our `ItemTransform` automatically set up its state from the data. This way, when we combine our `Transform` with the data, it will automatically get set up without us having to do anything. This is very easy to do: just copy the lines we had before to build the categories inside the transform into a <code>setups</code> method:
# Now we can create our `Transform`, call its setup, and it will be ready to be used:
pets = PetTfm()
pets.setup(items)
x, y = pets(items[0])
x.shape, y
# And like before, there is no problem to decode it:
dec = pets.decode((x, y))
dec.show()
# ### Combining our `Transform` with data augmentation in a `Pipeline`.
# We can take advantage of fastai's data augmentation transforms if we give the right type to our elements. Instead of returning a standard `PIL.Image`, if our transform returns the fastai type `PILImage`, we can then use any fastai's transform with it. Let's just return a `PILImage` for our first element:
# We can then combine that transform with `ToTensor`, `Resize` or `FlipItem` to randomly flip our image in a `Pipeline`:
tfms = Pipeline([PetTfm(), Resize(224), FlipItem(p=1), ToTensor()])
# Calling `setup` on a `Pipeline` will set each transform in order:
tfms.setup(items)
# To check the setup was done properly, we want to see if we did build the vocab. One cool trick of `Pipeline` is that when asking for an attribute, it will look through each of its `Transform`s for that attribute and give you the result (or the list of results if the attribute is in multiple transforms):
tfms.vocab
# Then we can call our pipeline:
x, y = tfms(items[0])
x.shape, y
# We can see `ToTensor` and `Resize` were applied to the first element of our tuple (which was of type `PILImage`) but not the second. We can even have a look at our element to check the flip was also applied:
tfms.show(tfms(items[0]))
# `Pipeline.show` will call decode on each `Transform` until it gets a type that knows how to show itself. The library considers a tuple as knowing how to show itself if all its parts have a `show` method. Here it does not happen before reaching `PetTfm` since the second part of our tuple is an int. But after decoding the original `PetTfm`, we get a `TitledImage` which has a `show` method.
#
# It's a good point to note that the `Transform`s of the `Pipeline` are sorted by their internal `order` attribute (with a default of `order=0`). You can always check the order in which the transforms are in a `Pipeline` by looking at its representation:
tfms
# Even if we define `tfms` with `Resize` before `FlipItem`, we can see they have been reordered because we have:
FlipItem.order, Resize.order
# To customize the order of a `Transform`, just set `order = ...` before the `__init__` (it's a class attribute). Let's make `PetTfm` of order -5 to be sure it's always run first:
# Then we can mess up the order of the transforms in our `Pipeline` but it will fix itself:
tfms = Pipeline([Resize(224), PetTfm(), FlipItem(p=1), ToTensor()])
tfms
# Now that we have a good `Pipeline` of transforms, let's add it to a list of filenames to build our dataset. A `Pipeline` combined with a collection is a `TfmdLists` in fastai.
# ## `TfmdLists` and `Datasets`
# The main difference between `TfmdLists` and `Datasets` is the number of `Pipeline`s you have: `TfmdLists` take one `Pipeline` to transform a list (like we currently have) whereas `Datasets` combines several `Pipeline`s in parallel to create a tuple from one set of raw items, for instance a tuple (input, target).
# ### One pipeline makes a `TfmdLists`
# Creating a `TfmdLists` just requires a list of items and a list of transforms that will be combined in a `Pipeline`:
tls = TfmdLists(items, [Resize(224), PetTfm(), FlipItem(p=0.5), ToTensor()])
x, y = tls[0]
x.shape, y
# We did not need to pass anything to `PetTfm` thanks to our setup method: the `Pipeline` was automatically setup on the `items` during the initialization, so `PetTfm` has created its vocab like before:
tls.vocab
# We can ask the `TfmdLists` to show the items we got:
tls.show((x, y))
# Or we have a shortcut with `show_at`:
show_at(tls, 0)
# ### Training and validation set
# `TfmdLists` has an 's' in its name because it can represent several transformed lists: your training and validation sets. To use that functionality, we just need to pass `splits` to the initialization. `splits` should be a list of lists of indices (one list per set). To help create splits, we can use all the *splitters* of the fastai library:
splits = RandomSplitter(seed=42)(items)
splits
tls = TfmdLists(items, [Resize(224), PetTfm(), FlipItem(p=0.5), ToTensor()], splits=splits)
# Then your `tls` get a train and valid attributes (it also had them before, but the valid was empty and the train contained everything).
show_at(tls.train, 0)
# An interesting thing is that unless you pass `train_setup=False`, your transforms are set up on the training set only (which is best practice): the `items` received by <code>setups</code> are just the elements of the training set.
# ### Getting to `DataLoaders`
# From a `TfmdLists`, getting a `DataLoaders` object is very easy, you just have to call the `dataloaders` method:
dls = tls.dataloaders(bs=64)
# And `show_batch` will just *work*:
dls.show_batch()
# You can even add augmentation transforms, since we have a proper fastai typed image. Just remember to add the `IntToFloatTensor` transform that deals with the conversion of int to float (augmentation transforms of fastai on the GPU require float tensors). When calling `TfmdLists.dataloaders`, you pass the `batch_tfms` to `after_batch` (and potential new `item_tfms` to `after_item`):
dls = tls.dataloaders(bs=64, after_batch=[IntToFloatTensor(), *aug_transforms()])
dls.show_batch()
# ### Using `Datasets`
# `Datasets` applies a list of list of transforms (or list of `Pipeline`s) lazily to items of a collection, creating one output per list of transforms/`Pipeline`. This makes it easier for us to separate out steps of a process, so that we can re-use them and modify the process more easily. This is what lays the foundation of the data block API: we can easily mix and match types as inputs or outputs as they are associated to certain pipelines of transforms.
#
# For instance, let's write our own `ImageResizer` transform with two different implementations for images or masks:
# Specifying the type-annotations makes it so that our transform does nothing to things that are neither `PILImage` nor `PILMask`, and resizes images with `self.resample` and masks with nearest-neighbor interpolation. To create a `Datasets`, we then pass two pipelines of transforms, one for the input and one for the target:
tfms = [[PILImage.create, ImageResizer(128), ToTensor(), IntToFloatTensor()],
[labeller, Categorize()]]
dsets = Datasets(items, tfms)
# We can check that inputs and outputs have the right types:
t = dsets[0]
type(t[0]), type(t[1])
# We can decode and show using `dsets`:
x, y = dsets.decode(t)
x.shape, y
dsets.show(t)
# And we can pass our train/validation split like in `TfmdLists`:
dsets = Datasets(items, tfms, splits=splits)
# But we are not using the fact that `Transform`s dispatch over tuples here. `ImageResizer`, `ToTensor` and `IntToFloatTensor` could be passed as transforms over the tuple. This is done in `.dataloaders` by passing them to `after_item`. They won't do anything to the category but will only be applied to the inputs.
tfms = [[PILImage.create], [labeller, Categorize()]]
dsets = Datasets(items, tfms, splits=splits)
dls = dsets.dataloaders(bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
# And we can check it works with `show_batch`:
dls.show_batch()
# If we just wanted to build one `DataLoader` from our `Datasets` (or the previous `TfmdLists`), you can pass it directly to `TfmdDL`:
dsets = Datasets(items, tfms)
dl = TfmdDL(dsets, bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
# ### Segmentation
# By using the same transforms in `after_item` but a different kind of targets (here segmentation masks), the targets are automatically processed as they should with the type-dispatch system.
cv_source = untar_data(URLs.CAMVID_TINY)
cv_items = get_image_files(cv_source / 'images')
cv_splitter = RandomSplitter(seed=42)
cv_split = cv_splitter(cv_items)
tfms = [[PILImage.create], [cv_label, PILMask.create]]
cv_dsets = Datasets(cv_items, tfms, splits=cv_split)
dls = cv_dsets.dataloaders(bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
dls.show_batch(max_n=4)
# ## Adding a test dataloader for inference
# Let's take back our pets dataset...
tfms = [[PILImage.create], [labeller, Categorize()]]
dsets = Datasets(items, tfms, splits=splits)
dls = dsets.dataloaders(bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
# ...and imagine we have some new files to classify.
path = untar_data(URLs.PETS)
tst_files = get_image_files(path / "images")
len(tst_files)
# We can create a dataloader that takes those files and applies the same transforms as the validation set with `DataLoaders.test_dl`:
tst_dl = dls.test_dl(tst_files)
tst_dl.show_batch(max_n=9)
# **Extra:**
# You can call `learn.get_preds` passing this newly created dataloaders to make predictions on our new images!
# What is really cool is that after you finished training your model, you can save it with `learn.export`, this is also going to save all the transforms that need to be applied to your data. In inference time you just need to load your learner with `load_learner` and you can immediately create a dataloader with `test_dl` to use it to generate new predictions!
# ## fin -
| 46.014184 | 654 | 0.731761 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# hide
# skip
from fastai.vision.all import *
! [ -e /content ] && pip install -Uqq fastai  # upgrade fastai on colab
# # Tutorial - Assemble the data on the pets dataset
#
# > Using `Datasets`, `Pipeline`, `TfmdLists` and `Transform` in computer vision
# ## Overview
# In this tutorial, we look in depth at the middle level API for collecting data in computer vision. First we will see how to use:
#
# - `Transform` to process the data
# - `Pipeline` to composes transforms
#
# Those are just functions with added functionality. For dataset processing, we will look in a second part at
#
# - `TfmdLists` to apply one `Pipeline` of `Tranform`s on a collection of items
# - `Datasets` to apply several `Pipeline` of `Transform`s on a collection of items in parallel and produce tuples
#
# The general rule is to use `TfmdLists` when your transforms will output the tuple (input,target) and `Datasets` when you build separate `Pipeline`s for each of your input(s)/target(s).
#
# After this tutorial, you might be interested by the [siamese tutorial](http://docs.fast.ai/tutorial.siamese) that goes even more in depth in the data APIs, showing you how to write your custom types and how to customize the behavior of `show_batch` and `show_results`.
# ## Processing data
# Cleaning and processing data is one of the most time-consuming things in machine learning, which is why fastai tries to help you as much as it can. At its core, preparing the data for your model can be formalized as a sequence of transformations you apply to some raw items. For instance, in a classic image classification problem, we start with filenames. We have to open the corresponding images, resize them, convert them to tensors, maybe apply some kind of data augmentation, before we are ready to batch them. And that's just for the inputs of our model, for the targets, we need to extract the label of our filename and convert it to an integer.
#
# This process needs to be somewhat reversible, because we often want to inspect our data to double check what we feed the model actually makes sense. That's why fastai represents all those operations by `Transform`s, which you can sometimes undo with a `decode` method.
# ### Transform
# First we'll have a look at the basic steps using a single MNIST image. We'll start with a filename, and see step by step how it can be converted in to a labelled image that can be displayed and used for modeling. We use the usual `untar_data` to download our dataset (if necessary) and get all the image files:
source = untar_data(URLs.MNIST_TINY) / 'train'
items = get_image_files(source)
fn = items[0]
fn
# We'll look at each `Transform` needed in turn. Here's how we can open an image file:
img = PILImage.create(fn)
img
# Then we can convert it to a `C*H*W` tensor (for channel x height x width, which is the convention in PyTorch):
tconv = ToTensor()
img = tconv(img)
img.shape, type(img)
# Now that's done, we can create our labels. First extracting the text label:
lbl = parent_label(fn)
lbl
# And then converting to an int for modeling:
tcat = Categorize(vocab=['3', '7'])
lbl = tcat(lbl)
lbl
# We use `decode` to reverse transforms for display. Reversing the `Categorize` transform result in a class name we can display:
lbld = tcat.decode(lbl)
lbld
# ### Pipeline
# We can compose our image steps using `Pipeline`:
pipe = Pipeline([PILImage.create, tconv])
img = pipe(fn)
img.shape
# A `Pipeline` can decode and show an item.
pipe.show(img, figsize=(1, 1), cmap='Greys')
# The show method works behind the scenes with types. Transforms will make sure the type of an element they receive is preserved. Here `PILImage.create` returns a `PILImage`, which knows how to show itself. `tconv` converts it to a `TensorImage`, which also knows how to show itself.
type(img)
# Those types are also used to enable different behaviors depending on the input received (for instance you don't do data augmentation the same way on an image, a segmentation mask or a bounding box).
# ## Loading the pets dataset using only `Transform`
# Let's see how to use `fastai.data` to process the Pets dataset. If you are used to writing your own PyTorch `Dataset`s, what will feel more natural is to write everything in one `Transform`. We use *source* to refer to the underlying source of our data (e.g. a directory on disk, a database connection, a network connection, etc). Then we grab the items.
source = untar_data(URLs.PETS) / "images"
items = get_image_files(source)
# We'll use this function to create consistently sized tensors from image files:
def resized_image(fn: Path, sz=128):
    "Open image file `fn`, resize it to `sz` x `sz` and return a normalized C*H*W float tensor."
    x = Image.open(fn).convert('RGB').resize((sz, sz))
    # Convert image to tensor for modeling
    # H*W*C uint8 array -> C*H*W float tensor scaled to [0, 1]
    return tensor(array(x)).permute(2, 0, 1).float() / 255.
# Before we can create a `Transform`, we need a type that knows how to show itself (if we want to use the show method). Here we define a `TitledImage`:
class TitledImage(fastuple):
    "An (image, title) tuple that knows how to display itself with `show_titled_image`."
    def show(self, ctx=None, **kwargs): show_titled_image(self, ctx=ctx, **kwargs)
# Let's check it works:
img = resized_image(items[0])
TitledImage(img, 'test title').show()
# ### Using decodes for showing processed data
# To decode data for showing purposes (like de-normalizing an image or converting back an index to its corresponding class), we implement a <code>decodes</code> method inside a `Transform`.
class PetTfm(Transform):
    "Open/resize an image file and pair it with its label index; decode back to a `TitledImage`."
    def __init__(self, vocab, o2i, lblr): self.vocab, self.o2i, self.lblr = vocab, o2i, lblr
    # encodes returns a list (not a tuple), so decodes receives both elements as a whole
    def encodes(self, o): return [resized_image(o), self.o2i[self.lblr(o)]]
    def decodes(self, x): return TitledImage(x[0], self.vocab[x[1]])
# The `Transform` opens and resizes the images on one side, label it and convert that label to an index using `o2i` on the other side. Inside the <code>decodes</code> method, we decode the index using the `vocab`. The image is left as is (we can't really show a filename!).
#
# To use this `Transform`, we need a label function. Here we use a regex on the `name` attribute of our filenames:
labeller = using_attr(RegexLabeller(pat=r'^(.*)_\d+.jpg$'), 'name')
# Then we gather all the possible labels, uniqueify them and ask for the two correspondences (vocab and o2i) using `bidir=True`. We can then use them to build our pet transform.
vals = list(map(labeller, items))
vocab, o2i = uniqueify(vals, sort=True, bidir=True)
pets = PetTfm(vocab, o2i, labeller)
# We can check how it's applied to a filename:
x, y = pets(items[0])
x.shape, y
# And we can decode our transformed version and show it:
dec = pets.decode([x, y])
dec.show()
# Note that like `__call__ ` and <code>encodes</code>, we implemented a <code>decodes</code> method but we actually call `decode` on our `Transform`.
#
# Also note that our <code>decodes</code> method received the two objects (x and y). We said in the previous section `Transform` dispatch over tuples (for the encoding as well as the decoding) but here it took our two elements as a whole and did not try to decode x and y separately. Why is that? It's because we pass a list `[x,y]` to decodes. `Transform`s dispatch over tuples, but tuples only. And as we saw as well, to prevent a `Transform` from dispatching over a tuple, we just have to make it an `ItemTransform`:
class PetTfm(ItemTransform):
    "Same as the `Transform` version, but as an `ItemTransform` so encodes/decodes see the whole tuple."
    def __init__(self, vocab, o2i, lblr): self.vocab, self.o2i, self.lblr = vocab, o2i, lblr
    def encodes(self, o): return (resized_image(o), self.o2i[self.lblr(o)])
    def decodes(self, x): return TitledImage(x[0], self.vocab[x[1]])
dec = pets.decode(pets(items[0]))
dec.show()
# ### Setting up the internal state with a setups
# Now let's make our `ItemTransform` automatically set up its state from the data. This way, when we combine our `Transform` with the data, it will automatically get set up without us having to do anything. This is very easy to do: just copy the lines we had before to build the categories inside the transform into a <code>setups</code> method:
class PetTfm(ItemTransform):
    "Label pet images from their filenames; builds `vocab`/`o2i` automatically in `setups`."
    def setups(self, items):
        # Called once by `Pipeline.setup` with the (training) items: build the
        # labelling function and the category <-> index correspondences.
        self.labeller = using_attr(RegexLabeller(pat=r'^(.*)_\d+.jpg$'), 'name')
        vals = map(self.labeller, items)
        self.vocab, self.o2i = uniqueify(vals, sort=True, bidir=True)
    def encodes(self, o): return (resized_image(o), self.o2i[self.labeller(o)])
    def decodes(self, x): return TitledImage(x[0], self.vocab[x[1]])
# Now we can create our `Transform`, call its setup, and it will be ready to be used:
pets = PetTfm()
pets.setup(items)
x, y = pets(items[0])
x.shape, y
# And like before, there is no problem to decode it:
dec = pets.decode((x, y))
dec.show()
# ### Combining our `Transform` with data augmentation in a `Pipeline`.
# We can take advantage of fastai's data augmentation transforms if we give the right type to our elements. Instead of returning a standard `PIL.Image`, if our transform returns the fastai type `PILImage`, we can then use any fastai's transform with it. Let's just return a `PILImage` for our first element:
class PetTfm(ItemTransform):
    "Like before, but encodes to a fastai `PILImage` so later typed transforms can dispatch on it."
    def setups(self, items):
        self.labeller = using_attr(RegexLabeller(pat=r'^(.*)_\d+.jpg$'), 'name')
        vals = map(self.labeller, items)
        self.vocab, self.o2i = uniqueify(vals, sort=True, bidir=True)
    # Returning `PILImage` (not a plain PIL.Image) enables fastai augmentation transforms downstream.
    def encodes(self, o): return (PILImage.create(o), self.o2i[self.labeller(o)])
    def decodes(self, x): return TitledImage(x[0], self.vocab[x[1]])
# We can then combine that transform with `ToTensor`, `Resize` or `FlipItem` to randomly flip our image in a `Pipeline`:
tfms = Pipeline([PetTfm(), Resize(224), FlipItem(p=1), ToTensor()])
# Calling `setup` on a `Pipeline` will set each transform in order:
tfms.setup(items)
# To check the setup was done properly, we want to see if we did build the vocab. One cool trick of `Pipeline` is that when asking for an attribute, it will look through each of its `Transform`s for that attribute and give you the result (or the list of results if the attribute is in multiple transforms):
tfms.vocab
# Then we can call our pipeline:
x, y = tfms(items[0])
x.shape, y
# We can see `ToTensor` and `Resize` were applied to the first element of our tuple (which was of type `PILImage`) but not the second. We can even have a look at our element to check the flip was also applied:
tfms.show(tfms(items[0]))
# `Pipeline.show` will call decode on each `Transform` until it gets a type that knows how to show itself. The library considers a tuple as knowing how to show itself if all its parts have a `show` method. Here it does not happen before reaching `PetTfm` since the second part of our tuple is an int. But after decoding the original `PetTfm`, we get a `TitledImage` which has a `show` method.
#
# It's a good point to note that the `Transform`s of the `Pipeline` are sorted by their internal `order` attribute (with a default of `order=0`). You can always check the order in which the transforms are in a `Pipeline` by looking at its representation:
tfms
# Even if we define `tfms` with `Resize` before `FlipItem`, we can see they have been reordered because we have:
FlipItem.order, Resize.order
# To customize the order of a `Transform`, just set `order = ...` before the `__init__` (it's a class attribute). Let's make `PetTfm` of order -5 to be sure it's always run first:
class PetTfm(ItemTransform):
    "Final version: opens/labels pets and runs first in any `Pipeline` thanks to `order = -5`."
    # `Pipeline` sorts its transforms by this class attribute; -5 guarantees this runs first.
    order = -5
    def setups(self, items):
        self.labeller = using_attr(RegexLabeller(pat=r'^(.*)_\d+.jpg$'), 'name')
        vals = map(self.labeller, items)
        self.vocab, self.o2i = uniqueify(vals, sort=True, bidir=True)
    def encodes(self, o): return (PILImage.create(o), self.o2i[self.labeller(o)])
    def decodes(self, x): return TitledImage(x[0], self.vocab[x[1]])
# Then we can mess up the order of the transforms in our `Pipeline` but it will fix itself:
tfms = Pipeline([Resize(224), PetTfm(), FlipItem(p=1), ToTensor()])
tfms
# Now that we have a good `Pipeline` of transforms, let's add it to a list of filenames to build our dataset. A `Pipeline` combined with a collection is a `TfmdLists` in fastai.
# ## `TfmdLists` and `Datasets`
# The main difference between `TfmdLists` and `Datasets` is the number of `Pipeline`s you have: `TfmdLists` take one `Pipeline` to transform a list (like we currently have) whereas `Datasets` combines several `Pipeline`s in parallel to create a tuple from one set of raw items, for instance a tuple (input, target).
# ### One pipeline makes a `TfmdLists`
# Creating a `TfmdLists` just requires a list of items and a list of transforms that will be combined in a `Pipeline`:
tls = TfmdLists(items, [Resize(224), PetTfm(), FlipItem(p=0.5), ToTensor()])
x, y = tls[0]
x.shape, y
# We did not need to pass anything to `PetTfm` thanks to our setup method: the `Pipeline` was automatically setup on the `items` during the initialization, so `PetTfm` has created its vocab like before:
tls.vocab
# We can ask the `TfmdLists` to show the items we got:
tls.show((x, y))
# Or we have a shortcut with `show_at`:
show_at(tls, 0)
# ### Training and validation set
# `TfmdLists` has an 's' in its name because it can represent several transformed lists: your training and validation sets. To use that functionality, we just need to pass `splits` to the initialization. `splits` should be a list of lists of indices (one list per set). To help create splits, we can use all the *splitters* of the fastai library:
splits = RandomSplitter(seed=42)(items)
splits
tls = TfmdLists(items, [Resize(224), PetTfm(), FlipItem(p=0.5), ToTensor()], splits=splits)
# Then your `tls` get a train and valid attributes (it also had them before, but the valid was empty and the train contained everything).
show_at(tls.train, 0)
# An interesting thing is that unless you pass `train_setup=False`, your transforms are set up on the training set only (which is best practice): the `items` received by <code>setups</code> are just the elements of the training set.
# ### Getting to `DataLoaders`
# From a `TfmdLists`, getting a `DataLoaders` object is very easy, you just have to call the `dataloaders` method:
dls = tls.dataloaders(bs=64)
# And `show_batch` will just *work*:
dls.show_batch()
# You can even add augmentation transforms, since we have a proper fastai typed image. Just remember to add the `IntToFloatTensor` transform that deals with the conversion of int to float (augmentation transforms of fastai on the GPU require float tensors). When calling `TfmdLists.dataloaders`, you pass the `batch_tfms` to `after_batch` (and potential new `item_tfms` to `after_item`):
dls = tls.dataloaders(bs=64, after_batch=[IntToFloatTensor(), *aug_transforms()])
dls.show_batch()
# ### Using `Datasets`
# `Datasets` applies a list of list of transforms (or list of `Pipeline`s) lazily to items of a collection, creating one output per list of transforms/`Pipeline`. This makes it easier for us to separate out steps of a process, so that we can re-use them and modify the process more easily. This is what lays the foundation of the data block API: we can easily mix and match types as inputs or outputs as they are associated to certain pipelines of transforms.
#
# For instance, let's write our own `ImageResizer` transform with two different implementations for images or masks:
class ImageResizer(Transform):
    # Moved the docstring to the first statement of the class body so Python
    # actually records it as `ImageResizer.__doc__` (after `order = 1` it was
    # just a discarded string expression).
    "Resize image to `size` using `resample`"
    # Run after transforms with the default order 0.
    order = 1
    def __init__(self, size, resample=Image.BILINEAR):
        if not is_listy(size):
            size = (size, size)
        # PIL's `resize` expects (width, height), so swap the (h, w) input.
        self.size, self.resample = (size[1], size[0]), resample
    # fastai's `Transform` metaclass type-dispatches on the annotation, so both
    # `encodes` coexist: `resample` (default bilinear) for images, and
    # nearest-neighbor for masks to keep label values intact.
    def encodes(self, o: PILImage): return o.resize(size=self.size, resample=self.resample)
    def encodes(self, o: PILMask): return o.resize(size=self.size, resample=Image.NEAREST)
# Specifying the type-annotations makes it so that our transform does nothing to things that are neither `PILImage` nor `PILMask`, and resizes images with `self.resample` and masks with nearest-neighbor interpolation. To create a `Datasets`, we then pass two pipelines of transforms, one for the input and one for the target:
tfms = [[PILImage.create, ImageResizer(128), ToTensor(), IntToFloatTensor()],
[labeller, Categorize()]]
dsets = Datasets(items, tfms)
# We can check that inputs and outputs have the right types:
t = dsets[0]
type(t[0]), type(t[1])
# We can decode and show using `dsets`:
x, y = dsets.decode(t)
x.shape, y
dsets.show(t)
# And we can pass our train/validation split like in `TfmdLists`:
dsets = Datasets(items, tfms, splits=splits)
# But we are not using the fact that `Transform`s dispatch over tuples here. `ImageResizer`, `ToTensor` and `IntToFloatTensor` could be passed as transforms over the tuple. This is done in `.dataloaders` by passing them to `after_item`. They won't do anything to the category but will only be applied to the inputs.
tfms = [[PILImage.create], [labeller, Categorize()]]
dsets = Datasets(items, tfms, splits=splits)
dls = dsets.dataloaders(bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
# And we can check it works with `show_batch`:
dls.show_batch()
# If we just wanted to build one `DataLoader` from our `Datasets` (or the previous `TfmdLists`), you can pass it directly to `TfmdDL`:
dsets = Datasets(items, tfms)
dl = TfmdDL(dsets, bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
# ### Segmentation
# By using the same transforms in `after_item` but a different kind of targets (here segmentation masks), the targets are automatically processed as they should with the type-dispatch system.
cv_source = untar_data(URLs.CAMVID_TINY)
cv_items = get_image_files(cv_source / 'images')
cv_splitter = RandomSplitter(seed=42)
cv_split = cv_splitter(cv_items)
def cv_label(o): return cv_source / 'labels' / f'{o.stem}_P{o.suffix}'
tfms = [[PILImage.create], [cv_label, PILMask.create]]
cv_dsets = Datasets(cv_items, tfms, splits=cv_split)
dls = cv_dsets.dataloaders(bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
dls.show_batch(max_n=4)
# ## Adding a test dataloader for inference
# Let's take back our pets dataset...
tfms = [[PILImage.create], [labeller, Categorize()]]
dsets = Datasets(items, tfms, splits=splits)
dls = dsets.dataloaders(bs=64, after_item=[ImageResizer(128), ToTensor(), IntToFloatTensor()])
# ...and imagine we have some new files to classify.
path = untar_data(URLs.PETS)
tst_files = get_image_files(path / "images")
len(tst_files)
# We can create a dataloader that takes those files and applies the same transforms as the validation set with `DataLoaders.test_dl`:
tst_dl = dls.test_dl(tst_files)
tst_dl.show_batch(max_n=9)
# **Extra:**
# You can call `learn.get_preds` passing this newly created dataloaders to make predictions on our new images!
# What is really cool is that after you finished training your model, you can save it with `learn.export`, this is also going to save all the transforms that need to be applied to your data. In inference time you just need to load your learner with `load_learner` and you can immediately create a dataloader with `test_dl` to use it to generate new predictions!
# ## fin -
| 1,766 | 280 | 546 |
438c1a17d13c858405dd04c12b2e193c4907c45f | 9,583 | py | Python | configs/custom/163_htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco.py | onestep00/CBNetV2 | 2655034a92caf134486c527383eae0f792beead4 | [
"Apache-2.0"
] | null | null | null | configs/custom/163_htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco.py | onestep00/CBNetV2 | 2655034a92caf134486c527383eae0f792beead4 | [
"Apache-2.0"
] | null | null | null | configs/custom/163_htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco.py | onestep00/CBNetV2 | 2655034a92caf134486c527383eae0f792beead4 | [
"Apache-2.0"
] | 1 | 2021-08-19T01:03:24.000Z | 2021-08-19T01:03:24.000Z | _base_ = [
"../_base_/models/htc_without_semantic_swin_fpn.py",
# "../_base_/datasets/coco_instance.py",
"../_base_/datasets/coco_detection.py",
"../_base_/schedules/schedule_1x.py",
"../_base_/default_runtime.py",
]
model = dict(
backbone=dict(
type="CBSwinTransformer",
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
ape=False,
drop_path_rate=0.3,
patch_norm=True,
use_checkpoint=False,
),
neck=dict(type="CBFPN", in_channels=[128, 256, 512, 1024]),
roi_head=dict(
bbox_head=[
dict(
type="ConvFCBBoxHead",
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
# class multi
num_classes=163,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2],
),
reg_class_agnostic=True,
reg_decoded_bbox=True,
# single gpu
norm_cfg=dict(type="BN", requires_grad=True),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
),
dict(
type="ConvFCBBoxHead",
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
# class multi
num_classes=163,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.05, 0.05, 0.1, 0.1],
),
reg_class_agnostic=True,
reg_decoded_bbox=True,
# single gpu
norm_cfg=dict(type="BN", requires_grad=True),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
),
dict(
type="ConvFCBBoxHead",
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
# class multi
num_classes=163,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.033, 0.033, 0.067, 0.067],
),
reg_class_agnostic=True,
reg_decoded_bbox=True,
# single gpu
norm_cfg=dict(type="BN", requires_grad=True),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
),
],
mask_roi_extractor=None,
mask_head=None,
),
test_cfg=dict(
rcnn=dict(
score_thr=0.001,
nms=dict(type="soft_nms"),
)
),
)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
)
# augmentation strategy originates from HTC
train_pipeline = [
dict(type="LoadImageFromFile"),
dict(type="LoadAnnotations", with_bbox=True, with_mask=False, with_seg=False),
dict(
type="Resize",
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode="range",
keep_ratio=True,
),
dict(type="RandomFlip", flip_ratio=0.5),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size_divisor=32),
dict(type="SegRescale", scale_factor=1 / 8),
dict(type="DefaultFormatBundle"),
dict(
type="Collect",
# keys=["img", "gt_bboxes", "gt_labels", "gt_masks", "gt_semantic_seg"],
keys=[
"img",
"gt_bboxes",
"gt_labels",
],
),
]
test_pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(1600, 1400),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size_divisor=32),
dict(type="ImageToTensor", keys=["img"]),
dict(type="Collect", keys=["img"]),
],
),
]
dataset_type = "CocoDataset"
# classes = ("음식",)
classes = (
"기타",
"가래떡",
"어묵볶음",
"쌀밥",
"배추김치",
"라면류",
"닭찜",
"육류튀김",
"김치찌개",
"케이크류",
"잡곡밥",
"두부",
"제육볶음",
"열무김치",
"보리밥",
"기타빵류",
"돼지등갈비찜",
"치킨류",
"중식면류",
"달걀찜",
"조미김",
"감자볶음",
"미역국",
"김밥",
"국수류",
"기타반찬",
"김치찜",
"기타김치",
"스파게티류",
"기타떡",
"토마토",
"치즈",
"기타구이",
"등심스테이크",
"볶음밥류",
"참외",
"버섯볶음",
"샐러드",
"연근조림",
"죽류",
"기타소스/기타장류",
"돼지고기 수육",
"덮밥",
"젓갈",
"돈까스",
"시금치나물",
"포도",
"앙금빵류",
"상추",
"들깻잎",
"육류전",
"달걀프라이",
"채소류튀김",
"코다리찜",
"기타불고기",
"돼지고기구이",
"버거류",
"된장국",
"채소",
"떡볶이",
"낙지볶음",
"비빔밥",
"사과",
"피자류",
"숙주나물",
"애호박볶음",
"멸치볶음",
"생선구이",
"깻잎장아찌",
"콩조림",
"카레(커리)",
"돼지고기채소볶음",
"바나나",
"파프리카",
"고사리나물",
"미역줄기볶음",
"콩나물국",
"소불고기",
"떠먹는요구르트",
"햄",
"소고기구이",
"버섯구이",
"오이",
"된장찌개",
"무생채",
"어패류튀김",
"키위",
"리조또",
"오징어볶음",
"샌드위치류",
"만두류",
"과자",
"채소류전",
"시리얼",
"순두부찌개",
"귤",
"딸기",
"기타스테이크",
"잡채",
"오리불고기",
"취나물",
"가지볶음",
"삶은달걀",
"크림빵류",
"부침류",
"어패류전",
"한과류",
"소갈비찜",
"메추리알 장조림",
"안심스테이크",
"단호박찜",
"식빵류",
"시래기나물",
"아귀찜",
"김치볶음",
"우엉조림",
"감",
"돼지불고기",
"고기장조림",
"두부조림",
"오징어채볶음",
"즉석밥",
"오삼불고기",
"현미밥",
"파김치",
"페이스트리(파이)류",
"총각김치",
"닭가슴살",
"해물찜",
"도넛류",
"마시는요구르트",
"돼지갈비찜",
"함박스테이크",
"오징어찜",
"오이나물",
"컵/액체류용기",
"삶은브로콜리",
"청국장찌개",
"그라탕",
"적류",
"소고기채소볶음",
"조기찜",
"제품사진",
"기타해조류",
"기타장아찌/절임류",
"기타나물/숙채/생채/무침류",
"기타조림",
"기타국/찌개/탕",
"기타튀김",
"기타볶음",
"기타난류",
"기타찜",
"기타면류",
"견과류",
"기타채소류",
"기타과실류",
"크래커",
"기타전/적/부침류",
"기타밥류",
"기타죽/스프류",
"도토리묵무침",
"튀김빵류",
"기타과자류",
)
data_root = "/home/jovyan/data/filtered-food3"
anno_root = "/home/jovyan/workspace/ml_mg/json_data/"
samples_per_gpu = 1
data = dict(
workers_per_gpu=16,
samples_per_gpu=samples_per_gpu,
train=dict(
type=dataset_type,
img_prefix=data_root,
classes=classes,
ann_file=anno_root + "163train.json",
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
img_prefix=data_root,
classes=classes,
ann_file=anno_root + "163val.json",
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
img_prefix=data_root,
classes=classes,
ann_file=anno_root + "163test.json",
pipeline=test_pipeline,
),
)
# AdamW optimizer; LR is scaled linearly with the per-GPU batch size
# (base 1e-4 at 2 images/GPU, so samples_per_gpu=1 gives 5e-5).
optimizer = dict(
    # _delete_ replaces the base config's optimizer dict entirely.
    _delete_=True,
    type="AdamW",
    lr=0.0001 * (samples_per_gpu / 2),
    betas=(0.9, 0.999),
    weight_decay=0.05,
    # Swin convention: no weight decay on positional-embedding tables
    # or normalization layers.
    paramwise_cfg=dict(
        custom_keys={
            "absolute_pos_embed": dict(decay_mult=0.0),
            "relative_position_bias_table": dict(decay_mult=0.0),
            "norm": dict(decay_mult=0.0),
        }
    ),
)
# Step LR decay at epochs 16 and 19.
# NOTE(review): the runner trains 40 epochs, so the schedule flattens long
# before training ends -- confirm this is intentional.
lr_config = dict(step=[16, 19])
runner = dict(type="EpochBasedRunnerAmp", max_epochs=40)
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
log_config = dict(
interval=1,
hooks=[
dict(type="TextLoggerHook", reset_flag=True),
dict(
type="WandbLoggerHook",
init_kwargs=dict(
project="mmdetection",
name="163_class_htc_cbv2_swin_base22k_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco",
),
),
],
)
evaluation = dict( # The config to build the evaluation hook, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7 for more details.
interval=5, metric=["bbox"] # Evaluation interval
)
workflow = [("train", 1)]
# workflow = [("train", 5), ("val", 1)]
# workflow = [("val", 1)]
resume_from = "/home/jovyan/workspace/ml_mg/cbnetev2/work_dirs/163_htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco/latest.pth"
# load_from = "/home/jovyan/workspace/ml_mg/cbnetev2/work_dirs/htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco/latest.pth"
# pretrained
# load_from = "/home/jovyan/workspace/ml_mg/cbnetev2/checkpoints/htc_cbv2_swin_base22k_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco.pth"
| 23.430318 | 182 | 0.508818 | _base_ = [
"../_base_/models/htc_without_semantic_swin_fpn.py",
# "../_base_/datasets/coco_instance.py",
"../_base_/datasets/coco_detection.py",
"../_base_/schedules/schedule_1x.py",
"../_base_/default_runtime.py",
]
model = dict(
backbone=dict(
type="CBSwinTransformer",
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
ape=False,
drop_path_rate=0.3,
patch_norm=True,
use_checkpoint=False,
),
neck=dict(type="CBFPN", in_channels=[128, 256, 512, 1024]),
roi_head=dict(
bbox_head=[
dict(
type="ConvFCBBoxHead",
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
# class multi
num_classes=163,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2],
),
reg_class_agnostic=True,
reg_decoded_bbox=True,
# single gpu
norm_cfg=dict(type="BN", requires_grad=True),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
),
dict(
type="ConvFCBBoxHead",
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
# class multi
num_classes=163,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.05, 0.05, 0.1, 0.1],
),
reg_class_agnostic=True,
reg_decoded_bbox=True,
# single gpu
norm_cfg=dict(type="BN", requires_grad=True),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
),
dict(
type="ConvFCBBoxHead",
num_shared_convs=4,
num_shared_fcs=1,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
# class multi
num_classes=163,
bbox_coder=dict(
type="DeltaXYWHBBoxCoder",
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.033, 0.033, 0.067, 0.067],
),
reg_class_agnostic=True,
reg_decoded_bbox=True,
# single gpu
norm_cfg=dict(type="BN", requires_grad=True),
loss_cls=dict(
type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
),
loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
),
],
mask_roi_extractor=None,
mask_head=None,
),
test_cfg=dict(
rcnn=dict(
score_thr=0.001,
nms=dict(type="soft_nms"),
)
),
)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
)
# augmentation strategy originates from HTC
train_pipeline = [
dict(type="LoadImageFromFile"),
dict(type="LoadAnnotations", with_bbox=True, with_mask=False, with_seg=False),
dict(
type="Resize",
img_scale=[(1600, 400), (1600, 1400)],
multiscale_mode="range",
keep_ratio=True,
),
dict(type="RandomFlip", flip_ratio=0.5),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size_divisor=32),
dict(type="SegRescale", scale_factor=1 / 8),
dict(type="DefaultFormatBundle"),
dict(
type="Collect",
# keys=["img", "gt_bboxes", "gt_labels", "gt_masks", "gt_semantic_seg"],
keys=[
"img",
"gt_bboxes",
"gt_labels",
],
),
]
test_pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(1600, 1400),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size_divisor=32),
dict(type="ImageToTensor", keys=["img"]),
dict(type="Collect", keys=["img"]),
],
),
]
dataset_type = "CocoDataset"
# classes = ("음식",)
classes = (
"기타",
"가래떡",
"어묵볶음",
"쌀밥",
"배추김치",
"라면류",
"닭찜",
"육류튀김",
"김치찌개",
"케이크류",
"잡곡밥",
"두부",
"제육볶음",
"열무김치",
"보리밥",
"기타빵류",
"돼지등갈비찜",
"치킨류",
"중식면류",
"달걀찜",
"조미김",
"감자볶음",
"미역국",
"김밥",
"국수류",
"기타반찬",
"김치찜",
"기타김치",
"스파게티류",
"기타떡",
"토마토",
"치즈",
"기타구이",
"등심스테이크",
"볶음밥류",
"참외",
"버섯볶음",
"샐러드",
"연근조림",
"죽류",
"기타소스/기타장류",
"돼지고기 수육",
"덮밥",
"젓갈",
"돈까스",
"시금치나물",
"포도",
"앙금빵류",
"상추",
"들깻잎",
"육류전",
"달걀프라이",
"채소류튀김",
"코다리찜",
"기타불고기",
"돼지고기구이",
"버거류",
"된장국",
"채소",
"떡볶이",
"낙지볶음",
"비빔밥",
"사과",
"피자류",
"숙주나물",
"애호박볶음",
"멸치볶음",
"생선구이",
"깻잎장아찌",
"콩조림",
"카레(커리)",
"돼지고기채소볶음",
"바나나",
"파프리카",
"고사리나물",
"미역줄기볶음",
"콩나물국",
"소불고기",
"떠먹는요구르트",
"햄",
"소고기구이",
"버섯구이",
"오이",
"된장찌개",
"무생채",
"어패류튀김",
"키위",
"리조또",
"오징어볶음",
"샌드위치류",
"만두류",
"과자",
"채소류전",
"시리얼",
"순두부찌개",
"귤",
"딸기",
"기타스테이크",
"잡채",
"오리불고기",
"취나물",
"가지볶음",
"삶은달걀",
"크림빵류",
"부침류",
"어패류전",
"한과류",
"소갈비찜",
"메추리알 장조림",
"안심스테이크",
"단호박찜",
"식빵류",
"시래기나물",
"아귀찜",
"김치볶음",
"우엉조림",
"감",
"돼지불고기",
"고기장조림",
"두부조림",
"오징어채볶음",
"즉석밥",
"오삼불고기",
"현미밥",
"파김치",
"페이스트리(파이)류",
"총각김치",
"닭가슴살",
"해물찜",
"도넛류",
"마시는요구르트",
"돼지갈비찜",
"함박스테이크",
"오징어찜",
"오이나물",
"컵/액체류용기",
"삶은브로콜리",
"청국장찌개",
"그라탕",
"적류",
"소고기채소볶음",
"조기찜",
"제품사진",
"기타해조류",
"기타장아찌/절임류",
"기타나물/숙채/생채/무침류",
"기타조림",
"기타국/찌개/탕",
"기타튀김",
"기타볶음",
"기타난류",
"기타찜",
"기타면류",
"견과류",
"기타채소류",
"기타과실류",
"크래커",
"기타전/적/부침류",
"기타밥류",
"기타죽/스프류",
"도토리묵무침",
"튀김빵류",
"기타과자류",
)
data_root = "/home/jovyan/data/filtered-food3"
anno_root = "/home/jovyan/workspace/ml_mg/json_data/"
samples_per_gpu = 1
data = dict(
workers_per_gpu=16,
samples_per_gpu=samples_per_gpu,
train=dict(
type=dataset_type,
img_prefix=data_root,
classes=classes,
ann_file=anno_root + "163train.json",
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
img_prefix=data_root,
classes=classes,
ann_file=anno_root + "163val.json",
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
img_prefix=data_root,
classes=classes,
ann_file=anno_root + "163test.json",
pipeline=test_pipeline,
),
)
optimizer = dict(
_delete_=True,
type="AdamW",
lr=0.0001 * (samples_per_gpu / 2),
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
"absolute_pos_embed": dict(decay_mult=0.0),
"relative_position_bias_table": dict(decay_mult=0.0),
"norm": dict(decay_mult=0.0),
}
),
)
lr_config = dict(step=[16, 19])
runner = dict(type="EpochBasedRunnerAmp", max_epochs=40)
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
log_config = dict(
interval=1,
hooks=[
dict(type="TextLoggerHook", reset_flag=True),
dict(
type="WandbLoggerHook",
init_kwargs=dict(
project="mmdetection",
name="163_class_htc_cbv2_swin_base22k_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco",
),
),
],
)
evaluation = dict( # The config to build the evaluation hook, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7 for more details.
interval=5, metric=["bbox"] # Evaluation interval
)
workflow = [("train", 1)]
# workflow = [("train", 5), ("val", 1)]
# workflow = [("val", 1)]
resume_from = "/home/jovyan/workspace/ml_mg/cbnetev2/work_dirs/163_htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco/latest.pth"
# load_from = "/home/jovyan/workspace/ml_mg/cbnetev2/work_dirs/htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco/latest.pth"
# pretrained
# load_from = "/home/jovyan/workspace/ml_mg/cbnetev2/checkpoints/htc_cbv2_swin_base22k_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco.pth"
| 0 | 0 | 0 |
e4b1ab3162f8284a57813722b83813646bd03d88 | 327 | py | Python | tests/unit/test_key.py | firemax13/getkey | 4bf53695fce32bd8c09a07c10a6d7a9c2e4f2cfd | [
"MIT"
] | 24 | 2017-08-31T21:49:37.000Z | 2021-11-10T09:58:08.000Z | tests/unit/test_key.py | firemax13/getkey | 4bf53695fce32bd8c09a07c10a6d7a9c2e4f2cfd | [
"MIT"
] | 15 | 2017-07-22T08:04:55.000Z | 2021-09-19T09:16:47.000Z | tests/unit/test_key.py | firemax13/getkey | 4bf53695fce32bd8c09a07c10a6d7a9c2e4f2cfd | [
"MIT"
] | 7 | 2018-04-05T05:40:38.000Z | 2021-01-02T06:35:08.000Z | import unittest
from getkey.platforms import PlatformTest
| 23.357143 | 44 | 0.697248 | import unittest
from getkey.platforms import PlatformTest
class TestKeys(unittest.TestCase):
    """Check the byte length of key codes exposed by the test platform."""

    def test_character_length_1(self):
        # A control character is a single one-byte code.
        keys = PlatformTest().key
        self.assertEqual(1, len(keys.CTRL_A))

    def test_character_length_3(self):
        # An arrow key maps to a three-byte escape sequence.
        keys = PlatformTest().key
        self.assertEqual(3, len(keys.UP))
| 178 | 13 | 76 |
20c244407f542fe9f0905909b1ff9211ac24ed17 | 271 | bzl | Python | crystal_ball/vendored/BUILD.bzl | AleksanderGondek/rise-and-fall | dce8c74c34ed2ed35f8c319bcea76de6401775c5 | [
"Apache-2.0"
] | 1 | 2021-05-24T12:28:14.000Z | 2021-05-24T12:28:14.000Z | crystal_ball/vendored/BUILD.bzl | AleksanderGondek/rise-and-fall | dce8c74c34ed2ed35f8c319bcea76de6401775c5 | [
"Apache-2.0"
] | null | null | null | crystal_ball/vendored/BUILD.bzl | AleksanderGondek/rise-and-fall | dce8c74c34ed2ed35f8c319bcea76de6401775c5 | [
"Apache-2.0"
] | null | null | null | package(default_visibility = ["//visibility:public"])
# Expose the vendored Node.js / npm / Yarn package links (and their
# binaries) so targets in other packages can reference them as files.
exports_files([
    "node_pkg_link",
    "npm_pkg_link",
    "yarn_pkg_link",
    "node_pkg_link/bin/node",
    "yarn_pkg_link/bin/yarn"
])
# Alias the `.js` entry-point label to the actual yarn binary target.
alias(
    name = "yarn_pkg_link/bin/yarn.js",
    actual = ":yarn_pkg_link/bin/yarn",
)
| 18.066667 | 53 | 0.697417 | package(default_visibility = ["//visibility:public"])
# Expose the vendored Node.js / npm / Yarn package links (and their
# binaries) so targets in other packages can reference them as files.
exports_files([
    "node_pkg_link",
    "npm_pkg_link",
    "yarn_pkg_link",
    "node_pkg_link/bin/node",
    "yarn_pkg_link/bin/yarn"
])
# Alias the `.js` entry-point label to the actual yarn binary target.
alias(
    name = "yarn_pkg_link/bin/yarn.js",
    actual = ":yarn_pkg_link/bin/yarn",
)
| 0 | 0 | 0 |
5c4d51bf6075905bb8fd426069508968f20e065d | 493 | py | Python | ex4.py | Oliviaha/python-the-hardway | 680ed0594878507b368ece0e58f87a6126746dcf | [
"MIT"
] | null | null | null | ex4.py | Oliviaha/python-the-hardway | 680ed0594878507b368ece0e58f87a6126746dcf | [
"MIT"
] | 1 | 2019-11-10T17:37:00.000Z | 2019-11-10T17:37:26.000Z | ex4.py | Oliviaha/python-the-hardway | 680ed0594878507b368ece0e58f87a6126746dcf | [
"MIT"
] | null | null | null | # -*-coding: utf-8 -*-
# Fleet-capacity arithmetic (Learn Python The Hard Way, ex4).
# Python 2 script: `print` is a statement here.
cars = 100
space_in_a_car = 4
drivers = 30
passengers = 90
# One car per driver; the remaining cars stay parked today.
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
# Python 2 integer division; exact in this case (90 / 30 == 3).
average_passengers_per_car = passengers / cars_driven
print "자동차", cars, "대가 있습니다."
print "운전자는", drivers, "명 뿐입니다."
print "오늘은 빈 차가", cars_not_driven, "대일 것입니다."
print "오늘은", carpool_capacity, "명을 태울 수 있습니다."
print "함께 탈 사람은", passengers, "명 있습니다."
print "차마다", average_passengers_per_car, "명 정도씩 타야 합니다." | 29 | 56 | 0.726166 | # -*-coding: utf-8 -*-
# Fleet-capacity arithmetic (Learn Python The Hard Way, ex4).
# Python 2 script: `print` is a statement here.
cars = 100
space_in_a_car = 4
drivers = 30
passengers = 90
# One car per driver; the remaining cars stay parked today.
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
# Python 2 integer division; exact in this case (90 / 30 == 3).
average_passengers_per_car = passengers / cars_driven
print "자동차", cars, "대가 있습니다."
print "운전자는", drivers, "명 뿐입니다."
print "오늘은 빈 차가", cars_not_driven, "대일 것입니다."
print "오늘은", carpool_capacity, "명을 태울 수 있습니다."
print "함께 탈 사람은", passengers, "명 있습니다."
print "차마다", average_passengers_per_car, "명 정도씩 타야 합니다." | 0 | 0 | 0 |
3610ed1a9b985145042dea6f080b2ddfb3c2cfd9 | 2,233 | py | Python | gui/ui/plot_display_dialog.py | lcford2/graps_gui | a47b598cd746425203e8df1ddf77815e3b1522e9 | [
"MIT"
] | null | null | null | gui/ui/plot_display_dialog.py | lcford2/graps_gui | a47b598cd746425203e8df1ddf77815e3b1522e9 | [
"MIT"
] | 5 | 2020-07-22T11:51:26.000Z | 2022-02-17T20:07:12.000Z | gui/ui/plot_display_dialog.py | lcford2/graps_gui | a47b598cd746425203e8df1ddf77815e3b1522e9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\plot_display_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
| 48.543478 | 108 | 0.760412 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\plot_display_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_plot_display_dialog(object):
    """pyuic5-generated UI: an expanding display area beside a plot list.

    NOTE: this class is produced by the PyQt5 UI code generator; per the
    file header, manual edits are lost when pyuic5 is re-run.
    """
    def setupUi(self, plot_display_dialog):
        """Build the widget tree and layouts on ``plot_display_dialog``."""
        plot_display_dialog.setObjectName("plot_display_dialog")
        plot_display_dialog.resize(683, 441)
        self.horizontalLayout = QtWidgets.QHBoxLayout(plot_display_dialog)
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Left: expanding container with an empty vertical layout; contents
        # are added elsewhere (presumably the plot canvas -- confirm in caller).
        self.display_window = QtWidgets.QWidget(plot_display_dialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.display_window.sizePolicy().hasHeightForWidth())
        self.display_window.setSizePolicy(sizePolicy)
        self.display_window.setObjectName("display_window")
        self.display_vert_layout = QtWidgets.QVBoxLayout(self.display_window)
        self.display_vert_layout.setObjectName("display_vert_layout")
        self.horizontalLayout.addWidget(self.display_window)
        # Right: selection list capped at 200 px wide.
        self.plot_list = QtWidgets.QListWidget(plot_display_dialog)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.plot_list.sizePolicy().hasHeightForWidth())
        self.plot_list.setSizePolicy(sizePolicy)
        self.plot_list.setMaximumSize(QtCore.QSize(200, 16777215))
        self.plot_list.setObjectName("plot_list")
        self.horizontalLayout.addWidget(self.plot_list)
        self.retranslateUi(plot_display_dialog)
        QtCore.QMetaObject.connectSlotsByName(plot_display_dialog)
    def retranslateUi(self, plot_display_dialog):
        """Install translatable strings (window title only)."""
        _translate = QtCore.QCoreApplication.translate
        plot_display_dialog.setWindowTitle(_translate("plot_display_dialog", "Dialog"))
| 1,793 | 16 | 76 |
c916683e9e3f75aa143a004d1b56f45c51e118b1 | 1,045 | py | Python | OverlappingProblem_leetCode.py | amukher3/Problem_solutions | 8fa6014a91f295d08cafb989024caa91d99211d9 | [
"Apache-2.0"
] | 1 | 2021-12-28T08:58:51.000Z | 2021-12-28T08:58:51.000Z | OverlappingProblem_leetCode.py | amukher3/Coding | a330cb04b5dd5cc1c3cf69249417a71586441bc7 | [
"Apache-2.0"
] | null | null | null | OverlappingProblem_leetCode.py | amukher3/Coding | a330cb04b5dd5cc1c3cf69249417a71586441bc7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 19:16:54 2020
@author: Abhishek Mukherjee
"""
#Merge overlapping intervals...
#Leetcode problem
| 26.794872 | 57 | 0.378947 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 19:16:54 2020
@author: Abhishek Mukherjee
"""
#Merge overlapping intervals...
#Leetcode problem
class Solution:
    """LeetCode 56 -- Merge Intervals."""

    def merge(self, intervals):
        """Return ``intervals`` with every group of overlapping ranges coalesced.

        Sort by start, then sweep once, extending the last merged interval
        whenever the next one begins inside it -- O(n log n) overall.
        (The original popped from the list while scanning, which is O(n^2)
        and mutated the caller's argument in place; this version leaves the
        input untouched and returns a new list of ``[start, end]`` pairs.)
        """
        merged = []
        for start, end in sorted(intervals):
            if merged and start <= merged[-1][1]:
                # Overlap: grow the previous interval's upper bound.
                merged[-1][1] = max(merged[-1][1], end)
            else:
                # Gap: begin a new merged interval.
                merged.append([start, end])
        return merged
24ab160d5913532f08db77faf57d52324bb8718e | 3,703 | py | Python | RL/muzero/network.py | tsubame-mz/reinforcement_learning | b69cce573deed11676f60653b41e036d8e79aedc | [
"MIT"
] | null | null | null | RL/muzero/network.py | tsubame-mz/reinforcement_learning | b69cce573deed11676f60653b41e036d8e79aedc | [
"MIT"
] | null | null | null | RL/muzero/network.py | tsubame-mz/reinforcement_learning | b69cce573deed11676f60653b41e036d8e79aedc | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
from typing import Tuple
class Representation(nn.Module):
"""
observation -> hidden state
"""
class Prediction(nn.Module):
"""
hidden state -> policy + value
"""
class Dynamics(nn.Module):
"""
hidden state + action -> next hidden state + reward
"""
| 31.381356 | 102 | 0.641102 | import torch
import torch.nn as nn
import numpy as np
from typing import Tuple
def weight_init(m):
    """Kaiming-normal-initialise Linear weights and zero their biases.

    Intended for ``nn.Module.apply``; modules of any other type are
    left untouched.
    """
    if not isinstance(m, nn.Linear):
        return
    nn.init.kaiming_normal_(m.weight)
    nn.init.constant_(m.bias, 0)
class Swish(nn.Module):
    """Swish (SiLU) activation: f(x) = x * sigmoid(x)."""

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        # Element-wise sigmoid gate applied to the input itself.
        gate = torch.sigmoid(x)
        return gate * x
class MLP(nn.Module):
    """Two-hidden-layer perceptron with Swish activations, Kaiming-initialised."""

    def __init__(self, in_units: int, hid_units: int, out_units: int):
        super(MLP, self).__init__()
        stack = []
        for n_in, n_out in [(in_units, hid_units), (hid_units, hid_units)]:
            stack.append(nn.Linear(n_in, n_out))
            stack.append(Swish())
        stack.append(nn.Linear(hid_units, out_units))
        self.layers = nn.Sequential(*stack)
        # Kaiming-normal weights / zero biases on every Linear layer.
        self.layers.apply(weight_init)

    def forward(self, obs):
        return self.layers(obs)
class Representation(nn.Module):
    """Encode a conditioned observation into a hidden state (MuZero ``h``)."""

    def __init__(self, obs_units: int, hid_units: int, state_units: int):
        super(Representation, self).__init__()
        self.layers = MLP(obs_units, hid_units, state_units)

    def forward(self, x):
        hidden = self.layers(x)
        return hidden
class Prediction(nn.Module):
    """Map a hidden state to a policy distribution and a value (MuZero ``f``)."""

    def __init__(self, state_units: int, hid_units: int, act_units: int):
        super(Prediction, self).__init__()
        # Policy head: per-action probabilities (softmax over the logits).
        self.policy_layers = nn.Sequential(MLP(state_units, hid_units, act_units), nn.Softmax(dim=-1))
        # Value head: scalar estimate of the state value.
        self.value_layers = MLP(state_units, hid_units, 1)

    def forward(self, x):
        policy = self.policy_layers(x)
        value = self.value_layers(x)
        return policy, value
class Dynamics(nn.Module):
    """Map (hidden state, action) to the next hidden state and reward (MuZero ``g``)."""

    def __init__(self, state_units: int, act_units: int, hid_units: int):
        super(Dynamics, self).__init__()
        joint_units = state_units + act_units
        self.state_layers = MLP(joint_units, hid_units, state_units)
        self.reward_layers = MLP(joint_units, hid_units, 1)

    def forward(self, x):
        return self.state_layers(x), self.reward_layers(x)
class Network(nn.Module):
    """MuZero model bundling representation (h), prediction (f) and dynamics (g)."""
    def __init__(self, obs_units: int, act_units: int, state_units: int, hid_units: int):
        super(Network, self).__init__()
        self.representation = Representation(obs_units, hid_units, state_units)
        self.prediction = Prediction(state_units, hid_units, act_units)
        self.dynamics = Dynamics(state_units, act_units, hid_units)
    def initial_inference(self, obs: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Representation + Prediction: encode `obs`, then evaluate it.
        Returns (hidden state, policy, value).
        """
        state = self.representation(self._conditioned_observation(obs))
        policy, value = self.prediction(state)
        return state, policy, value
    def recurrent_inference(
        self, state: torch.Tensor, action: np.ndarray
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Dynamics + Prediction: advance one imagined step from `state` via `action`.
        Returns (next hidden state, reward, policy, value).
        """
        next_state, reward = self.dynamics(self._conditioned_state(state, action))
        policy, value = self.prediction(next_state)
        return next_state, reward, policy, value
    def _conditioned_observation(self, obs: np.array) -> torch.Tensor:
        # One-hot encode each of the four observation components and concatenate.
        # Widths 5/5/5/4 look like Gym Taxi-v3's decoded state
        # (row, col, passenger location, destination) -- TODO confirm.
        obs_t = obs.T
        taxi_row = torch.eye(5)[obs_t[0]]
        taxi_col = torch.eye(5)[obs_t[1]]
        pass_idx = torch.eye(5)[obs_t[2]]
        dest_idx = torch.eye(4)[obs_t[3]]
        return torch.cat([taxi_row, taxi_col, pass_idx, dest_idx], dim=1)
    def _conditioned_state(self, state: torch.Tensor, action: np.ndarray) -> torch.Tensor:
        # Append a one-hot action. torch.eye(6) hard-codes 6 discrete actions
        # instead of using act_units -- presumably Taxi-v3's action space;
        # NOTE(review): confirm before reusing with another environment.
        return torch.cat([state, torch.eye(6)[action]], dim=1)
| 2,231 | 797 | 360 |
52035fccea24cd0d4bc076e49ebe3571ea9e6e7d | 199 | py | Python | web_project/settings/development.py | nalanzo2001/python-django-template | 9fe0c92101981d44fc984bb4f0fc5dc14cf60170 | [
"MIT"
] | null | null | null | web_project/settings/development.py | nalanzo2001/python-django-template | 9fe0c92101981d44fc984bb4f0fc5dc14cf60170 | [
"MIT"
] | null | null | null | web_project/settings/development.py | nalanzo2001/python-django-template | 9fe0c92101981d44fc984bb4f0fc5dc14cf60170 | [
"MIT"
] | null | null | null | # djangodocker/settings/production.py
# Development settings: extend the shared base configuration.
# NOTE(review): `environ` is imported but unused in this visible chunk --
# presumably consumed further down the file; confirm before removing.
from os import environ
from .base import *
# Current mode
MODE = 'Development'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
| 19.9 | 65 | 0.758794 | # djangodocker/settings/production.py
# Development settings: extend the shared base configuration.
# NOTE(review): `environ` is imported but unused in this visible chunk --
# presumably consumed further down the file; confirm before removing.
from os import environ
from .base import *
# Current mode
MODE = 'Development'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
| 0 | 0 | 0 |
0fcaf20e11ce05012056ea4f46afda0c65c896fa | 11,786 | py | Python | pyETC/write_results.py | mtourneur/pyETC | ea97f22d949d01e5191a1895493aa08ca32db1bf | [
"MIT"
] | 1 | 2018-11-14T18:21:56.000Z | 2018-11-14T18:21:56.000Z | pyETC/write_results.py | mtourneur/pyETC | ea97f22d949d01e5191a1895493aa08ca32db1bf | [
"MIT"
] | null | null | null | pyETC/write_results.py | mtourneur/pyETC | ea97f22d949d01e5191a1895493aa08ca32db1bf | [
"MIT"
] | null | null | null | import numpy as np
def write_results(info_dict):
""" Write the results in a file """
f = open('%s/results/results_summary.txt' % info_dict['MainDirectory'], 'w')
f.write ('Information about the simulation:\n')
f.write ('---------------------------------\n')
if info_dict['etc_type'] == 'time':
f.write ('Compute the time required to observe the object with a SNR=%.2f \n' % info_dict['SNR'])
if info_dict['object_type'] == "magnitude":
f.write ('Object: magnitude of %.2f\n' % info_dict['object_magnitude'])
else:
f.write ('Object: %s/%s\n' % (info_dict['object_folder'],info_dict['object_file']))
elif info_dict['etc_type'] == 'snr':
f.write ('Compute the SNR reached when observing the object during %d exposure(s) of %.2f seconds\n' % (info_dict['Nexp'],info_dict['total_exposure_time']/info_dict['Nexp']))
if info_dict['object_type'] == "magnitude":
f.write ('Object: magnitude of %.2f\n' % info_dict['object_magnitude'])
else:
f.write ('Object: %s/%s\n' % (info_dict['object_folder'],info_dict['object_file']))
elif info_dict['etc_type'] == 'mag':
f.write ('Compute the magnitude reached when observing during %d exposure(s) of %.2f seconds with a SNR=%.2f\n' % (info_dict['Nexp'],info_dict['total_exposure_time']/info_dict['Nexp'],info_dict['SNR']))
f.write ('\nInformation about Passband:\n')
f.write ('----------------------------\n')
f.write ('Filter: %s %s\n' % (info_dict['filter_folder'],info_dict['filter_band']))
f.write ('Cut_on: %.f angstroms\n' % info_dict['Passband_cuton'])
f.write ('Effective wavelength: %.f angstroms\n' % info_dict['effWavelength'])
f.write ('Cut_off: %.f angstroms\n' % info_dict['Passband_cutoff'])
f.write ('\nInformation about Local conditions:\n')
f.write ('----------------------------\n')
f.write ('Site: %s\n' % info_dict['sky_site'])
f.write ('Seeing at zenith: %.2f\n' % info_dict['seeing_zenith'])
f.write ('Elevation: %.2f degrees\n' % info_dict['elevation'])
f.write ('Airmass: %.2f\n' % info_dict['airmass'])
f.write ('Moon age: %.2f\n' % info_dict['moon_age'])
if info_dict['detailed_trans']==1:
f.write ('\nMEAN EFFICENCIES:\n')
f.write ('------------------\n')
f.write ('Obscuration: %.2f \n' % (1.-info_dict['obstruction']))
f.write ('Telescope: %.2f (+obs: %.2f) \n' % (info_dict['trans_mean_tel'],info_dict['trans_mean_tel']*(1.-info_dict['obstruction'])))
f.write ('Instrument: %.2f \n' % info_dict['trans_mean_inst'])
f.write ('Optics (tel+inst): %.2f (+obs: %.2f) \n' % (info_dict['trans_mean_optics'],info_dict['trans_mean_optics']*(1.-info_dict['obstruction'])))
f.write ('Filter: %.2f \n' % info_dict['trans_mean_filter'])
f.write ('Atmosphere: %.2f \n' % info_dict['trans_mean_atm'])
f.write ('Camera: %.2f \n' % info_dict['trans_mean_cam'])
f.write ('System: %.2f (+obs: %.2f)\n' % (info_dict['trans_mean_system'],info_dict['trans_mean_system']*(1-info_dict['obstruction'])))
elif info_dict['detailed_trans'] == 0:
f.write ('\nMEAN EFFICENCIES:\n')
f.write ('------------------\n')
f.write ('Obscuration: %.2f \n' % (1.-info_dict['obstruction']))
f.write ('System: %.2f (+obs: %.2f)\n' % (info_dict['trans_mean_system'],info_dict['trans_mean_system']*(1-info_dict['obstruction'])))
f.write ('\nZeropoint: %.2f (%s mag) \n' % (info_dict['zeropoint'],info_dict['photometry_system']))
if info_dict['etc_type'] == 'snr':
f.write ('\n\nA magnitude (%s system) of %.2f in %s band within a total exposure time of %.2f seconds splited in %d exposure(s), implies a total SNR of :\n\n' %(info_dict['photometry_system'],info_dict['mag'] ,info_dict['filter_band'],info_dict['exptime'],info_dict['Nexp']))
f.write ('\t - Integrated SNR over %d pixels: %.2f \n\n' % (info_dict['npix'], info_dict['SNR']))
f.write ('\nA magnitude (%s system) of %.2f in %s band within a total exposure time of %.2f seconds splited in %d exposure(s), implies a SNR for the central pixel of of :\n\n' %(info_dict['photometry_system'],info_dict['mag_pix'],info_dict['filter_band'],info_dict['DIT_pix']*info_dict['Nexp'],info_dict['Nexp']))
f.write ('\t - SNR of the central pixel: %.2f \n\n' % info_dict['SNR_pix'])
elif info_dict['etc_type'] == 'time':
f.write ('\n\nA magnitude (%s system) of %.2f in %s band with a total SNR of %.2f requires:\n\n' %(info_dict['photometry_system'],info_dict['mag'],info_dict['filter_band'],info_dict['SNR']))
f.write ('\t - a Total exposure time of : %.2f \n\n' % (info_dict['DIT']*info_dict['Nexp']))
f.write ('\n\nA magnitude (%s system) of %.2f in %s band with a SNR of %.2f for the central pixel requires:\n\n' %(info_dict['photometry_system'],info_dict['mag_pix'],info_dict['filter_band'],info_dict['SNR']))
f.write ('\t - a Total exposure time of : %.2f \n\n' % (info_dict['DIT_pix'] * info_dict['Nexp']))
elif info_dict['etc_type'] == 'mag':
f.write ('\n\nFor a total SNR=%.2f in a total exposure time of %.2f (sec) in %d exposure(s) we reach:\n\n' %(info_dict['SNR'],info_dict['exptime'], info_dict['Nexp']))
f.write ('\t - a magnitude (%s system) of: %.2f in %s band\n\n' % (info_dict['photometry_system'],info_dict['mag'],info_dict['filter_band']))
f.write ('\n\nFor the central pixel a SNR=%.2f in a total exposure time of %.2f (sec) in %d exposure(s) we reach:\n\n' %(info_dict['SNR'], info_dict['DIT_pix']*info_dict['Nexp'], info_dict['Nexp']))
f.write ('\t - a magnitude (%s system) of: %.2f in %s band\n\n' % (info_dict['photometry_system'],info_dict['mag_pix'],info_dict['filter_band']))
#f.write ('\nFull well capacity of 1 pixel: %.2f (electrons)\nInverse gain of %.2f e/ADU and %d bits implies a maximum number of electrons to be digitized of %.2f (electrons) \n' % (info_dict['cameras'][info_dict['channel']]['FWC'],info_dict['cameras'][info_dict['channel']]['gain'],info_dict['cameras'][info_dict['channel']]['bits'],info_dict['cameras'][info_dict['channel']]['gain']*(2.**(info_dict['cameras'][info_dict['channel']]['bits'])-1)))
f.write ('\nFull well capacity of 1 pixel: %.2f (electrons)' % (info_dict['cameras'][info_dict['channel']]['FWC']))
f.write ('\n\n--------- One pixel only------------------\n')
f.write ('\nPhoto-electrons created: central pix for %d exposure(s) of %.2f sec \n' % (info_dict['Nexp'],info_dict['DIT_pix']))
f.write ('\tby:\n')
f.write ('\t- Object: %10.2f (electrons)\n' % info_dict['Ftot_el_pix'])
f.write ('\t- Sky: %10.2f (electrons)\n' % (info_dict['Sky_CountRate']*info_dict['DIT_pix']))
f.write ('\t- Readout: %10.2f (electrons)\n' % info_dict['cameras'][info_dict['channel']]['RN'])
f.write ('\t- Dark current: %10.2f (electrons)\n' % (info_dict['cameras'][info_dict['channel']]['DC']*info_dict['DIT_pix']))
f.write ('\t- Digitization: %10.2f (electrons)\n' % info_dict['dig_noise'])
f.write ('\nSNR: -central pixel: %.2f\n' % (np.sqrt(info_dict['Nexp'])*info_dict['Ftot_el_pix']/np.sqrt(info_dict['Ftot_el_pix'] + info_dict['factor_ima'] * ((info_dict['cameras'][info_dict['channel']]['RN']**2. + info_dict['dig_noise']**2.) + info_dict['DIT_pix'] * ( info_dict['cameras'][info_dict['channel']]['DC'] + info_dict['Sky_CountRate'] )))))
f.write ('\nTotal of electrons collected in the central pixel during an exposure time of %d seconds: %.2f \n' % (info_dict['DIT_pix'], info_dict['N_el_tot_pix1'] ))
if info_dict['N_el_tot_pix1'] > info_dict['cameras'][info_dict['channel']]['FWC']:
f.write ('--> Central pixel saturated: number of electrons > Full well Capacity\n')
elif info_dict['N_el_tot_pix1'] > info_dict['cameras'][info_dict['channel']]['gain']*(2.**(info_dict['cameras'][info_dict['channel']]['bits'])-1):
f.write ('--> Central pixel saturated: number of electrons > number of digitizations\n')
elif info_dict['N_el_tot_pix1'] > 1./2*info_dict['cameras'][info_dict['channel']]['FWC']:
f.write ('--> Number of electrons in central pixel > 1/2 of Full well Capacity. Risk of non-linear response.\n')
else:
f.write ('--> No saturation\n')
f.write ('\n\n\n--------- Integrated over %d pixels------------------\n' % info_dict['npix'])
f.write ('\nPhoto-electrons created: brightest pix | total of %d pixels, %d exposure(s) of %.2f sec \n' % (info_dict['npix'],info_dict['Nexp'],info_dict['DIT']))
f.write ('\tby:\n')
f.write ('\t- Object: %10.2f | %10.2f (electrons)\n' % (info_dict['Ftot_el']*info_dict['f_pix']/info_dict['f_PSF'], info_dict['Ftot_el']))
f.write ('\t- Sky: %10.2f | %10.2f (electrons)\n' % (info_dict['Sky_CountRate']*info_dict['DIT'],(info_dict['Sky_CountRate'] * info_dict['npix']* info_dict['DIT'] * info_dict['Nexp'])))
f.write ('\t- Readout: %10.2f | %10.2f (electrons)\n' % (info_dict['cameras'][info_dict['channel']]['RN'],(info_dict['cameras'][info_dict['channel']]['RN'] * info_dict['npix'] * info_dict['Nexp'])))
f.write ('\t- Dark current: %10.2f | %10.2f (electrons)\n' % (info_dict['cameras'][info_dict['channel']]['DC']*info_dict['DIT'],(info_dict['cameras'][info_dict['channel']]['DC'] * info_dict['DIT'] * info_dict['npix'] * info_dict['Nexp'])))
f.write ('\t- Digitization: %10.2f | %10.2f (electrons)\n' % (info_dict['dig_noise'], (info_dict['dig_noise'] * info_dict['npix'] * info_dict['Nexp'])))
#f.write ('\nTotal noise: %.2f \n' % (np.sqrt(Ftot_el * f_PSF * DIT *Nexp + Nexp*factor_ima * npix*((RN**2. + DigN**2.) + DIT * ( DC + BN )))))
f.write ('\nSNR: -central pixel: %.2f\n' % (np.sqrt(info_dict['Nexp'])*info_dict['Ftot_el']*info_dict['f_pix']/info_dict['f_PSF']/np.sqrt(info_dict['Ftot_el']*info_dict['f_pix']/info_dict['f_PSF'] + info_dict['factor_ima'] * ((info_dict['cameras'][info_dict['channel']]['RN']**2. + info_dict['dig_noise']**2.) + info_dict['DIT'] * ( info_dict['cameras'][info_dict['channel']]['DC'] + info_dict['Sky_CountRate'] )))))
f.write (' -integrated over %d pixels: %.2f\n' % (info_dict['npix'],(np.sqrt(info_dict['Nexp']) * info_dict['Ftot_el']) / np.sqrt(info_dict ['Ftot_el'] + info_dict['factor_ima'] * info_dict['npix']*((info_dict['cameras'][info_dict['channel']]['RN']**2. + info_dict['dig_noise']**2.) + info_dict['DIT'] * ( info_dict['cameras'][info_dict['channel']]['DC'] + info_dict['Sky_CountRate'] )))))
f.write ('\nTotal of electrons collected in the brightest pixel during an exposure time of %d seconds: %.2f \n' % (info_dict['DIT'], info_dict['N_el_tot_pix2']))
if info_dict['N_el_tot_pix2'] > info_dict['cameras'][info_dict['channel']]['FWC']:
f.write ('--> Brightest pixel saturated: number of electrons > Full well Capacity \n')
elif info_dict['N_el_tot_pix2'] > info_dict['cameras'][info_dict['channel']]['gain']*(2.**(info_dict['cameras'][info_dict['channel']]['bits'])-1):
f.write ('--> Brightest pixel saturated: number of electrons > number of digitizations\n')
elif info_dict['N_el_tot_pix2'] > 1./2*info_dict['cameras'][info_dict['channel']]['FWC']:
f.write ('--> Number of electrons in brightest pixel > 1/2 of Full well Capacity. Risk of non-linear response.\n')
else:
f.write ('--> No saturation\n')
f.write ('\nDead time: %.2f sec \n(%.2f sec for dithering, the %.2f sec for the readout are not taken into account)\n' % (info_dict['deadtime_tot'],info_dict['T_dithering'],info_dict['cameras'][info_dict['channel']]['ReadoutTime']))
f.close()
| 85.405797 | 452 | 0.620906 | import numpy as np
def write_results(info_dict):
    """Write a human-readable summary of the ETC run to a text file.

    Creates ``<MainDirectory>/results/results_summary.txt`` and reports the
    simulation setup (ETC mode, passband, local conditions, mean
    efficiencies), the computed result (SNR, exposure time or limiting
    magnitude, depending on ``info_dict['etc_type']``), the per-pixel
    electron budget with saturation diagnostics, and the dead time.

    Arguments:
        info_dict: dictionary holding every input and output of the ETC
            run ('etc_type', 'SNR', 'Nexp', 'cameras', 'channel', ...).
            The exact key set is produced by the caller and is not fully
            visible from this function alone.
    """
    # NOTE(review): the handle is never closed if a write raises;
    # consider wrapping the body in a 'with open(...)' block.
    f = open('%s/results/results_summary.txt' % info_dict['MainDirectory'], 'w')
    f.write ('Information about the simulation:\n')
    f.write ('---------------------------------\n')
    # Echo which of the three ETC questions was solved.
    if info_dict['etc_type'] == 'time':
        f.write ('Compute the time required to observe the object with a SNR=%.2f \n' % info_dict['SNR'])
        if info_dict['object_type'] == "magnitude":
            f.write ('Object: magnitude of %.2f\n' % info_dict['object_magnitude'])
        else:
            f.write ('Object: %s/%s\n' % (info_dict['object_folder'],info_dict['object_file']))
    elif info_dict['etc_type'] == 'snr':
        f.write ('Compute the SNR reached when observing the object during %d exposure(s) of %.2f seconds\n' % (info_dict['Nexp'],info_dict['total_exposure_time']/info_dict['Nexp']))
        if info_dict['object_type'] == "magnitude":
            f.write ('Object: magnitude of %.2f\n' % info_dict['object_magnitude'])
        else:
            f.write ('Object: %s/%s\n' % (info_dict['object_folder'],info_dict['object_file']))
    elif info_dict['etc_type'] == 'mag':
        f.write ('Compute the magnitude reached when observing during %d exposure(s) of %.2f seconds with a SNR=%.2f\n' % (info_dict['Nexp'],info_dict['total_exposure_time']/info_dict['Nexp'],info_dict['SNR']))
    # Passband summary (wavelengths in angstroms).
    f.write ('\nInformation about Passband:\n')
    f.write ('----------------------------\n')
    f.write ('Filter: %s %s\n' % (info_dict['filter_folder'],info_dict['filter_band']))
    f.write ('Cut_on: %.f angstroms\n' % info_dict['Passband_cuton'])
    f.write ('Effective wavelength: %.f angstroms\n' % info_dict['effWavelength'])
    f.write ('Cut_off: %.f angstroms\n' % info_dict['Passband_cutoff'])
    # Observing-site conditions.
    f.write ('\nInformation about Local conditions:\n')
    f.write ('----------------------------\n')
    f.write ('Site: %s\n' % info_dict['sky_site'])
    f.write ('Seeing at zenith: %.2f\n' % info_dict['seeing_zenith'])
    f.write ('Elevation: %.2f degrees\n' % info_dict['elevation'])
    f.write ('Airmass: %.2f\n' % info_dict['airmass'])
    f.write ('Moon age: %.2f\n' % info_dict['moon_age'])
    # Mean efficiencies: either a per-element breakdown or only the
    # system-level value, depending on the 'detailed_trans' flag.
    if info_dict['detailed_trans']==1:
        f.write ('\nMEAN EFFICENCIES:\n')
        f.write ('------------------\n')
        f.write ('Obscuration: %.2f \n' % (1.-info_dict['obstruction']))
        f.write ('Telescope: %.2f (+obs: %.2f) \n' % (info_dict['trans_mean_tel'],info_dict['trans_mean_tel']*(1.-info_dict['obstruction'])))
        f.write ('Instrument: %.2f \n' % info_dict['trans_mean_inst'])
        f.write ('Optics (tel+inst): %.2f (+obs: %.2f) \n' % (info_dict['trans_mean_optics'],info_dict['trans_mean_optics']*(1.-info_dict['obstruction'])))
        f.write ('Filter: %.2f \n' % info_dict['trans_mean_filter'])
        f.write ('Atmosphere: %.2f \n' % info_dict['trans_mean_atm'])
        f.write ('Camera: %.2f \n' % info_dict['trans_mean_cam'])
        f.write ('System: %.2f (+obs: %.2f)\n' % (info_dict['trans_mean_system'],info_dict['trans_mean_system']*(1-info_dict['obstruction'])))
    elif info_dict['detailed_trans'] == 0:
        f.write ('\nMEAN EFFICENCIES:\n')
        f.write ('------------------\n')
        f.write ('Obscuration: %.2f \n' % (1.-info_dict['obstruction']))
        f.write ('System: %.2f (+obs: %.2f)\n' % (info_dict['trans_mean_system'],info_dict['trans_mean_system']*(1-info_dict['obstruction'])))
    f.write ('\nZeropoint: %.2f (%s mag) \n' % (info_dict['zeropoint'],info_dict['photometry_system']))
    # Main result, phrased according to the ETC mode that was run.
    if info_dict['etc_type'] == 'snr':
        f.write ('\n\nA magnitude (%s system) of %.2f in %s band within a total exposure time of %.2f seconds splited in %d exposure(s), implies a total SNR of :\n\n' %(info_dict['photometry_system'],info_dict['mag'] ,info_dict['filter_band'],info_dict['exptime'],info_dict['Nexp']))
        f.write ('\t - Integrated SNR over %d pixels: %.2f \n\n' % (info_dict['npix'], info_dict['SNR']))
        f.write ('\nA magnitude (%s system) of %.2f in %s band within a total exposure time of %.2f seconds splited in %d exposure(s), implies a SNR for the central pixel of of :\n\n' %(info_dict['photometry_system'],info_dict['mag_pix'],info_dict['filter_band'],info_dict['DIT_pix']*info_dict['Nexp'],info_dict['Nexp']))
        f.write ('\t - SNR of the central pixel: %.2f \n\n' % info_dict['SNR_pix'])
    elif info_dict['etc_type'] == 'time':
        f.write ('\n\nA magnitude (%s system) of %.2f in %s band with a total SNR of %.2f requires:\n\n' %(info_dict['photometry_system'],info_dict['mag'],info_dict['filter_band'],info_dict['SNR']))
        f.write ('\t - a Total exposure time of : %.2f \n\n' % (info_dict['DIT']*info_dict['Nexp']))
        f.write ('\n\nA magnitude (%s system) of %.2f in %s band with a SNR of %.2f for the central pixel requires:\n\n' %(info_dict['photometry_system'],info_dict['mag_pix'],info_dict['filter_band'],info_dict['SNR']))
        f.write ('\t - a Total exposure time of : %.2f \n\n' % (info_dict['DIT_pix'] * info_dict['Nexp']))
    elif info_dict['etc_type'] == 'mag':
        f.write ('\n\nFor a total SNR=%.2f in a total exposure time of %.2f (sec) in %d exposure(s) we reach:\n\n' %(info_dict['SNR'],info_dict['exptime'], info_dict['Nexp']))
        f.write ('\t - a magnitude (%s system) of: %.2f in %s band\n\n' % (info_dict['photometry_system'],info_dict['mag'],info_dict['filter_band']))
        f.write ('\n\nFor the central pixel a SNR=%.2f in a total exposure time of %.2f (sec) in %d exposure(s) we reach:\n\n' %(info_dict['SNR'], info_dict['DIT_pix']*info_dict['Nexp'], info_dict['Nexp']))
        f.write ('\t - a magnitude (%s system) of: %.2f in %s band\n\n' % (info_dict['photometry_system'],info_dict['mag_pix'],info_dict['filter_band']))
    #f.write ('\nFull well capacity of 1 pixel: %.2f (electrons)\nInverse gain of %.2f e/ADU and %d bits implies a maximum number of electrons to be digitized of %.2f (electrons) \n' % (info_dict['cameras'][info_dict['channel']]['FWC'],info_dict['cameras'][info_dict['channel']]['gain'],info_dict['cameras'][info_dict['channel']]['bits'],info_dict['cameras'][info_dict['channel']]['gain']*(2.**(info_dict['cameras'][info_dict['channel']]['bits'])-1)))
    f.write ('\nFull well capacity of 1 pixel: %.2f (electrons)' % (info_dict['cameras'][info_dict['channel']]['FWC']))
    # Electron budget for the central pixel alone.
    f.write ('\n\n--------- One pixel only------------------\n')
    f.write ('\nPhoto-electrons created: central pix for %d exposure(s) of %.2f sec \n' % (info_dict['Nexp'],info_dict['DIT_pix']))
    f.write ('\tby:\n')
    f.write ('\t- Object: %10.2f (electrons)\n' % info_dict['Ftot_el_pix'])
    f.write ('\t- Sky: %10.2f (electrons)\n' % (info_dict['Sky_CountRate']*info_dict['DIT_pix']))
    f.write ('\t- Readout: %10.2f (electrons)\n' % info_dict['cameras'][info_dict['channel']]['RN'])
    f.write ('\t- Dark current: %10.2f (electrons)\n' % (info_dict['cameras'][info_dict['channel']]['DC']*info_dict['DIT_pix']))
    f.write ('\t- Digitization: %10.2f (electrons)\n' % info_dict['dig_noise'])
    f.write ('\nSNR: -central pixel: %.2f\n' % (np.sqrt(info_dict['Nexp'])*info_dict['Ftot_el_pix']/np.sqrt(info_dict['Ftot_el_pix'] + info_dict['factor_ima'] * ((info_dict['cameras'][info_dict['channel']]['RN']**2. + info_dict['dig_noise']**2.) + info_dict['DIT_pix'] * ( info_dict['cameras'][info_dict['channel']]['DC'] + info_dict['Sky_CountRate'] )))))
    f.write ('\nTotal of electrons collected in the central pixel during an exposure time of %d seconds: %.2f \n' % (info_dict['DIT_pix'], info_dict['N_el_tot_pix1'] ))
    # Saturation checks for the central pixel: full-well, ADC range, then
    # the half-full-well linearity margin.
    if info_dict['N_el_tot_pix1'] > info_dict['cameras'][info_dict['channel']]['FWC']:
        f.write ('--> Central pixel saturated: number of electrons > Full well Capacity\n')
    elif info_dict['N_el_tot_pix1'] > info_dict['cameras'][info_dict['channel']]['gain']*(2.**(info_dict['cameras'][info_dict['channel']]['bits'])-1):
        f.write ('--> Central pixel saturated: number of electrons > number of digitizations\n')
    elif info_dict['N_el_tot_pix1'] > 1./2*info_dict['cameras'][info_dict['channel']]['FWC']:
        f.write ('--> Number of electrons in central pixel > 1/2 of Full well Capacity. Risk of non-linear response.\n')
    else:
        f.write ('--> No saturation\n')
    # Electron budget integrated over the npix pixels covering the PSF.
    f.write ('\n\n\n--------- Integrated over %d pixels------------------\n' % info_dict['npix'])
    f.write ('\nPhoto-electrons created: brightest pix | total of %d pixels, %d exposure(s) of %.2f sec \n' % (info_dict['npix'],info_dict['Nexp'],info_dict['DIT']))
    f.write ('\tby:\n')
    f.write ('\t- Object: %10.2f | %10.2f (electrons)\n' % (info_dict['Ftot_el']*info_dict['f_pix']/info_dict['f_PSF'], info_dict['Ftot_el']))
    f.write ('\t- Sky: %10.2f | %10.2f (electrons)\n' % (info_dict['Sky_CountRate']*info_dict['DIT'],(info_dict['Sky_CountRate'] * info_dict['npix']* info_dict['DIT'] * info_dict['Nexp'])))
    f.write ('\t- Readout: %10.2f | %10.2f (electrons)\n' % (info_dict['cameras'][info_dict['channel']]['RN'],(info_dict['cameras'][info_dict['channel']]['RN'] * info_dict['npix'] * info_dict['Nexp'])))
    f.write ('\t- Dark current: %10.2f | %10.2f (electrons)\n' % (info_dict['cameras'][info_dict['channel']]['DC']*info_dict['DIT'],(info_dict['cameras'][info_dict['channel']]['DC'] * info_dict['DIT'] * info_dict['npix'] * info_dict['Nexp'])))
    f.write ('\t- Digitization: %10.2f | %10.2f (electrons)\n' % (info_dict['dig_noise'], (info_dict['dig_noise'] * info_dict['npix'] * info_dict['Nexp'])))
    #f.write ('\nTotal noise: %.2f \n' % (np.sqrt(Ftot_el * f_PSF * DIT *Nexp + Nexp*factor_ima * npix*((RN**2. + DigN**2.) + DIT * ( DC + BN )))))
    f.write ('\nSNR: -central pixel: %.2f\n' % (np.sqrt(info_dict['Nexp'])*info_dict['Ftot_el']*info_dict['f_pix']/info_dict['f_PSF']/np.sqrt(info_dict['Ftot_el']*info_dict['f_pix']/info_dict['f_PSF'] + info_dict['factor_ima'] * ((info_dict['cameras'][info_dict['channel']]['RN']**2. + info_dict['dig_noise']**2.) + info_dict['DIT'] * ( info_dict['cameras'][info_dict['channel']]['DC'] + info_dict['Sky_CountRate'] )))))
    f.write ('             -integrated over %d pixels: %.2f\n' % (info_dict['npix'],(np.sqrt(info_dict['Nexp']) * info_dict['Ftot_el']) / np.sqrt(info_dict ['Ftot_el'] + info_dict['factor_ima'] * info_dict['npix']*((info_dict['cameras'][info_dict['channel']]['RN']**2. + info_dict['dig_noise']**2.) + info_dict['DIT'] * ( info_dict['cameras'][info_dict['channel']]['DC'] + info_dict['Sky_CountRate'] )))))
    f.write ('\nTotal of electrons collected in the brightest pixel during an exposure time of %d seconds: %.2f \n' % (info_dict['DIT'], info_dict['N_el_tot_pix2']))
    # Same three-tier saturation check, now for the brightest pixel.
    if info_dict['N_el_tot_pix2'] > info_dict['cameras'][info_dict['channel']]['FWC']:
        f.write ('--> Brightest pixel saturated: number of electrons > Full well Capacity \n')
    elif info_dict['N_el_tot_pix2'] > info_dict['cameras'][info_dict['channel']]['gain']*(2.**(info_dict['cameras'][info_dict['channel']]['bits'])-1):
        f.write ('--> Brightest pixel saturated: number of electrons > number of digitizations\n')
    elif info_dict['N_el_tot_pix2'] > 1./2*info_dict['cameras'][info_dict['channel']]['FWC']:
        f.write ('--> Number of electrons in brightest pixel > 1/2 of Full well Capacity. Risk of non-linear response.\n')
    else:
        f.write ('--> No saturation\n')
    f.write ('\nDead time: %.2f sec \n(%.2f sec for dithering, the %.2f sec for the readout are not taken into account)\n' % (info_dict['deadtime_tot'],info_dict['T_dithering'],info_dict['cameras'][info_dict['channel']]['ReadoutTime']))
    f.close()
| 0 | 0 | 0 |
f4f00fd578205759f8adb7b28eb8ef985c1e90aa | 933 | py | Python | tests/test_countablelist.py | vaporyco/pyrlp | bdef65842a310d610e277096b403d284999ecbaa | [
"MIT"
] | null | null | null | tests/test_countablelist.py | vaporyco/pyrlp | bdef65842a310d610e277096b403d284999ecbaa | [
"MIT"
] | null | null | null | tests/test_countablelist.py | vaporyco/pyrlp | bdef65842a310d610e277096b403d284999ecbaa | [
"MIT"
] | null | null | null | import pytest
from rlp.sedes import big_endian_int
from rlp.sedes.lists import CountableList
from rlp import SerializationError
| 33.321429 | 74 | 0.585209 | import pytest
from rlp.sedes import big_endian_int
from rlp.sedes.lists import CountableList
from rlp import SerializationError
def test_countable_list():
    """Round-trip and rejection behaviour of CountableList sedes."""
    def expect_roundtrip(sedes, values):
        # serialize/deserialize must be exact inverses for valid input
        for value in values:
            encoded = sedes.serialize(value)
            assert sedes.deserialize(encoded) == value

    def expect_rejection(sedes, values):
        # invalid element types must raise SerializationError
        for value in values:
            with pytest.raises(SerializationError):
                sedes.serialize(value)

    flat = CountableList(big_endian_int)
    expect_roundtrip(flat, ([], [1, 2], list(range(500))))
    expect_rejection(flat, ([1, 'asdf'], ['asdf'], [1, [2]], [[]]))

    nested = CountableList(CountableList(big_endian_int))
    expect_roundtrip(nested, ([], [[]], [[1, 2, 3], [4]], [[5], [6, 7, 8]],
                              [[], [], [9, 0]]))
    expect_rejection(nested, ([[[]]], [1, 2], [1, ['asdf'], ['fdsa']]))
| 781 | 0 | 23 |
e0013af625fc4703d9a5cd27a0abf241450b8f8e | 11,040 | py | Python | gui/alarm_handler.py | a-bombarda/mvm-gui | e00c3fe39cf25c6fb2d2725891610da8885d1d76 | [
"MIT"
] | null | null | null | gui/alarm_handler.py | a-bombarda/mvm-gui | e00c3fe39cf25c6fb2d2725891610da8885d1d76 | [
"MIT"
] | null | null | null | gui/alarm_handler.py | a-bombarda/mvm-gui | e00c3fe39cf25c6fb2d2725891610da8885d1d76 | [
"MIT"
] | null | null | null | """
Tools for asking the ESP about any alarms that have been raised,
and telling the user about them if so.
The top alarmbar shows little QPushButtons for each alarm that is currently active.
If the user clicks a button, they are shown the message text and a "snooze" button
for that alarm.
There is a single physical snooze button which is manipulated based on which alarm
the user has selected.
"""
import sys
from communication import rpi
from PyQt5 import QtCore, QtWidgets
# Translate a one-hot alarm bitmask (1, 2, 4, ...) back to its bit index,
# e.g. BITMAP[8] == 3.  Used to show compact numeric codes on the buttons.
BITMAP = {1 << x: x for x in range(32)}
# Severity levels shared by SnoozeButton / AlarmButton / AlarmHandler.
ERROR = 0
WARNING = 1
class SnoozeButton:
    """
    Takes care of snoozing alarms.
    Class members:
    - _esp32: ESP32Serial object for communication
    - _alarm_h: AlarmHandler (notified so the GUI side is also cleared)
    - _alarmsnooze: QPushButton that user will press
    - _code: The alarm code that the user is currently dealing with
    - _mode: Whether the current alarm is an ERROR or a WARNING
    """
    def __init__(self, esp32, alarm_h, alarmsnooze):
        """
        Constructor
        Arguments: see relevant class members
        """
        self._esp32 = esp32
        self._alarm_h = alarm_h
        self._alarmsnooze = alarmsnooze
        # Hidden until an AlarmButton selects an alarm to snooze.
        self._alarmsnooze.hide()
        self._code = None
        self._mode = None
        self._alarmsnooze.clicked.connect(self._on_click_snooze)
        self._alarmsnooze.setStyleSheet(
            'background-color: rgb(0,0,205); color: white; font-weight: bold;')
        self._alarmsnooze.setMaximumWidth(150)
    def set_code(self, code):
        """
        Sets the alarm code
        Arguments:
        - code: Integer alarm code (one-hot bitmask; BITMAP maps it to a bit index)
        """
        self._code = code
        self._alarmsnooze.setText('Snooze %s' % str(BITMAP[self._code]))
    def set_mode(self, mode):
        """
        Sets the mode.
        Arguments:
        - mode: ERROR or WARNING
        """
        self._mode = mode
    def show(self):
        """
        Shows the snooze alarm button
        """
        self._alarmsnooze.show()
    def _on_click_snooze(self):
        """
        The callback function called when the alarm snooze button is clicked.
        """
        if self._mode not in [WARNING, ERROR]:
            raise Exception('mode must be alarm or warning.')
        # Reset the alarms/warnings in the ESP
        # If the ESP connection fails at this
        # time, raise an error box
        if self._mode == ERROR:
            # Hardware alarms are acknowledged individually on the device.
            self._esp32.snooze_hw_alarm(self._code)
            self._alarm_h.snooze_alarm(self._code)
        else:
            # Warnings are reset in bulk on the device.
            self._esp32.reset_warnings()
            self._alarm_h.snooze_warning(self._code)
class AlarmButton(QtWidgets.QPushButton):
    """
    The alarm and warning buttons shown in the top alarmbar.
    Class members:
    - _mode: Whether this alarm is an ERROR or a WARNING
    - _code: The integer code for this alarm.
    - _errstr: Text describing this alarm.
    - _label: The QLabel to populate with the error message, if the user
      clicks our button.
    - _snooze_btn: The SnoozeButton to manipulate if the user clicks our
      button.
    NOTE(review): no __init__ is defined in this copy of the class, yet the
    methods below read self._bkg_color, self._errstr, etc.  Presumably the
    initializer that assigns these attributes was dropped - confirm against
    the original module.
    """
    def _on_click_event(self):
        """
        The callback function called when the user clicks on an alarm button
        """
        # Set the label showing the alarm name
        style = """QLabel {
            background-color: %s;
            color: white;
            font-weight: bold;
            }""" % self._bkg_color
        self._label.setStyleSheet(style)
        self._label.setText(self._errstr)
        self._label.show()
        self._activate_snooze_btn()
    def _activate_snooze_btn(self):
        """
        Activates the snooze button that will silence this alarm
        """
        self._snooze_btn.set_mode(self._mode)
        self._snooze_btn.set_code(self._code)
        self._snooze_btn.show()
class AlarmHandler:
    """
    This class starts a QTimer dedicated to checking if there are any errors
    or warnings coming from ESP32
    Class members:
    - _esp32: ESP32Serial object for communication
    - _alarm_timer: Timer that will periodically ask the ESP about any alarms
    - _err_buttons: {int: AlarmButton} for any active ERROR alarms
    - _war_buttons: {int: AlarmButton} for any active WARNING alarms
    - _hwfail_func: callback invoked for critical hardware-failure codes
    - _hwfail_codes: one-hot codes treated as critical hardware failures
    - _alarmlabel: QLabel showing text of the currently-selected alarm
    - _alarmstack: Stack of QPushButtons for active alarms
    - _alarmsnooze: QPushButton for snoozing an alarm
    - _snooze_btn: SnoozeButton that manipulates _alarmsnooze
    """
    def __init__(self, config, esp32, alarmbar, hwfail_func):
        """
        Constructor
        Arguments: see relevant class members.
        """
        self._esp32 = esp32
        self._alarm_timer = QtCore.QTimer()
        self._alarm_timer.timeout.connect(self.handle_alarms)
        # Config value is in seconds; QTimer.start expects milliseconds.
        self._alarm_timer.start(config["alarminterval"] * 1000)
        self._err_buttons = {}
        self._war_buttons = {}
        self._hwfail_func = hwfail_func
        # Config lists bit positions; convert each to its one-hot mask.
        self._hwfail_codes = [1 << code for code in config['hwfail_codes']]
        self._alarmlabel = alarmbar.findChild(QtWidgets.QLabel, "alarmlabel")
        self._alarmstack = alarmbar.findChild(QtWidgets.QHBoxLayout, "alarmstack")
        self._alarmsnooze = alarmbar.findChild(QtWidgets.QPushButton, "alarmsnooze")
        self._snooze_btn = SnoozeButton(self._esp32, self, self._alarmsnooze)
    def handle_alarms(self):
        """
        The callback method which is called periodically to check if the ESP raised any
        alarm or warning.
        """
        # Retrieve alarms and warnings from the ESP
        esp32alarm = self._esp32.get_alarms()
        esp32warning = self._esp32.get_warnings()
        #
        # ALARMS
        #
        if esp32alarm:
            errors = esp32alarm.strerror_all()
            alarm_codes = esp32alarm.get_alarm_codes()
            for alarm_code, err_str in zip(alarm_codes, errors):
                # Critical failures are escalated to the dedicated handler.
                if alarm_code in self._hwfail_codes:
                    self._hwfail_func(err_str)
                    print("Critical harware failure")
                # Add a bar button only for codes not already displayed.
                if alarm_code not in self._err_buttons:
                    btn = AlarmButton(ERROR, alarm_code, err_str,
                                      self._alarmlabel, self._snooze_btn)
                    self._alarmstack.addWidget(btn)
                    self._err_buttons[alarm_code] = btn
        #
        # WARNINGS
        #
        if esp32warning:
            errors = esp32warning.strerror_all()
            warning_codes = esp32warning.get_alarm_codes()
            for warning_code, err_str in zip(warning_codes, errors):
                if warning_code not in self._war_buttons:
                    btn = AlarmButton(
                        WARNING, warning_code, err_str, self._alarmlabel, self._snooze_btn)
                    self._alarmstack.addWidget(btn)
                    self._war_buttons[warning_code] = btn
    def snooze_alarm(self, code):
        """
        Graphically snoozes alarm corresponding to 'code'
        Arguments:
        - code: integer alarm code
        """
        if code not in self._err_buttons:
            raise Exception('Cannot snooze code %s as alarm button doesn\'t exist.' % code)
        # deleteLater schedules the widget for deletion by the Qt event loop.
        self._err_buttons[code].deleteLater()
        del self._err_buttons[code]
        # Clear the shared message label and hide the snooze button.
        self._alarmlabel.setText('')
        self._alarmlabel.setStyleSheet('QLabel { background-color: black; }')
        self._alarmsnooze.hide()
    def snooze_warning(self, code):
        """
        Graphically snoozes warning corresponding to 'code'
        Arguments:
        - code: integer alarm code
        """
        if code not in self._war_buttons:
            raise Exception('Cannot snooze code %s as warning button doesn\'t exist.' % code)
        self._war_buttons[code].deleteLater()
        del self._war_buttons[code]
        self._alarmlabel.setText('')
        self._alarmlabel.setStyleSheet('QLabel { background-color: black; }')
        self._alarmsnooze.hide()
class CriticalAlarmHandler:
    """
    Handles severe communication and hardware malfunction errors.
    These errors have a low chance of recovery, but this class handles irrecoverable as well as
    potentially recoverable errors (with options to retry).
    """
    def __init__(self, mainparent, esp32):
        """
        Main constructor. Grabs necessary widgets from the main window
        Arguments:
        - mainparent: Reference to the mainwindow widget.
        - esp32: Reference to the ESP32 interface.
        """
        self._esp32 = esp32
        self._toppane = mainparent.toppane
        self._criticalerrorpage = mainparent.criticalerrorpage
        self._bottombar = mainparent.bottombar
        self._criticalerrorbar = mainparent.criticalerrorbar
        self._mainparent = mainparent
        # Number of retries attempted so far for recoverable failures.
        self.nretry = 0
        self._label_criticalerror = mainparent.findChild(QtWidgets.QLabel, "label_criticalerror")
        self._label_criticaldetails = mainparent.findChild(
            QtWidgets.QLabel,
            "label_criticaldetails")
        self._button_retrycmd = mainparent.findChild(QtWidgets.QPushButton, "button_retrycmd")
    def show_critical_error(self, text, details=""):
        """
        Shows the critical error in the mainwindow.
        This includes changing the screen to red and displaying a big message to this effect.
        Arguments:
        - text: main message displayed in the critical-error page.
        - details: optional extra diagnostic text.
        """
        self._label_criticalerror.setText(text)
        self._toppane.setCurrentWidget(self._criticalerrorpage)
        self._bottombar.setCurrentWidget(self._criticalerrorbar)
        self._label_criticaldetails.setText(details)
        # Sound the hardware alarm on the Raspberry Pi.
        rpi.start_alarm_system()
        # Force an immediate repaint so the error page is visible at once.
        self._mainparent.repaint()
        # Fixed: removed leftover debug statement `input("Hang on wait reboot")`,
        # which blocked the Qt GUI thread forever waiting for console input
        # that a deployed GUI has no way to provide.
    def call_system_failure(self, details=""):
        """
        Calls a system failure and sets the mainwindow into a state that is irrecoverable without
        maintenance support.
        Arguments:
        - details: optional diagnostic text; newlines are stripped so it fits the label.
        """
        # Retrying makes no sense for an irrecoverable failure.
        self._button_retrycmd.hide()
        disp_msg = "*** SYSTEM FAILURE ***\nCall the Maintenance Service"
        details = str(details).replace("\n", "")
        self.show_critical_error(disp_msg, details=details)
| 33.253012 | 97 | 0.627264 | """
Tools for asking the ESP about any alarms that have been raised,
and telling the user about them if so.
The top alarmbar shows little QPushButtons for each alarm that is currently active.
If the user clicks a button, they are shown the message text and a "snooze" button
for that alarm.
There is a single physical snooze button which is manipulated based on which alarm
the user has selected.
"""
import sys
from communication import rpi
from PyQt5 import QtCore, QtWidgets
# Translate a one-hot alarm bitmask (1, 2, 4, ...) back to its bit index,
# e.g. BITMAP[8] == 3.  Used to show compact numeric codes on the buttons.
BITMAP = {1 << x: x for x in range(32)}
# Severity levels shared by SnoozeButton / AlarmButton / AlarmHandler.
ERROR = 0
WARNING = 1
class SnoozeButton:
    """
    Drives the single snooze QPushButton in the alarm bar.

    Class members:
    - _esp32: ESP32Serial object used to acknowledge alarms on the device
    - _alarm_h: the owning AlarmHandler, notified so the GUI side is cleared
    - _alarmsnooze: the QPushButton the user presses
    - _code: alarm code currently selected for snoozing (None until set)
    - _mode: ERROR or WARNING, selects which acknowledge path is used
    """

    def __init__(self, esp32, alarm_h, alarmsnooze):
        """
        Constructor.

        Arguments: see relevant class members.
        """
        self._esp32 = esp32
        self._alarm_h = alarm_h
        self._code = None
        self._mode = None
        # The button stays hidden until an AlarmButton selects an alarm.
        self._alarmsnooze = alarmsnooze
        self._alarmsnooze.hide()
        self._alarmsnooze.clicked.connect(self._on_click_snooze)
        self._alarmsnooze.setStyleSheet(
            'background-color: rgb(0,0,205); color: white; font-weight: bold;')
        self._alarmsnooze.setMaximumWidth(150)

    def set_code(self, code):
        """
        Selects the alarm code the snooze button acts on.

        Arguments:
        - code: Integer alarm code (a one-hot bitmask, see BITMAP)
        """
        self._code = code
        caption = 'Snooze %s' % str(BITMAP[code])
        self._alarmsnooze.setText(caption)

    def set_mode(self, mode):
        """
        Selects the severity the snooze button acts on.

        Arguments:
        - mode: ERROR or WARNING
        """
        self._mode = mode

    def show(self):
        """
        Makes the snooze button visible.
        """
        self._alarmsnooze.show()

    def _on_click_snooze(self):
        """
        Callback fired when the user presses the snooze button: acknowledges
        the selected alarm/warning on the ESP and clears it from the GUI.
        """
        if self._mode == ERROR:
            # Hardware alarms are acknowledged individually on the device.
            self._esp32.snooze_hw_alarm(self._code)
            self._alarm_h.snooze_alarm(self._code)
        elif self._mode == WARNING:
            # Warnings are reset in bulk on the device.
            self._esp32.reset_warnings()
            self._alarm_h.snooze_warning(self._code)
        else:
            raise Exception('mode must be alarm or warning.')
class AlarmButton(QtWidgets.QPushButton):
    """
    A small numbered button added to the top alarm bar for every active
    alarm or warning.

    Class members:
    - _mode: ERROR or WARNING, decides the button colour
    - _code: integer (one-hot) code of this alarm
    - _errstr: message text describing this alarm
    - _label: QLabel that receives the message text when clicked
    - _snooze_btn: SnoozeButton re-targeted at this alarm when clicked
    """

    def __init__(self, mode, code, errstr, label, snooze_btn):
        super().__init__()
        self._mode = mode
        self._code = code
        self._errstr = errstr
        self._label = label
        self._snooze_btn = snooze_btn
        self.clicked.connect(self._on_click_event)
        # Red for errors, orange for warnings; anything else is a bug.
        palette = {ERROR: 'red', WARNING: 'orange'}
        if mode not in palette:
            raise Exception('Option %s not supported' % self._mode)
        self._bkg_color = palette[mode]
        # The button caption is the bit index of the alarm code.
        self.setText(str(BITMAP[self._code]))
        css = """background-color: %s;
                color: white;
                border: 0.5px solid white;
                font-weight: bold;
                """ % self._bkg_color
        self.setStyleSheet(css)
        self.setMaximumWidth(35)
        self.setMaximumHeight(30)

    def _on_click_event(self):
        """
        Callback fired when the user clicks this alarm button: shows the
        alarm message in the shared label and points the snooze button here.
        """
        css = """QLabel {
            background-color: %s;
            color: white;
            font-weight: bold;
            }""" % self._bkg_color
        self._label.setStyleSheet(css)
        self._label.setText(self._errstr)
        self._label.show()
        self._activate_snooze_btn()

    def _activate_snooze_btn(self):
        """
        Re-targets the shared snooze button at this alarm and reveals it.
        """
        self._snooze_btn.set_mode(self._mode)
        self._snooze_btn.set_code(self._code)
        self._snooze_btn.show()
class AlarmHandler:
    """
    Polls the ESP32 on a QTimer and keeps the top alarm bar in sync with
    the errors and warnings the device reports.

    Class members:
    - _esp32: ESP32Serial object for communication
    - _alarm_timer: QTimer that periodically triggers handle_alarms
    - _err_buttons: {code: AlarmButton} for active ERROR alarms
    - _war_buttons: {code: AlarmButton} for active WARNING alarms
    - _hwfail_func: callback invoked for critical hardware-failure codes
    - _hwfail_codes: one-hot codes treated as critical hardware failures
    - _alarmlabel: QLabel showing the currently selected alarm text
    - _alarmstack: layout receiving one button per active alarm
    - _alarmsnooze: QPushButton used to snooze the selected alarm
    - _snooze_btn: SnoozeButton wrapper around _alarmsnooze
    """

    def __init__(self, config, esp32, alarmbar, hwfail_func):
        """
        Constructor.

        Arguments: see relevant class members.
        """
        self._esp32 = esp32
        self._err_buttons = {}
        self._war_buttons = {}
        self._hwfail_func = hwfail_func
        # Config lists bit positions; convert each to its one-hot mask.
        self._hwfail_codes = [1 << code for code in config['hwfail_codes']]
        self._alarmlabel = alarmbar.findChild(QtWidgets.QLabel, "alarmlabel")
        self._alarmstack = alarmbar.findChild(QtWidgets.QHBoxLayout, "alarmstack")
        self._alarmsnooze = alarmbar.findChild(QtWidgets.QPushButton, "alarmsnooze")
        self._snooze_btn = SnoozeButton(self._esp32, self, self._alarmsnooze)
        # Poll the ESP at the configured interval (config value in seconds,
        # QTimer.start wants milliseconds).
        self._alarm_timer = QtCore.QTimer()
        self._alarm_timer.timeout.connect(self.handle_alarms)
        self._alarm_timer.start(config["alarminterval"] * 1000)

    def handle_alarms(self):
        """
        Periodic callback: queries the ESP for raised alarms/warnings and
        adds a bar button for each one not already displayed.
        """
        esp32alarm = self._esp32.get_alarms()
        esp32warning = self._esp32.get_warnings()
        # --- ERROR-severity alarms ---
        if esp32alarm:
            for code, message in zip(esp32alarm.get_alarm_codes(),
                                     esp32alarm.strerror_all()):
                if code in self._hwfail_codes:
                    # Critical failure: escalate to the dedicated handler.
                    self._hwfail_func(message)
                    print("Critical harware failure")
                if code not in self._err_buttons:
                    button = AlarmButton(ERROR, code, message,
                                         self._alarmlabel, self._snooze_btn)
                    self._alarmstack.addWidget(button)
                    self._err_buttons[code] = button
        # --- WARNING-severity alarms ---
        if esp32warning:
            for code, message in zip(esp32warning.get_alarm_codes(),
                                     esp32warning.strerror_all()):
                if code not in self._war_buttons:
                    button = AlarmButton(WARNING, code, message,
                                         self._alarmlabel, self._snooze_btn)
                    self._alarmstack.addWidget(button)
                    self._war_buttons[code] = button

    def snooze_alarm(self, code):
        """
        Removes the ERROR button for `code` from the bar and clears the
        message label and snooze button.

        Arguments:
        - code: integer alarm code
        """
        if code not in self._err_buttons:
            raise Exception("Cannot snooze code %s as alarm button doesn't exist." % code)
        # deleteLater schedules widget destruction on the Qt event loop.
        self._err_buttons.pop(code).deleteLater()
        self._clear_selected_alarm()

    def snooze_warning(self, code):
        """
        Removes the WARNING button for `code` from the bar and clears the
        message label and snooze button.

        Arguments:
        - code: integer alarm code
        """
        if code not in self._war_buttons:
            raise Exception("Cannot snooze code %s as warning button doesn't exist." % code)
        self._war_buttons.pop(code).deleteLater()
        self._clear_selected_alarm()

    def _clear_selected_alarm(self):
        # Reset the shared label and hide the snooze button until the user
        # selects another alarm.
        self._alarmlabel.setText('')
        self._alarmlabel.setStyleSheet('QLabel { background-color: black; }')
        self._alarmsnooze.hide()
class CriticalAlarmHandler:
    """
    Handles severe communication and hardware malfunction errors.
    These errors have a low chance of recovery, but this class handles irrecoverable as well as
    potentially recoverable errors (with options to retry).
    """
    def __init__(self, mainparent, esp32):
        """
        Main constructor. Grabs necessary widgets from the main window
        Arguments:
        - mainparent: Reference to the mainwindow widget.
        - esp32: Reference to the ESP32 interface.
        """
        self._esp32 = esp32
        # Widgets swapped in when a critical error is shown.
        self._toppane = mainparent.toppane
        self._criticalerrorpage = mainparent.criticalerrorpage
        self._bottombar = mainparent.bottombar
        self._criticalerrorbar = mainparent.criticalerrorbar
        self._mainparent = mainparent
        # Retry counter; incremented elsewhere when a recoverable error is retried.
        self.nretry = 0
        self._label_criticalerror = mainparent.findChild(QtWidgets.QLabel, "label_criticalerror")
        self._label_criticaldetails = mainparent.findChild(
            QtWidgets.QLabel,
            "label_criticaldetails")
        self._button_retrycmd = mainparent.findChild(QtWidgets.QPushButton, "button_retrycmd")
    def show_critical_error(self, text, details=""):
        """
        Shows the critical error in the mainwindow.
        This includes changing the screen to red and displaying a big message to this effect.
        Arguments:
        - text: the large headline message to display.
        - details: optional smaller detail text shown under the headline.
        """
        self._label_criticalerror.setText(text)
        self._toppane.setCurrentWidget(self._criticalerrorpage)
        self._bottombar.setCurrentWidget(self._criticalerrorbar)
        self._label_criticaldetails.setText(details)
        # NOTE(review): relies on a module-level `rpi` helper defined elsewhere
        # in this file; presumably sounds the hardware alarm — confirm.
        rpi.start_alarm_system()
        self._mainparent.repaint()
        # NOTE(review): input() blocks this thread indefinitely, freezing the GUI
        # until the system is rebooted; looks like a deliberate halt (or a debug
        # leftover) — confirm intent.
        input("Hang on wait reboot")
    def call_system_failure(self, details=""):
        """
        Calls a system failure and sets the mainwindow into a state that is irrecoverable without
        maintenance support.
        Arguments:
        - details: optional diagnostic text; newlines are stripped for display.
        """
        self._button_retrycmd.hide()
        disp_msg = "*** SYSTEM FAILURE ***\nCall the Maintenance Service"
        details = str(details).replace("\n", "")
        self.show_critical_error(disp_msg, details=details)
| 854 | 0 | 27 |
9b12730fefec9529f41178917873e606484bc096 | 938 | py | Python | Condition/Condition7.py | liyuanyuan11/Python | d94cc7ab39e56c6e24bfc741a30da77590d1d220 | [
"MIT"
] | null | null | null | Condition/Condition7.py | liyuanyuan11/Python | d94cc7ab39e56c6e24bfc741a30da77590d1d220 | [
"MIT"
] | null | null | null | Condition/Condition7.py | liyuanyuan11/Python | d94cc7ab39e56c6e24bfc741a30da77590d1d220 | [
"MIT"
] | null | null | null | choice=input("是否需要输入新的学生信息(Yes/Y表示需要需要录入)?")
studentList=[]
if choice.upper()=="YES" or choice.upper()=="Y":
isError=False
student={}
student["name"]=input("请输入姓名:")
student["ID"]=input("请输入学号:")
score1=float(input("请输入语文成绩:"))
if score1 <= 100 and score1 >= 0:
student["score1"]=score1
else:
print("输入的语文成绩有错误!")
isError=True
score2=float(input("请输入数学成绩:"))
if score2 <= 100 and score2 >= 0:
student["score2"]=score2
else:
print("输入的数学成绩有错误!")
isError=True
score3=float(input("请输入英语成绩:"))
if score3 <= 100 and score3 >= 0:
student["score3"]=score3
else:
print("输入的英语成绩有错误!")
isError=True
if isError==False:
student["total"]=student["score1"]+student["score2"]+student["score3"]
studentList.append(student)
print(student["name"]+"的成绩录入成功!")
else:
print("输入有误,录入成绩失败!") | 30.258065 | 78 | 0.581023 | choice=input("是否需要输入新的学生信息(Yes/Y表示需要需要录入)?")
studentList = []
# Proceed only when the user confirmed recording with Yes/Y (case-insensitive).
if choice.upper() == "YES" or choice.upper() == "Y":
    isError = False
    student = {}
    student["name"] = input("请输入姓名:")
    student["ID"] = input("请输入学号:")
    # Prompt for each subject score in turn. A score outside [0, 100] marks
    # the record as invalid, but the remaining prompts still run — matching
    # the original one-stanza-per-subject flow, now deduplicated into a loop.
    for key, subject in (("score1", "语文"), ("score2", "数学"), ("score3", "英语")):
        score = float(input("请输入%s成绩:" % subject))
        if 0 <= score <= 100:
            student[key] = score
        else:
            print("输入的%s成绩有错误!" % subject)
            isError = True
    if not isError:
        # All three scores are present only when no validation error occurred.
        student["total"] = student["score1"] + student["score2"] + student["score3"]
        studentList.append(student)
        print(student["name"] + "的成绩录入成功!")
    else:
        print("输入有误,录入成绩失败!")
de1039d3edebb9005dd9b4f0f8045335bdf8b17d | 8,634 | py | Python | Assets/windows/widgets/ElectrodesSynchrowidget.py | Tic-Tac-Toc/EEG_HCI | 0b3bb0a2bd9c10ba732e1887a7552e6bcb6c3c35 | [
"MIT"
] | 1 | 2020-07-25T10:21:49.000Z | 2020-07-25T10:21:49.000Z | Assets/windows/widgets/ElectrodesSynchrowidget.py | Tic-Tac-Toc/EEG_HCI | 0b3bb0a2bd9c10ba732e1887a7552e6bcb6c3c35 | [
"MIT"
] | null | null | null | Assets/windows/widgets/ElectrodesSynchrowidget.py | Tic-Tac-Toc/EEG_HCI | 0b3bb0a2bd9c10ba732e1887a7552e6bcb6c3c35 | [
"MIT"
] | null | null | null | from PySide2.QtWidgets import QWidget
from PySide2.QtCore import Qt, QPoint
from PySide2.QtGui import QPainter, QPen, QColor, QFont
import os
import math
from Assets.mathematical_scripts.util import getElectrodesList
#Function which load electrodes position, all are based on the up-left-quarter electrodes positions
#Function which load the connection from the PSC Matrix Size : Nelec x Nelec
#Stuff to draw head, connection, electrodes using QPainter
| 56.431373 | 166 | 0.571809 | from PySide2.QtWidgets import QWidget
from PySide2.QtCore import Qt, QPoint
from PySide2.QtGui import QPainter, QPen, QColor, QFont
import os
import math
from Assets.mathematical_scripts.util import getElectrodesList
class ElectrodesSynchroWidget(QWidget):
    """Qt widget drawing a top-view EEG head map with electrode positions and,
    optionally, coloured lines for above-threshold pairwise synchrony values."""
    def __init__(self):
        super().__init__()
        self.setBaseSize(800,850)
        self.setMaximumWidth(800)
        # No connection matrix loaded yet; paintEvent draws electrodes only.
        self.drawConnection = False
    #Function which load electrodes position, all are based on the up-left-quarter electrodes positions
    def LoadElectrodesInfos(self):
        """Compute every electrode's screen position from the head ellipse.

        Each entry is [QPoint, drawn_flag]; right/bottom electrodes are derived
        by mirroring the upper-left-quadrant ones through the head centre.
        Requires drawHead() to have set self.center/self.rx/self.ry first.
        """
        self.electrodesPos = {}
        self.electrodesPos["FP1"] = [QPoint(self.center.x() - (self.rx*1/10), self.center.y() - (self.ry*9/10)), False]
        self.electrodesPos["F3"] = [QPoint(self.center.x() - (self.rx*2/10), self.center.y() - (self.ry*5/10)), False]
        self.electrodesPos["FZ"] = [QPoint(self.center.x(), self.center.y() - (self.ry*9/20)), False]
        self.electrodesPos["F7"] = [QPoint(self.center.x() - (self.rx*8/20), self.center.y() - (self.ry*16/20)), False]
        self.electrodesPos["FC7"] = [QPoint(self.center.x(), self.center.y() - (self.ry*2/10)), False]
        self.electrodesPos["FT7"] = [QPoint(self.center.x() - (self.rx*15/20), self.center.y() - (self.ry*10/20)), False]
        self.electrodesPos["T3"] = [QPoint(self.center.x() - (self.rx*9/10), self.center.y()), False]
        self.electrodesPos["C3"] = [QPoint(self.center.x() - (self.rx*5/10), self.center.y()), False]
        self.electrodesPos["FC3"] = [QPoint(self.center.x() - (self.rx * 3/10), self.center.y() - (self.ry*5/20)), False]
        self.electrodesPos["FP2"] = [QPoint(-self.electrodesPos["FP1"][0].x() + 2 * self.center.x(), self.electrodesPos["FP1"][0].y()), False]
        self.electrodesPos["F4"] = [QPoint(-self.electrodesPos["F3"][0].x() + 2 * self.center.x(), self.electrodesPos["F3"][0].y()), False]
        self.electrodesPos["F8"] = [QPoint(-self.electrodesPos["F7"][0].x() + 2 * self.center.x(), self.electrodesPos["F7"][0].y()), False]
        self.electrodesPos["CZ"] = [self.center, False]
        self.electrodesPos["C4"] = [QPoint(-self.electrodesPos["C3"][0].x() + 2 * self.center.x(), self.electrodesPos["C3"][0].y()), False]
        self.electrodesPos["T4"] = [QPoint(-self.electrodesPos["T3"][0].x() + 2 * self.center.x(), self.electrodesPos["T3"][0].y()), False]
        self.electrodesPos["FT8"] = [QPoint(-self.electrodesPos["FT7"][0].x() + 2 * self.center.x(), self.electrodesPos["FT7"][0].y()), False]
        self.electrodesPos["FC4"] = [QPoint(-self.electrodesPos["FC3"][0].x() + 2 * self.center.x(), self.electrodesPos["FC3"][0].y()), False]
        self.electrodesPos["T5"] = [QPoint(self.electrodesPos["F7"][0].x(), -self.electrodesPos["F7"][0].y() + 2 * self.center.y()), False]
        self.electrodesPos["P3"] = [QPoint(self.electrodesPos["F3"][0].x(), -self.electrodesPos["F3"][0].y() + 2 * self.center.y()), False]
        self.electrodesPos["PZ"] = [QPoint(self.electrodesPos["FZ"][0].x(), -self.electrodesPos["FZ"][0].y() + 2 * self.center.y()), False]
        self.electrodesPos["CPZ"] = [QPoint(self.electrodesPos["FC7"][0].x(), -self.electrodesPos["FC7"][0].y() + 2 * self.center.y()), False]
        self.electrodesPos["TP8"] = [QPoint(self.electrodesPos["FT8"][0].x(), -self.electrodesPos["FT8"][0].y() + 2 * self.center.y()), False]
        self.electrodesPos["CP4"] = [QPoint(-self.electrodesPos["FC3"][0].x() + 2 * self.center.x(), -self.electrodesPos["FC3"][0].y() + 2 * self.center.y()), False]
        self.electrodesPos["P4"] = [QPoint(self.electrodesPos["F4"][0].x(), -self.electrodesPos["F4"][0].y() + 2 * self.center.y()), False]
        self.electrodesPos["T6"] = [QPoint(self.electrodesPos["F8"][0].x(), -self.electrodesPos["F8"][0].y() + 2 * self.center.y()), False]
        self.electrodesPos["O1"] = [QPoint(self.electrodesPos["FP1"][0].x(), -self.electrodesPos["FP1"][0].y() + 2 * self.center.y()), False]
        self.electrodesPos["O2"] = [QPoint(self.electrodesPos["FP2"][0].x(), -self.electrodesPos["FP2"][0].y() + 2 * self.center.y()), False]
        self.electrodesPos["TP7"] = [QPoint(-self.electrodesPos["TP8"][0].x() + 2 * self.center.x(), self.electrodesPos["TP8"][0].y()), False]
        self.electrodesPos["CP3"] = [QPoint(self.electrodesPos["FC3"][0].x(), -self.electrodesPos["FC3"][0].y() + 2 * self.center.y()), False]
        self.electrodesPos["OZ"] = [QPoint(self.center.x(), self.center.y() + (self.ry*19/20)), False]
        # Ordering of this list must match the row/column order of self.mat.
        self.electrodesList = getElectrodesList()
    #Function which load the connection from the PSC Matrix Size : Nelec x Nelec
    def LoadConnection(self, mat, limitdownvalue, limitupvalue):
        """Store the synchrony matrix and display thresholds, then repaint."""
        self.mat = mat
        self.drawConnection = True
        self.limitUpValue = limitupvalue
        self.limitDownValue = limitdownvalue
        self.update()
    #Stuff to draw head, connection, electrodes using QPainter
    def paintEvent(self, e):
        """Qt paint callback: draws head outline, connections and electrodes."""
        qp = QPainter()
        qp.begin(self)
        self.drawHead(qp)
        self.LoadElectrodesInfos()
        if self.drawConnection:
            self.drawConnections(qp)
        self.drawElectrode(qp, QPoint(self.center.x() - self.rx - 20, self.center.y()), "A1") #left ear
        self.drawElectrode(qp, QPoint(self.center.x() + self.rx + 20, self.center.y()), "A2") #right ear
        for key in self.electrodesPos.keys():
            self.drawElectrode(qp,self.electrodesPos[key][0], key)
        qp.end()
    def drawConnections(self, qp):
        """Draw one coloured line per electrode pair whose matrix value passes
        the thresholds; each unordered pair is drawn at most once."""
        already_draw = []
        for i in range(len(self.mat)):
            for j in range(len(self.mat[0])):
                if ((i,j) in already_draw or (j,i) in already_draw):
                    continue
                # NOTE(review): a pair is kept only when
                # limitUpValue <= mat[i][j] <= limitDownValue, i.e. "up" acts
                # as the lower bound and "down" as the upper bound — confirm
                # the two thresholds are not swapped.
                if self.mat[i][j] < self.limitUpValue:
                    continue
                if self.mat[i][j] > self.limitDownValue:
                    continue
                if (self.electrodesPos[self.electrodesList[i]][1] == False):
                    self.drawElectrode(qp,self.electrodesPos[self.electrodesList[i]][0], self.electrodesList[i])
                    self.electrodesPos[self.electrodesList[i]][1] = True
                if (self.electrodesPos[self.electrodesList[j]][1] == False):
                    self.drawElectrode(qp,self.electrodesPos[self.electrodesList[j]][0], self.electrodesList[j])
                    self.electrodesPos[self.electrodesList[j]][1] = True
                self.drawLine(qp, self.electrodesPos[self.electrodesList[i]][0], self.electrodesPos[self.electrodesList[j]][0], round(self.mat[i][j],2))
                already_draw.append((i,j))
    def drawHead(self, qp):
        """Draw the head ellipse plus dashed inner ellipse and axes; also sets
        self.center/self.rx/self.ry used by all other drawing code."""
        pen = QPen(Qt.black, 1, Qt.SolidLine)
        qp.setPen(pen)
        self.center = QPoint(self.size().width()/2, self.size().height()/2)
        self.rx = (self.size().width() - 150)/2
        self.ry = (self.size().height() - 100)/2
        qp.drawEllipse(self.center, self.rx, self.ry)
        pen.setStyle(Qt.DashLine)
        qp.setPen(pen)
        qp.drawEllipse(self.center, self.rx - 1/10*self.rx, self.ry - 1/10*self.ry)
        qp.drawLine(self.center.x() - self.rx, self.center.y(), self.center.x() + self.rx, self.center.y())
        qp.drawLine(self.center.x(), self.center.y() -self.ry, self.center.x(), self.center.y() + self.ry)
    def drawElectrode(self, qp, center, name):
        """Draw one electrode: a filled dot at `center` with its label below."""
        pen = QPen(Qt.black, 1, Qt.SolidLine)
        qp.setPen(pen)
        qp.setBrush(Qt.black)
        qp.drawEllipse(center, 5, 5)
        qp.setFont(QFont('Arial', 13))
        qp.setBrush(Qt.black)
        qp.drawText(center.x() - 25,center.y() + 5, 50, 40, Qt.AlignCenter, name)
    def drawLine(self, qp, start, end, width):
        """Draw a connection line whose colour encodes `width` (a value in
        [0, 1] mapped through floatRgb)."""
        color = self.floatRgb(width, 0, 1)
        pen = QPen(QColor(color[0], color[1], color[2], 255), 3, Qt.SolidLine)
        qp.setPen(pen)
        qp.drawLine(start, end)
    def floatRgb(self, n, cmin, cmax):
        """Map n in [cmin, cmax] to an RGB triple (0-255 per channel) on a
        blue-to-red, jet-like colour scale; n is clamped to the range."""
        R,G,B = 1.0,1.0,1.0
        if (n < cmin):
            n = cmin
        if (n > cmax):
            n = cmax
        dv = cmax - cmin
        if (n < (cmin + 0.25 * dv)):
            R = 0
            G = 4 * (n - cmin) / dv
        elif (n < (cmin + 0.5 * dv)):
            R = 0
            B = 1 + 4 * (cmin + 0.25 * dv - n) / dv
        elif (n < (cmin + 0.75 * dv)):
            R = 4 * (n - cmin - 0.5 * dv) / dv
            B = 0
        else:
            G = 1 + 4 * (cmin + 0.75 * dv - n) / dv
            B = 0
        return R*255,G*255,B*255
| 7,862 | 18 | 268 |
1b4fad87e29955db23773bcb5749ddeee1765df4 | 3,315 | py | Python | app.py | Newbas/beiwe-backend | 3a368519853ce11e7f4ab816ce29634a36af640e | [
"BSD-3-Clause"
] | 51 | 2018-01-12T01:51:44.000Z | 2022-03-10T00:06:34.000Z | app.py | Newbas/beiwe-backend | 3a368519853ce11e7f4ab816ce29634a36af640e | [
"BSD-3-Clause"
] | 213 | 2018-01-10T19:57:03.000Z | 2022-03-29T04:30:53.000Z | app.py | Newbas/beiwe-backend | 3a368519853ce11e7f4ab816ce29634a36af640e | [
"BSD-3-Clause"
] | 35 | 2018-03-08T03:28:53.000Z | 2022-03-11T13:58:44.000Z | import os
from datetime import datetime
import jinja2
from flask import Flask, redirect, render_template
from flask_cors import CORS
from raven.contrib.flask import Sentry
from werkzeug.middleware.proxy_fix import ProxyFix
from api import (admin_api, copy_study_api, dashboard_api, data_access_api, mobile_api,
other_researcher_apis, participant_administration, push_notifications_api, study_api,
survey_api)
from api.tableau_api.views import SummaryStatisticDailyStudyView
from api.tableau_api.web_data_connector import WebDataConnector
from authentication.admin_authentication import is_logged_in
from config.settings import SENTRY_ELASTIC_BEANSTALK_DSN, SENTRY_JAVASCRIPT_DSN
from libs.security import set_secret_key
from libs.sentry import normalize_sentry_dsn
from pages import (admin_pages, data_access_web_form, forest_pages, login_pages, mobile_pages,
participant_pages, survey_designer, system_admin_pages)
# Flask App
app = Flask(__name__, static_folder="frontend/static")
app.jinja_loader = jinja2.ChoiceLoader(
[app.jinja_loader, jinja2.FileSystemLoader("frontend/templates")]
)
set_secret_key(app)
app.wsgi_app = ProxyFix(app.wsgi_app)
CORS(app)
# Flask Blueprints
app.register_blueprint(login_pages.login_pages)
app.register_blueprint(mobile_api.mobile_api)
app.register_blueprint(admin_pages.admin_pages)
app.register_blueprint(mobile_pages.mobile_pages)
app.register_blueprint(system_admin_pages.system_admin_pages)
app.register_blueprint(forest_pages.forest_pages)
app.register_blueprint(survey_designer.survey_designer)
app.register_blueprint(admin_api.admin_api)
app.register_blueprint(participant_administration.participant_administration)
app.register_blueprint(survey_api.survey_api)
app.register_blueprint(study_api.study_api)
app.register_blueprint(data_access_api.data_access_api)
app.register_blueprint(data_access_web_form.data_access_web_form)
app.register_blueprint(other_researcher_apis.other_researcher_apis)
app.register_blueprint(copy_study_api.copy_study_api)
app.register_blueprint(dashboard_api.dashboard_api)
app.register_blueprint(push_notifications_api.push_notifications_api)
app.register_blueprint(participant_pages.participant_pages)
SummaryStatisticDailyStudyView.register_urls(app)
WebDataConnector.register_urls(app)
# Jinja
app.jinja_env.globals['current_year'] = datetime.now().strftime('%Y')
# Sentry is not required, that was too much of a hassle
if SENTRY_ELASTIC_BEANSTALK_DSN:
sentry = Sentry(app, dsn=normalize_sentry_dsn(SENTRY_ELASTIC_BEANSTALK_DSN))
@app.route("/<page>.html")
# this would be called every page load in the context processor
DERIVED_DSN = normalize_sentry_dsn(SENTRY_JAVASCRIPT_DSN)
@app.context_processor
# Extra Production settings
if not __name__ == '__main__':
# Points our custom 404 page (in /frontend/templates) to display on a 404 error
@app.errorhandler(404)
# Extra Debugging settings
if __name__ == '__main__':
app.run(host='0.0.0.0', port=int(os.getenv("PORT", "8080")), debug=True)
| 37.247191 | 94 | 0.827451 | import os
from datetime import datetime
import jinja2
from flask import Flask, redirect, render_template
from flask_cors import CORS
from raven.contrib.flask import Sentry
from werkzeug.middleware.proxy_fix import ProxyFix
from api import (admin_api, copy_study_api, dashboard_api, data_access_api, mobile_api,
other_researcher_apis, participant_administration, push_notifications_api, study_api,
survey_api)
from api.tableau_api.views import SummaryStatisticDailyStudyView
from api.tableau_api.web_data_connector import WebDataConnector
from authentication.admin_authentication import is_logged_in
from config.settings import SENTRY_ELASTIC_BEANSTALK_DSN, SENTRY_JAVASCRIPT_DSN
from libs.security import set_secret_key
from libs.sentry import normalize_sentry_dsn
from pages import (admin_pages, data_access_web_form, forest_pages, login_pages, mobile_pages,
participant_pages, survey_designer, system_admin_pages)
# Flask App
app = Flask(__name__, static_folder="frontend/static")
app.jinja_loader = jinja2.ChoiceLoader(
[app.jinja_loader, jinja2.FileSystemLoader("frontend/templates")]
)
set_secret_key(app)
app.wsgi_app = ProxyFix(app.wsgi_app)
CORS(app)
# Flask Blueprints
app.register_blueprint(login_pages.login_pages)
app.register_blueprint(mobile_api.mobile_api)
app.register_blueprint(admin_pages.admin_pages)
app.register_blueprint(mobile_pages.mobile_pages)
app.register_blueprint(system_admin_pages.system_admin_pages)
app.register_blueprint(forest_pages.forest_pages)
app.register_blueprint(survey_designer.survey_designer)
app.register_blueprint(admin_api.admin_api)
app.register_blueprint(participant_administration.participant_administration)
app.register_blueprint(survey_api.survey_api)
app.register_blueprint(study_api.study_api)
app.register_blueprint(data_access_api.data_access_api)
app.register_blueprint(data_access_web_form.data_access_web_form)
app.register_blueprint(other_researcher_apis.other_researcher_apis)
app.register_blueprint(copy_study_api.copy_study_api)
app.register_blueprint(dashboard_api.dashboard_api)
app.register_blueprint(push_notifications_api.push_notifications_api)
app.register_blueprint(participant_pages.participant_pages)
SummaryStatisticDailyStudyView.register_urls(app)
WebDataConnector.register_urls(app)
# Jinja
app.jinja_env.globals['current_year'] = datetime.now().strftime('%Y')
# Sentry is not required, that was too much of a hassle
if SENTRY_ELASTIC_BEANSTALK_DSN:
sentry = Sentry(app, dsn=normalize_sentry_dsn(SENTRY_ELASTIC_BEANSTALK_DSN))
@app.route("/<page>.html")
def strip_dot_html(page):
    """Redirect legacy ``<page>.html`` URLs to the extension-less route."""
    # Strips away the dot html from pages
    return redirect("/%s" % page)
# this would be called every page load in the context processor
DERIVED_DSN = normalize_sentry_dsn(SENTRY_JAVASCRIPT_DSN)
@app.context_processor
def inject_dict_for_all_templates():
    """Expose the normalized Sentry JS DSN to every rendered template."""
    return {"SENTRY_JAVASCRIPT_DSN": DERIVED_DSN}
# Extra Production settings
if not __name__ == '__main__':
# Points our custom 404 page (in /frontend/templates) to display on a 404 error
    @app.errorhandler(404)
    def e404(e):
        """Render the custom 404 template, preserving login state in the page."""
        return render_template("404.html", is_logged_in=is_logged_in()), 404
# Extra Debugging settings
if __name__ == '__main__':
app.run(host='0.0.0.0', port=int(os.getenv("PORT", "8080")), debug=True)
| 213 | 0 | 70 |
2adbfcf40ea4068f6a4f9b3cedfd4ccb9affb30c | 2,654 | py | Python | generador_rubrica.py | CC3002-Metodologias/template-tareas | 89447bdae56568e6d94d1a0babeec54fb8222ea9 | [
"MIT"
] | 1 | 2022-02-02T23:01:03.000Z | 2022-02-02T23:01:03.000Z | generador_rubrica.py | CC3002-Metodologias/template-tareas | 89447bdae56568e6d94d1a0babeec54fb8222ea9 | [
"MIT"
] | null | null | null | generador_rubrica.py | CC3002-Metodologias/template-tareas | 89447bdae56568e6d94d1a0babeec54fb8222ea9 | [
"MIT"
] | 1 | 2021-05-30T14:59:08.000Z | 2021-05-30T14:59:08.000Z | """
Modulo que transforma la rúbrica en excel en un formato fácil de pasar a u-cursos
"""
from typing import Tuple
from pathlib import Path
import os
import pandas as pd
INDICE_NOMBRE_ALUMNO = 0
SUBSEC_CODIGO_FUENTE = ("Funcionalidad", "Diseño")
SECCIONES = ("Código Fuente", "Coverage", "Javadoc", "Resumen")
NOTA = "Nota"
COMENTARIOS = ("Comentarios", "Corrector")
SEC_ADICIONALES = "Adicionales"
COVERAGE = "Porcentaje de coverage"
ROOT = Path(os.path.dirname(os.path.realpath(__file__)))
def get_total(puntaje: str):
    """Strip the leading ``Total: `` label and turn the decimal comma into a dot."""
    sin_etiqueta = puntaje.replace("Total: ", "")
    return sin_etiqueta.replace(",", ".")
def excel_a_string(excel_filename: str) -> Tuple[str, str]:
    """ Convert the rubric spreadsheet into a tuple that is easy to dump to a .txt file
    :param excel_filename: name of the Excel file containing the rubric
    :return: a tuple of (student name, review comments)
    """
    revision = ""
    nombre_alumno = ""
    nota = ""
    # Read the sheet positionally; the rubric has no header row.
    a = pd.read_excel(excel_filename, header=None)
    for index, row in a.iterrows():
        if index == INDICE_NOMBRE_ALUMNO:
            nombre_alumno = f"{row[1]}"
        item = row[0]
        # Subsection subtotals ("Funcionalidad" / "Diseño")
        if item in SUBSEC_CODIGO_FUENTE:
            revision += "\n" + "=" * 80 + f"\n{item}: {row[2]} / {get_total(row[3])}\n" \
                        + "=" * 80 + "\n"
        # Section totals
        elif item in SECCIONES:
            revision += "\n" + "#" * 80 + f"\n{item}: {row[2]} / {get_total(row[3])}\n" \
                        + "#" * 80 + "\n"
        # Final grade
        elif item == NOTA:
            nota = f"{row[3]}"
        # Grader's comments
        elif item in COMENTARIOS:
            revision += f"\n{item}: {row[1]}"
        # Additional deductions
        elif item == SEC_ADICIONALES:
            revision += "\n" + "#" * 80 + f"\n{item}: {row[2]}\n" + "#" * 80 + "\n"
        # Itemised deductions (only rows with a non-zero amount)
        elif index > 1 and row[2] != 0:
            if item == COVERAGE:
                if row[3] != 0:
                    revision += f"\n{item}: {row[2] * 100}% = {row[3]}"
            else:
                revision += f"\n{row[0]}: {row[1]}x{row[2]} = {row[3]}"
    if not nombre_alumno:
        raise Exception("Falta nombre del alumno!!")
    return nombre_alumno, f"Alumno: {nombre_alumno}\nNota: {nota}\n\n{revision}"
if __name__ == '__main__':
    # Plain string literal: the previous f-string had no placeholders.
    NOMBRE_ALUMNO, REVISION = excel_a_string("Rubrica_T2.xlsx")
    with open(f"Comentarios {NOMBRE_ALUMNO}.txt", "w+",
              encoding='utf-8') as comentarios_alumno:
        comentarios_alumno.write(REVISION)
| 35.386667 | 89 | 0.576488 | """
Modulo que transforma la rúbrica en excel en un formato fácil de pasar a u-cursos
"""
from typing import Tuple
from pathlib import Path
import os
import pandas as pd
INDICE_NOMBRE_ALUMNO = 0
SUBSEC_CODIGO_FUENTE = ("Funcionalidad", "Diseño")
SECCIONES = ("Código Fuente", "Coverage", "Javadoc", "Resumen")
NOTA = "Nota"
COMENTARIOS = ("Comentarios", "Corrector")
SEC_ADICIONALES = "Adicionales"
COVERAGE = "Porcentaje de coverage"
ROOT = Path(os.path.dirname(os.path.realpath(__file__)))
def get_total(puntaje: str):
    """Strip the leading ``Total: `` label and turn the decimal comma into a dot."""
    sin_etiqueta = puntaje.replace("Total: ", "")
    return sin_etiqueta.replace(",", ".")
def excel_a_string(excel_filename: str) -> Tuple[str, str]:
""" Convierte la rúbrica a una tupla fácil de pasar a un archivo .txt
:param excel_filename: el nombre del excel con la rúbrica
:return: una tupla con el nombre del alumno y los comentarios de revisión
"""
revision = ""
nombre_alumno = ""
nota = ""
a = pd.read_excel(excel_filename, header=None)
for index, row in a.iterrows():
if index == INDICE_NOMBRE_ALUMNO:
nombre_alumno = f"{row[1]}"
item = row[0]
# Puntajes totales de las subsecciones
if item in SUBSEC_CODIGO_FUENTE:
revision += "\n" + "=" * 80 + f"\n{item}: {row[2]} / {get_total(row[3])}\n" \
+ "=" * 80 + "\n"
# Puntajes totales de las secciones
elif item in SECCIONES:
revision += "\n" + "#" * 80 + f"\n{item}: {row[2]} / {get_total(row[3])}\n" \
+ "#" * 80 + "\n"
# Nota final
elif item == NOTA:
nota = f"{row[3]}"
# Notas del corrector
elif item in COMENTARIOS:
revision += f"\n{item}: {row[1]}"
# Descuentos adicionales
elif item == SEC_ADICIONALES:
revision += "\n" + "#" * 80 + f"\n{item}: {row[2]}\n" + "#" * 80 + "\n"
# Detalle de los descuentos
elif index > 1 and row[2] != 0:
if item == COVERAGE:
if row[3] != 0:
revision += f"\n{item}: {row[2] * 100}% = {row[3]}"
else:
revision += f"\n{row[0]}: {row[1]}x{row[2]} = {row[3]}"
if not nombre_alumno:
raise Exception("Falta nombre del alumno!!")
return nombre_alumno, f"Alumno: {nombre_alumno}\nNota: {nota}\n\n{revision}"
if __name__ == '__main__':
    # Plain string literal: the previous f-string had no placeholders.
    NOMBRE_ALUMNO, REVISION = excel_a_string("Rubrica_T2.xlsx")
    with open(f"Comentarios {NOMBRE_ALUMNO}.txt", "w+",
              encoding='utf-8') as comentarios_alumno:
        comentarios_alumno.write(REVISION)
| 0 | 0 | 0 |
e9bfb0d234c88216dad8a70731091d684dd82810 | 774 | py | Python | ParsingAndScraping/assistants.py | KazuruK/FilmGetter | fd84bcaddf17d4b89ad6e5d27095535346c5f4a9 | [
"BSD-3-Clause"
] | 1 | 2021-06-23T13:06:11.000Z | 2021-06-23T13:06:11.000Z | ParsingAndScraping/assistants.py | KazuruK/FilmGetter | fd84bcaddf17d4b89ad6e5d27095535346c5f4a9 | [
"BSD-3-Clause"
] | 1 | 2021-06-23T21:21:52.000Z | 2021-06-23T21:21:52.000Z | ParsingAndScraping/assistants.py | KazuruK/FilmGetter | fd84bcaddf17d4b89ad6e5d27095535346c5f4a9 | [
"BSD-3-Clause"
] | 1 | 2021-06-28T19:14:19.000Z | 2021-06-28T19:14:19.000Z | import re
| 24.967742 | 73 | 0.612403 | import re
def big_num(page_info):
    """Return every run of two or more digits found inside ``{...}`` groups."""
    brace_groups = re.findall("{.*?}", page_info)
    return re.findall("[0-9]{2,}", str(brace_groups))
def digits(data):
    """Return all maximal digit runs in *data* as a list of strings."""
    # Raw string avoids the invalid "\d" escape DeprecationWarning.
    return re.findall(r"\d+", data)
def empty_string_cleaner(input):
    """Remove every empty string from *input* in place and return it.

    Slice assignment mutates the caller's list exactly like the original
    repeated ``remove("")`` loop, but in a single O(n) pass instead of
    O(n^2) in the worst case.
    """
    input[:] = [item for item in input if item != ""]
    return input
def substring_enter_count(input_name, current_name):
    """Return 1 if *input_name* (minus its last 5 characters) occurs in
    *current_name*, else 0.

    Both names are normalised first — ':' becomes '.' and the Cyrillic 'ё'
    becomes 'е' — and the comparison is case-insensitive.
    """
    for old, new in ((':', '.'), ('ё', 'е')):
        if old in current_name or old in input_name:
            current_name = current_name.replace(old, new)
            input_name = input_name.replace(old, new)
    needle = input_name[:len(input_name) - 5].lower()
    return 1 if needle in current_name.lower() else 0
| 674 | 0 | 92 |
072a9eaef54c92d54ae728acbb0bedda8b0954e0 | 1,486 | py | Python | snapshot_test/__tests__/test_snapshot_test_case.py | StratoDem/dash-snapshot-testing | dce18fa65d4265abeee414b5b9c8c685edf4f3c1 | [
"MIT"
] | 2 | 2018-03-28T12:07:20.000Z | 2020-02-19T15:02:08.000Z | snapshot_test/__tests__/test_snapshot_test_case.py | StratoDem/dash-snapshot-testing | dce18fa65d4265abeee414b5b9c8c685edf4f3c1 | [
"MIT"
] | 3 | 2018-03-27T13:38:25.000Z | 2018-05-15T15:54:22.000Z | snapshot_test/__tests__/test_snapshot_test_case.py | StratoDem/dash-snapshot-testing | dce18fa65d4265abeee414b5b9c8c685edf4f3c1 | [
"MIT"
] | null | null | null | """
StratoDem Analytics : __test_snapshot_test_case
Principal Author(s) : Michael Clawar
Secondary Author(s) :
Description :
Notes :
March 27, 2018
"""
import dash_html_components as html
from snapshot_test import DashSnapshotTestCase
| 28.576923 | 88 | 0.665545 | """
StratoDem Analytics : __test_snapshot_test_case
Principal Author(s) : Michael Clawar
Secondary Author(s) :
Description :
Notes :
March 27, 2018
"""
import dash_html_components as html
from snapshot_test import DashSnapshotTestCase
class MyUnitTestCase(DashSnapshotTestCase):
    """Snapshot test using the default snapshots directory."""
    def test_component(self):
        """Record (first run) or verify (later runs) a simple Div snapshot."""
        my_component = html.Div([html.P('wow!'), html.Span('this works')], id='test-id')
        self.assertSnapshotEqual(my_component, 'my-test-unique-id')
class MyOtherUnitTestCase(DashSnapshotTestCase):
    """Snapshot tests exercising a custom snapshot directory and mismatch
    detection against a shared snapshot id."""
    # Store this case's snapshots in a separate directory.
    snapshots_dir = '__snapshots_2__'
    def test_component(self):
        """Record/verify the baseline snapshot under the custom directory."""
        my_component = html.Div([html.P('wow'), html.Span('another one')], id='test-id')
        self.assertSnapshotEqual(my_component, 'my-test-unique-id')
    def test_component_2(self):
        """A structurally different tree must fail against the same snapshot id."""
        my_component = html.Div([html.P('wow'), html.P('another one')], id='test-id')
        self.assertRaises(
            AssertionError,
            lambda: self.assertSnapshotEqual(my_component, 'my-test-unique-id'))
    def test_component_3(self):
        """Differing child content (list or tuple of ints) must also mismatch."""
        my_component = html.Div([html.P('wow'), html.Span([1, 2, 3])], id='test-id')
        self.assertRaises(
            AssertionError,
            lambda: self.assertSnapshotEqual(my_component, 'my-test-unique-id'))
        my_component = html.Div([html.P('wow'), html.Span((1, 2, 3))], id='test-id')
        self.assertRaises(
            AssertionError,
            lambda: self.assertSnapshotEqual(my_component, 'my-test-unique-id'))
| 1,004 | 168 | 72 |
0c3676688e5a248281049921c6aee621f81cc855 | 1,867 | py | Python | command_log/migrations/0001_initial.py | coxm/django-management-command-log | 554d945c4c7cb28ec609b996d3606badaed403db | [
"MIT"
] | 7 | 2020-06-14T17:37:03.000Z | 2021-11-12T15:09:23.000Z | command_log/migrations/0001_initial.py | coxm/django-management-command-log | 554d945c4c7cb28ec609b996d3606badaed403db | [
"MIT"
] | 3 | 2020-04-25T19:19:35.000Z | 2021-07-15T11:12:47.000Z | command_log/migrations/0001_initial.py | coxm/django-management-command-log | 554d945c4c7cb28ec609b996d3606badaed403db | [
"MIT"
] | 1 | 2022-02-09T13:39:37.000Z | 2022-02-09T13:39:37.000Z | # Generated by Django 2.2.5 on 2019-09-25 13:37
from django.db import migrations, models
| 30.606557 | 83 | 0.371719 | # Generated by Django 2.2.5 on 2019-09-25 13:37
from django.db import migrations, models
# Auto-generated by Django; prefer regenerating via makemigrations over hand-edits.
class Migration(migrations.Migration):
    """Initial schema migration: creates the ManagementCommandLog table."""
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="ManagementCommandLog",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "app_name",
                    models.CharField(
                        help_text="The app containing the management command",
                        max_length=100,
                    ),
                ),
                (
                    "command_name",
                    models.CharField(
                        help_text="The management command that was executed",
                        max_length=100,
                    ),
                ),
                ("started_at", models.DateTimeField(blank=True, null=True)),
                ("finished_at", models.DateTimeField(blank=True, null=True)),
                (
                    "exit_code",
                    models.IntegerField(
                        blank=True,
                        default=None,
                        help_text="0 if the command ran without error.",
                        null=True,
                    ),
                ),
                (
                    "output",
                    models.TextField(
                        blank=True,
                        help_text="The output of the command (stored as a string)",
                        null=True,
                    ),
                ),
            ],
        )
    ]
| 0 | 1,753 | 23 |
e03f670f88c281efa8516a99fe80b99f70e35e80 | 3,290 | py | Python | jiminy/vncdriver/screen/pyglet_screen.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | 3 | 2020-03-16T13:50:40.000Z | 2021-06-09T05:26:13.000Z | jiminy/vncdriver/screen/pyglet_screen.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | null | null | null | jiminy/vncdriver/screen/pyglet_screen.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | null | null | null | import logging
import numpy as np
import os
from jiminy import pyprofile
import sys
from jiminy import error
from jiminy.vncdriver import server_messages
logger = logging.getLogger(__name__)
# # TODO: we don't seem to be able to have multiple independent
# # windows at once
# def update_rectangle(self, x, y, width, height, data):
# self._update_rgbarray(x, y, width, height, update)
# def copy_rectangle(self, src_x, src_y, x, y, width, height):
# assert self._window
# rectangle = self.texture.get_region(src_x, self._height-height-src_y, width, height)
# self.texture.blit_into(rectangle.get_image_data(), x, self._height-height-y, 0)
# def fill_rectangle(self, x, y, width, height, color):
# import pyglet
# # While this technically works, it's super slow
# update = np.frombuffer(color, dtype=np.uint8)
# r, g, b = update[self._color_cycle]
# image_pattern = pyglet.image.SolidColorImagePattern(color=(r, g, b, 0))
# image = image_pattern.create_image(width, height)
# self.texture.blit_into(image, x, self._height-height-y, 0)
# def commit(self):
# self._window.clear()
# self._window.switch_to()
# self.texture.blit(0, 0)
# self._is_updated = True
| 37.816092 | 117 | 0.653495 | import logging
import numpy as np
import os
from jiminy import pyprofile
import sys
from jiminy import error
from jiminy.vncdriver import server_messages
logger = logging.getLogger(__name__)
class PygletScreen(object):
def __init__(self, bitmap=None):
self._window = None
self._is_updated = False
self._height, self._width, _ = bitmap.shape
self._initialize()
self.update_rectangle(0, 0, self._width, self._height, bitmap)
def flip(self):
if not self._is_updated:
return
self._is_updated = False
self._window.clear()
self._window.switch_to()
self._window.dispatch_events()
self.texture.blit(0, 0)
self._window.flip()
def _initialize(self):
if not os.environ.get('DISPLAY') and sys.platform.startswith('linux'):
raise error.Error("Cannot render with mode='human' with no DISPLAY variable set.")
import pyglet
self._window = pyglet.window.Window(width=self._width, height=self._height, visible=True)
self._window.dispatch_events()
self.texture = pyglet.image.Texture.create(width=self._width, height=self._height)
def update_rectangle(self, x, y, width, height, data):
bytes = data.tobytes()
pyprofile.incr('vncdriver.pyglet_screen.blit')
pyprofile.incr('vncdriver.pyglet_screen.blit.bytes', len(bytes), unit=pyprofile.BYTES)
import pyglet
image = pyglet.image.ImageData(width, height, 'RGB', bytes, pitch=width * -3)
self.texture.blit_into(image, x, self._height-height-y, 0)
self._is_updated = True
def apply(self, framebuffer_update):
pyprofile.push('vncdriver.pyglet_screen.apply')
for rect in framebuffer_update.rectangles:
if isinstance(rect.encoding,
(server_messages.RAWEncoding, server_messages.ZRLEEncoding, server_messages.ZlibEncoding)):
self.update_rectangle(rect.x, rect.y, rect.width, rect.height, rect.encoding.data)
else:
raise error.Error('Unrecognized encoding: {}'.format(rect.encoding))
pyprofile.pop()
# # TODO: we don't seem to be able to have multiple independent
# # windows at once
# def update_rectangle(self, x, y, width, height, data):
# self._update_rgbarray(x, y, width, height, update)
# def copy_rectangle(self, src_x, src_y, x, y, width, height):
# assert self._window
# rectangle = self.texture.get_region(src_x, self._height-height-src_y, width, height)
# self.texture.blit_into(rectangle.get_image_data(), x, self._height-height-y, 0)
# def fill_rectangle(self, x, y, width, height, color):
# import pyglet
# # While this technically works, it's super slow
# update = np.frombuffer(color, dtype=np.uint8)
# r, g, b = update[self._color_cycle]
# image_pattern = pyglet.image.SolidColorImagePattern(color=(r, g, b, 0))
# image = image_pattern.create_image(width, height)
# self.texture.blit_into(image, x, self._height-height-y, 0)
# def commit(self):
# self._window.clear()
# self._window.switch_to()
# self.texture.blit(0, 0)
# self._is_updated = True
| 1,817 | 6 | 157 |
ab910eb644a52f66b4a21b05f194cd21690d19bc | 2,911 | py | Python | sigpro/aggregations/amplitude/statistical.py | sintel-dev/SigPro | 1151b81ec2344d8ebfff58233c6276448fb09e93 | [
"MIT"
] | 7 | 2021-11-15T09:32:59.000Z | 2022-01-16T10:30:55.000Z | sigpro/aggregations/amplitude/statistical.py | sintel-dev/SigPro | 1151b81ec2344d8ebfff58233c6276448fb09e93 | [
"MIT"
] | 2 | 2021-02-01T09:50:47.000Z | 2021-09-27T15:19:57.000Z | sigpro/aggregations/amplitude/statistical.py | signals-dev/SigPro | 250a924c9cf524ef85ef900f0bcf8e317ee529a6 | [
"MIT"
] | 1 | 2022-03-10T15:50:17.000Z | 2022-03-10T15:50:17.000Z | """Amplitude statistical module."""
import numpy as np
import scipy.stats
def mean(amplitude_values):
"""Calculate the mean value of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
`mean` value of the input array.
"""
return np.mean(amplitude_values)
def std(amplitude_values):
"""Compute the arithmetic mean value of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
`std` value of the input array.
"""
return np.std(amplitude_values)
def var(amplitude_values):
"""Compute the variance value of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
`std` value of the input array.
"""
return np.var(amplitude_values)
def rms(amplitude_values):
"""Compute the RMS (Root Mean Square) of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
RMS of the input array.
"""
return np.sqrt((np.array(amplitude_values) ** 2).mean())
def crest_factor(amplitude_values):
"""Compute the ratio of the peak to the RMS.
Used for estimating the amount of impact wear in a bearing.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
The crest factor of the inputted values.
"""
peak = max(np.abs(amplitude_values))
return peak / rms(amplitude_values)
def skew(amplitude_values):
"""Compute the sample skewness of an array of values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
The skewness value of the input array.
"""
return scipy.stats.skew(amplitude_values)
def kurtosis(amplitude_values, fisher=True, bias=True):
"""Compute the kurtosis ,Fisher or Pearson, of an array of values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
fisher (bool):
If ``True``, Fisher’s definition is used (normal ==> 0.0). If ``False``,
Pearson’s definition is used (normal ==> 3.0). Defaults to ``True``.
bias (bool):
If ``False``, then the calculations are corrected for statistical bias.
Defaults to ``True``.
Returns:
float:
The kurtosis value of the input array. If all values are equal, return
`-3` for Fisher's definition and `0` for Pearson's definition.
"""
return scipy.stats.kurtosis(amplitude_values, fisher=fisher, bias=bias)
| 25.761062 | 84 | 0.630024 | """Amplitude statistical module."""
import numpy as np
import scipy.stats
def mean(amplitude_values):
"""Calculate the mean value of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
`mean` value of the input array.
"""
return np.mean(amplitude_values)
def std(amplitude_values):
"""Compute the arithmetic mean value of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
`std` value of the input array.
"""
return np.std(amplitude_values)
def var(amplitude_values):
"""Compute the variance value of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
`std` value of the input array.
"""
return np.var(amplitude_values)
def rms(amplitude_values):
"""Compute the RMS (Root Mean Square) of the values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
RMS of the input array.
"""
return np.sqrt((np.array(amplitude_values) ** 2).mean())
def crest_factor(amplitude_values):
"""Compute the ratio of the peak to the RMS.
Used for estimating the amount of impact wear in a bearing.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
The crest factor of the inputted values.
"""
peak = max(np.abs(amplitude_values))
return peak / rms(amplitude_values)
def skew(amplitude_values):
"""Compute the sample skewness of an array of values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
Returns:
float:
The skewness value of the input array.
"""
return scipy.stats.skew(amplitude_values)
def kurtosis(amplitude_values, fisher=True, bias=True):
"""Compute the kurtosis ,Fisher or Pearson, of an array of values.
Args:
amplitude_values (numpy.ndarray):
Array of floats representing signal values.
fisher (bool):
If ``True``, Fisher’s definition is used (normal ==> 0.0). If ``False``,
Pearson’s definition is used (normal ==> 3.0). Defaults to ``True``.
bias (bool):
If ``False``, then the calculations are corrected for statistical bias.
Defaults to ``True``.
Returns:
float:
The kurtosis value of the input array. If all values are equal, return
`-3` for Fisher's definition and `0` for Pearson's definition.
"""
return scipy.stats.kurtosis(amplitude_values, fisher=fisher, bias=bias)
| 0 | 0 | 0 |
e7073f94415fbfa99c1e8b45241340178c6a928c | 846 | py | Python | hubtty/alembic/versions/7ef7dfa2ca3a_add_change_outdated.py | pabelanger/hubtty | cb67188112c7ba76e366a622b1fad027736ee78b | [
"Apache-2.0"
] | null | null | null | hubtty/alembic/versions/7ef7dfa2ca3a_add_change_outdated.py | pabelanger/hubtty | cb67188112c7ba76e366a622b1fad027736ee78b | [
"Apache-2.0"
] | null | null | null | hubtty/alembic/versions/7ef7dfa2ca3a_add_change_outdated.py | pabelanger/hubtty | cb67188112c7ba76e366a622b1fad027736ee78b | [
"Apache-2.0"
] | null | null | null | """add change.outdated
Revision ID: 7ef7dfa2ca3a
Revises: 37a702b7f58e
Create Date: 2016-08-09 08:59:04.441926
"""
# revision identifiers, used by Alembic.
revision = '7ef7dfa2ca3a'
down_revision = '37a702b7f58e'
import warnings
from alembic import op
import sqlalchemy as sa
from hubtty.dbsupport import sqlite_alter_columns
| 22.263158 | 72 | 0.679669 | """add change.outdated
Revision ID: 7ef7dfa2ca3a
Revises: 37a702b7f58e
Create Date: 2016-08-09 08:59:04.441926
"""
# revision identifiers, used by Alembic.
revision = '7ef7dfa2ca3a'
down_revision = '37a702b7f58e'
import warnings
from alembic import op
import sqlalchemy as sa
from hubtty.dbsupport import sqlite_alter_columns
def upgrade():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
op.add_column('change', sa.Column('outdated', sa.Boolean()))
connection = op.get_bind()
change = sa.sql.table('change',
sa.sql.column('outdated', sa.Boolean()))
connection.execute(change.update().values({'outdated':False}))
sqlite_alter_columns('change', [
sa.Column('outdated', sa.Boolean(), index=True, nullable=False),
])
def downgrade():
pass
| 466 | 0 | 46 |
d4f3d3105c5b4962707b7220f350d5fe6e18e2df | 3,538 | py | Python | blaze/compute/ckernel/tests/test_wrapped_ckernel.py | talumbau/blaze | 66c9e61476f11d53f7b734664214537182397739 | [
"BSD-3-Clause"
] | 1 | 2018-01-24T08:54:04.000Z | 2018-01-24T08:54:04.000Z | blaze/compute/ckernel/tests/test_wrapped_ckernel.py | talumbau/blaze | 66c9e61476f11d53f7b734664214537182397739 | [
"BSD-3-Clause"
] | null | null | null | blaze/compute/ckernel/tests/test_wrapped_ckernel.py | talumbau/blaze | 66c9e61476f11d53f7b734664214537182397739 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
import unittest
import ctypes
import sys
from blaze.compute import ckernel
from blaze.py2help import skipIf
from dynd import nd, ndt, _lowlevel
# On 64-bit windows python 2.6 appears to have
# ctypes bugs in the C calling convention, so
# disable these tests.
win64_py26 = (sys.platform == 'win32' and
ctypes.sizeof(ctypes.c_void_p) == 8 and
sys.version_info[:2] <= (2, 6))
if __name__ == '__main__':
unittest.main()
| 43.679012 | 93 | 0.615885 | from __future__ import absolute_import, division, print_function
import unittest
import ctypes
import sys
from blaze.compute import ckernel
from blaze.py2help import skipIf
from dynd import nd, ndt, _lowlevel
# On 64-bit windows python 2.6 appears to have
# ctypes bugs in the C calling convention, so
# disable these tests.
win64_py26 = (sys.platform == 'win32' and
ctypes.sizeof(ctypes.c_void_p) == 8 and
sys.version_info[:2] <= (2, 6))
class TestWrappedCKernel(unittest.TestCase):
@skipIf(win64_py26, 'py26 win64 ctypes is buggy')
def test_ctypes_callback(self):
# Create a ckernel directly with ctypes
def my_kernel_func(dst_ptr, src_ptr, kdp):
dst = ctypes.c_int32.from_address(dst_ptr)
src = ctypes.c_double.from_address(src_ptr)
dst.value = int(src.value * 3.5)
my_callback = _lowlevel.UnarySingleOperation(my_kernel_func)
with _lowlevel.ckernel.CKernelBuilder() as ckb:
# The ctypes callback object is both the function and the owner
ckernel.wrap_ckernel_func(ckb, 0, my_callback, my_callback)
# Delete the callback to make sure the ckernel is holding a reference
del my_callback
# Make some memory and call the kernel
src_val = ctypes.c_double(4.0)
dst_val = ctypes.c_int32(-1)
ck = ckb.ckernel(_lowlevel.UnarySingleOperation)
ck(ctypes.addressof(dst_val), ctypes.addressof(src_val))
self.assertEqual(dst_val.value, 14)
@skipIf(win64_py26, 'py26 win64 ctypes is buggy')
def test_ctypes_callback_deferred(self):
# Create a deferred ckernel via a closure
def instantiate_ckernel(out_ckb, ckb_offset, types, meta,
kerntype, ectx):
out_ckb = _lowlevel.CKernelBuilder(out_ckb)
def my_kernel_func_single(dst_ptr, src_ptr, kdp):
dst = ctypes.c_int32.from_address(dst_ptr)
src = ctypes.c_double.from_address(src_ptr[0])
dst.value = int(src.value * 3.5)
def my_kernel_func_strided(dst_ptr, dst_stride, src_ptr, src_stride, count, kdp):
src_ptr0 = src_ptr[0]
src_stride0 = src_stride[0]
for i in range(count):
my_kernel_func_single(dst_ptr, [src_ptr0], kdp)
dst_ptr += dst_stride
src_ptr0 += src_stride0
if kerntype == 'single':
kfunc = _lowlevel.ExprSingleOperation(my_kernel_func_single)
else:
kfunc = _lowlevel.ExprStridedOperation(my_kernel_func_strided)
return ckernel.wrap_ckernel_func(out_ckb, ckb_offset,
kfunc, kfunc)
ckd = _lowlevel.ckernel_deferred_from_pyfunc(instantiate_ckernel,
[ndt.int32, ndt.float64])
# Test calling the ckd
out = nd.empty(ndt.int32)
in0 = nd.array(4.0, type=ndt.float64)
ckd.__call__(out, in0)
self.assertEqual(nd.as_py(out), 14)
# Also call it lifted
ckd_lifted = _lowlevel.lift_ckernel_deferred(ckd,
['2 * var * int32', '2 * var * float64'])
out = nd.empty('2 * var * int32')
in0 = nd.array([[1.0, 3.0, 2.5], [1.25, -1.5]], type='2 * var * float64')
ckd_lifted.__call__(out, in0)
self.assertEqual(nd.as_py(out), [[3, 10, 8], [4, -5]])
if __name__ == '__main__':
unittest.main()
| 2,812 | 184 | 23 |
c89f1836cbf6c42b637c802a959d94333ed1e29b | 436 | py | Python | src/check_duplicated_image.py | gathierry/FashionAI-KeyPointsDetectionOfApparel | 2e0942b42b4a9cd974cdddc151675738dc8a8cb4 | [
"Apache-2.0"
] | 174 | 2018-06-04T02:12:34.000Z | 2022-03-30T07:01:29.000Z | src/check_duplicated_image.py | gathierry/FashionAI-KeyPointsDetectionOfApparel | 2e0942b42b4a9cd974cdddc151675738dc8a8cb4 | [
"Apache-2.0"
] | 9 | 2018-06-05T11:32:05.000Z | 2021-09-13T09:10:05.000Z | src/check_duplicated_image.py | gathierry/FashionAI-KeyPointsDetectionOfApparel | 2e0942b42b4a9cd974cdddc151675738dc8a8cb4 | [
"Apache-2.0"
] | 55 | 2018-06-05T09:50:52.000Z | 2022-03-30T15:58:00.000Z | import cv2
from glob import glob
import numpy as np
from tqdm import tqdm
im = cv2.imread('/home/storage/lsy/fashion/FashionAI_Keypoint_Detection/wu_train/Images/blouse/ff210d1818f907693a03a6ea2eb39f77.jpg')
for fn in tqdm(glob('/home/storage/lsy/fashion/FashionAI_Keypoint_Detection/r1_train/Images/blouse/*.jpg')):
im2 = cv2.imread(fn)
if im.shape == im2.shape:
if np.all(im==im2):
print(fn)
| 33.538462 | 134 | 0.715596 | import cv2
from glob import glob
import numpy as np
from tqdm import tqdm
im = cv2.imread('/home/storage/lsy/fashion/FashionAI_Keypoint_Detection/wu_train/Images/blouse/ff210d1818f907693a03a6ea2eb39f77.jpg')
for fn in tqdm(glob('/home/storage/lsy/fashion/FashionAI_Keypoint_Detection/r1_train/Images/blouse/*.jpg')):
im2 = cv2.imread(fn)
if im.shape == im2.shape:
if np.all(im==im2):
print(fn)
| 0 | 0 | 0 |
11f4d8bc2427126009a3bfb0aa65034fe9f496a4 | 424 | py | Python | zc_test_app/manage.py | zconnect-iot/zconnect-django | 5c569f54f100e23d72e2ac4de795739ea461a431 | [
"MIT"
] | 2 | 2018-08-19T16:16:39.000Z | 2019-06-11T02:23:50.000Z | zc_test_app/manage.py | zconnect-iot/zconnect-django | 5c569f54f100e23d72e2ac4de795739ea461a431 | [
"MIT"
] | 2 | 2018-07-05T09:51:54.000Z | 2018-07-06T13:12:04.000Z | zc_test_app/manage.py | zconnect-iot/zconnect-django | 5c569f54f100e23d72e2ac4de795739ea461a431 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
from os.path import abspath, dirname
if __name__ == "__main__":
project_dir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, project_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zc_test_app.settings")
from django.core.management import execute_from_command_line
from django import setup
setup()
execute_from_command_line(sys.argv)
| 23.555556 | 75 | 0.75 | #!/usr/bin/env python
import os
import sys
from os.path import abspath, dirname
if __name__ == "__main__":
project_dir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, project_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zc_test_app.settings")
from django.core.management import execute_from_command_line
from django import setup
setup()
execute_from_command_line(sys.argv)
| 0 | 0 | 0 |
60e82be5c15dff172906ce74087dc5a47f778996 | 1,466 | py | Python | git_mv.py | basicincome/unpcoin-core | ce1ec19aef290015cf9acfc7ba8bae25b009a321 | [
"MIT"
] | 1 | 2016-07-04T01:33:11.000Z | 2016-07-04T01:33:11.000Z | git_mv.py | basicincome/unpcoin-core | ce1ec19aef290015cf9acfc7ba8bae25b009a321 | [
"MIT"
] | null | null | null | git_mv.py | basicincome/unpcoin-core | ce1ec19aef290015cf9acfc7ba8bae25b009a321 | [
"MIT"
] | null | null | null | dirs = [
"./src/mobicoin-cli-res.rc",
"./src/mobicoin-cli.cpp",
"./src/mobicoind-res.rc",
"./src/mobicoind.cpp",
"./src/qt/locale/mobicoin_bar.ts",
"./src/qt/locale/mobicoin_bg.ts",
"./src/qt/locale/mobicoin_ca.ts",
"./src/qt/locale/mobicoin_cmn.ts",
"./src/qt/locale/mobicoin_cs.ts",
"./src/qt/locale/mobicoin_da.ts",
"./src/qt/locale/mobicoin_de.ts",
"./src/qt/locale/mobicoin_el.ts",
"./src/qt/locale/mobicoin_en.ts",
"./src/qt/locale/mobicoin_eo.ts",
"./src/qt/locale/mobicoin_es.ts",
"./src/qt/locale/mobicoin_fi.ts",
"./src/qt/locale/mobicoin_fr.ts",
"./src/qt/locale/mobicoin_hu_HU.ts",
"./src/qt/locale/mobicoin_it.ts",
"./src/qt/locale/mobicoin_lv_LV.ts",
"./src/qt/locale/mobicoin_nb.ts",
"./src/qt/locale/mobicoin_nl.ts",
"./src/qt/locale/mobicoin_pl.ts",
"./src/qt/locale/mobicoin_pt.ts",
"./src/qt/locale/mobicoin_pt_BR.ts",
"./src/qt/locale/mobicoin_ru.ts",
"./src/qt/locale/mobicoin_sk.ts",
"./src/qt/locale/mobicoin_sv.ts",
"./src/qt/locale/mobicoin_tr.ts",
"./src/qt/locale/mobicoin_vi.ts",
"./src/qt/locale/mobicoin_zh_CN.ts",
"./src/qt/locale/mobicoin_zh_HK.ts",
"./src/qt/mobicoin.cpp",
"./src/qt/mobicoin.qrc",
"./src/qt/mobicoinstrings.cpp",
"./src/qt/res/icons/mobicoin.icns",
"./src/qt/res/images/mobicoin_logo_horizontal.png",
"./src/qt/res/mobicoin-qt-res.rc",
"./src/test/test_mobicoin.cpp",
]
import os
src = "mobicoin"
dst = "unpay"
for s in dirs:
d = s.replace(src,dst)
cmd = "git mv "+ s + " " + d
print cmd
os.system(cmd)
| 25.719298 | 51 | 0.688267 | dirs = [
"./src/mobicoin-cli-res.rc",
"./src/mobicoin-cli.cpp",
"./src/mobicoind-res.rc",
"./src/mobicoind.cpp",
"./src/qt/locale/mobicoin_bar.ts",
"./src/qt/locale/mobicoin_bg.ts",
"./src/qt/locale/mobicoin_ca.ts",
"./src/qt/locale/mobicoin_cmn.ts",
"./src/qt/locale/mobicoin_cs.ts",
"./src/qt/locale/mobicoin_da.ts",
"./src/qt/locale/mobicoin_de.ts",
"./src/qt/locale/mobicoin_el.ts",
"./src/qt/locale/mobicoin_en.ts",
"./src/qt/locale/mobicoin_eo.ts",
"./src/qt/locale/mobicoin_es.ts",
"./src/qt/locale/mobicoin_fi.ts",
"./src/qt/locale/mobicoin_fr.ts",
"./src/qt/locale/mobicoin_hu_HU.ts",
"./src/qt/locale/mobicoin_it.ts",
"./src/qt/locale/mobicoin_lv_LV.ts",
"./src/qt/locale/mobicoin_nb.ts",
"./src/qt/locale/mobicoin_nl.ts",
"./src/qt/locale/mobicoin_pl.ts",
"./src/qt/locale/mobicoin_pt.ts",
"./src/qt/locale/mobicoin_pt_BR.ts",
"./src/qt/locale/mobicoin_ru.ts",
"./src/qt/locale/mobicoin_sk.ts",
"./src/qt/locale/mobicoin_sv.ts",
"./src/qt/locale/mobicoin_tr.ts",
"./src/qt/locale/mobicoin_vi.ts",
"./src/qt/locale/mobicoin_zh_CN.ts",
"./src/qt/locale/mobicoin_zh_HK.ts",
"./src/qt/mobicoin.cpp",
"./src/qt/mobicoin.qrc",
"./src/qt/mobicoinstrings.cpp",
"./src/qt/res/icons/mobicoin.icns",
"./src/qt/res/images/mobicoin_logo_horizontal.png",
"./src/qt/res/mobicoin-qt-res.rc",
"./src/test/test_mobicoin.cpp",
]
import os
src = "mobicoin"
dst = "unpay"
for s in dirs:
d = s.replace(src,dst)
cmd = "git mv "+ s + " " + d
print cmd
os.system(cmd)
| 0 | 0 | 0 |
26c014b73febd2c81c1023be79696513a2a16202 | 2,144 | py | Python | tamr_unify_client/models/attribute_configuration/resource.py | charlottemoremen/unify-client-python | d50e3e829c5dbcbad72d6eec0a606fc4b2b25e4d | [
"Apache-2.0"
] | null | null | null | tamr_unify_client/models/attribute_configuration/resource.py | charlottemoremen/unify-client-python | d50e3e829c5dbcbad72d6eec0a606fc4b2b25e4d | [
"Apache-2.0"
] | null | null | null | tamr_unify_client/models/attribute_configuration/resource.py | charlottemoremen/unify-client-python | d50e3e829c5dbcbad72d6eec0a606fc4b2b25e4d | [
"Apache-2.0"
] | null | null | null | from tamr_unify_client.models.base_resource import BaseResource
class AttributeConfiguration(BaseResource):
"""The configurations of Unify Attributes.
See https://docs.tamr.com/reference#the-attribute-configuration-object
"""
@classmethod
@property
def relative_id(self):
""":type: str"""
return self._data.get("relativeId")
@property
def id(self):
""":type: str"""
return self._data.get("id")
@property
def relative_attribute_id(self):
""":type: str"""
return self._data.get("relativeAttributeId")
@property
def attribute_role(self):
""":type: str"""
return self._data.get("attributeRole")
@property
def similarity_function(self):
""":type: str"""
return self._data.get("similarityFunction")
@property
def enabled_for_ml(self):
""":type: bool"""
return self._data.get("enabledForMl")
@property
def tokenizer(self):
""":type: str"""
return self._data.get("tokenizer")
@property
def numeric_field_resolution(self):
""":type: array (?) """
return self._data.get("numericFieldResolution")
@property
def attribute_name(self):
""":type: str"""
return self._data.get("attributeName")
| 28.586667 | 75 | 0.609142 | from tamr_unify_client.models.base_resource import BaseResource
class AttributeConfiguration(BaseResource):
"""The configurations of Unify Attributes.
See https://docs.tamr.com/reference#the-attribute-configuration-object
"""
@classmethod
def from_json(
cls, client, resource_json, api_path=None
) -> "AttributeConfiguration":
return super().from_data(client, resource_json, api_path)
@property
def relative_id(self):
""":type: str"""
return self._data.get("relativeId")
@property
def id(self):
""":type: str"""
return self._data.get("id")
@property
def relative_attribute_id(self):
""":type: str"""
return self._data.get("relativeAttributeId")
@property
def attribute_role(self):
""":type: str"""
return self._data.get("attributeRole")
@property
def similarity_function(self):
""":type: str"""
return self._data.get("similarityFunction")
@property
def enabled_for_ml(self):
""":type: bool"""
return self._data.get("enabledForMl")
@property
def tokenizer(self):
""":type: str"""
return self._data.get("tokenizer")
@property
def numeric_field_resolution(self):
""":type: array (?) """
return self._data.get("numericFieldResolution")
@property
def attribute_name(self):
""":type: str"""
return self._data.get("attributeName")
def __repr__(self):
return (
f"{self.__class__.__module__}."
f"{self.__class__.__qualname__}("
f"relative_id={self.relative_id!r}, "
f"id={self.id!r}, "
f"relative_attribute_id={self.relative_attribute_id!r}, "
f"attribute_role={self.attribute_role!r}, "
f"similarity_function={self.similarity_function!r}, "
f"enabled_for_ml={self.enabled_for_ml!r}, "
f"tokenizer={self.tokenizer!r}, "
f"numeric_field_resolution={self.numeric_field_resolution!r}, "
f"attribute_name={self.attribute_name!r})"
)
| 770 | 0 | 53 |
42beea81645435928667807329fd7b256710b23b | 2,923 | py | Python | pypy/tool/memusage/test/test_log2gnumeric.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | pypy/tool/memusage/test/test_log2gnumeric.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | pypy/tool/memusage/test/test_log2gnumeric.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 55 | 2015-08-16T02:41:30.000Z | 2022-03-20T20:33:35.000Z | from pypy.tool.memusage import log2gnumeric
log = """
[1000] ...
[2000] {gc-collect
.----------- Full collection ------------------
| used before collection:
| in ArenaCollection: 500 bytes
| raw_malloced: 100 bytes
| used after collection:
| in ArenaCollection: 300 bytes
| raw_malloced: 50 bytes
| number of major collects: 1
`----------------------------------------------
[3000] gc-collect}
[4000] {gc-collect
.----------- Full collection ------------------
| used before collection:
| in ArenaCollection: 600 bytes
| raw_malloced: 200 bytes
| used after collection:
| in ArenaCollection: 400 bytes
| raw_malloced: 100 bytes
| number of major collects: 1
`----------------------------------------------
[5000] gc-collect}
...
...
[6000] {translation-task
starting annotate
...
...
[7000] translation-task}
[8000] {translation-task
starting rtype_lltype
...
...
[9000] translation-task}
...
[a000] ...
"""
log = log.replace('\n', '')
| 29.23 | 62 | 0.559015 | from pypy.tool.memusage import log2gnumeric
log = """
[1000] ...
[2000] {gc-collect
.----------- Full collection ------------------
| used before collection:
| in ArenaCollection: 500 bytes
| raw_malloced: 100 bytes
| used after collection:
| in ArenaCollection: 300 bytes
| raw_malloced: 50 bytes
| number of major collects: 1
`----------------------------------------------
[3000] gc-collect}
[4000] {gc-collect
.----------- Full collection ------------------
| used before collection:
| in ArenaCollection: 600 bytes
| raw_malloced: 200 bytes
| used after collection:
| in ArenaCollection: 400 bytes
| raw_malloced: 100 bytes
| number of major collects: 1
`----------------------------------------------
[5000] gc-collect}
...
...
[6000] {translation-task
starting annotate
...
...
[7000] translation-task}
[8000] {translation-task
starting rtype_lltype
...
...
[9000] translation-task}
...
[a000] ...
"""
log = log.replace('\n', '')
def test_get_clock_range():
minclock, maxclock = log2gnumeric.get_clock_range(log)
assert minclock == 0x1000
assert maxclock == 0xa000
def test_gc_collect_rows():
rows = list(log2gnumeric.gc_collect_rows(0x1000, log))
assert len(rows) == 3
assert rows[0] == ( 'clock', 'gc-before', 'gc-after')
assert rows[1] == (0x3000-0x1000, 500+100, 300+ 50)
assert rows[2] == (0x5000-0x1000, 600+200, 400+100)
def test_tasks_rows():
rows = list(log2gnumeric.tasks_rows(0x1000, log))
assert len(rows) == 3
assert rows[0] == ( 'clock', None, 'task')
assert rows[1] == (0x6000-0x1000, 1, 'annotate')
assert rows[2] == (0x8000-0x1000, 1, 'rtype_lltype')
def test_vmrss_rows():
lines = ['100', '200', '300']
rows = list(log2gnumeric.vmrss_rows_impl(lines, 2000))
assert len(rows) == 4
assert rows[0] == ('inferred clock', 'VmRSS')
assert rows[1] == (0, 100)
assert rows[2] == (1000, 200)
assert rows[3] == (2000, 300)
def test_loops_rows():
log = """\
[1000] {jit-mem-looptoken-alloc
allocating Loop # 0
[1001] jit-mem-looptoken-alloc}
[2000] {jit-mem-looptoken-alloc
allocating Loop # 1
[2001] jit-mem-looptoken-alloc}
[3000] {jit-mem-looptoken-alloc
allocating Bridge # 1 of Loop # 0
[3001] jit-mem-looptoken-alloc}
[4000] {jit-mem-looptoken-free
freeing Loop # 0 with 1 attached bridges
[4001]
"""
log = log.replace('\n', '')
rows = list(log2gnumeric.loops_rows(0x1000, log))
assert len(rows) == 5
assert rows[0] == ('clock', 'total', 'loops', 'bridges')
assert rows[1] == ( 0x0, 1, 1, 0)
assert rows[2] == ( 0x1000, 2, 2, 0)
assert rows[3] == ( 0x2000, 3, 2, 1)
assert rows[4] == ( 0x3000, 1, 1, 0)
| 1,705 | 0 | 119 |
5376a92615395e3506ea2359c51cd259ba63e6f8 | 74 | py | Python | python-for-beginners/07 - Error handling/logic.py | vijayraavi/c9-python-getting-started | 345c81fb210601836d3618ff7bd491256ae62fa9 | [
"MIT"
] | 8,041 | 2019-09-17T17:25:51.000Z | 2022-03-31T11:38:07.000Z | python-for-beginners/07 - Error handling/logic.py | magicsolmyr/c9-python-getting-started | a74d0ea8451a9709dcebbb29ae931a9cb82fc695 | [
"MIT"
] | 43 | 2019-09-20T15:47:26.000Z | 2022-01-23T20:33:28.000Z | python-for-beginners/07 - Error handling/logic.py | magicsolmyr/c9-python-getting-started | a74d0ea8451a9709dcebbb29ae931a9cb82fc695 | [
"MIT"
] | 2,377 | 2019-09-17T18:16:53.000Z | 2022-03-30T15:38:07.000Z | x = 206
y = 42
if x < y:
print(str(x) + ' is greater than ' + str(y))
| 14.8 | 48 | 0.5 | x = 206
y = 42
if x < y:
print(str(x) + ' is greater than ' + str(y))
| 0 | 0 | 0 |
9cb8c87b0b7cb61df8d3b6aa351fa5cc81c0f1bc | 28,102 | py | Python | projects/PanopticFCN_cityscapes/panopticfcn/panoptic_seg.py | fatihyildiz-cs/detectron2 | 700b1e6685ca95a60e27cb961f363a2ca7f30d3c | [
"Apache-2.0"
] | null | null | null | projects/PanopticFCN_cityscapes/panopticfcn/panoptic_seg.py | fatihyildiz-cs/detectron2 | 700b1e6685ca95a60e27cb961f363a2ca7f30d3c | [
"Apache-2.0"
] | null | null | null | projects/PanopticFCN_cityscapes/panopticfcn/panoptic_seg.py | fatihyildiz-cs/detectron2 | 700b1e6685ca95a60e27cb961f363a2ca7f30d3c | [
"Apache-2.0"
] | null | null | null | import torch
from torch import nn
from torch.nn import functional as F
from PIL import Image
import numpy as np
import json
from detectron2.data import MetadataCatalog
from detectron2.structures import ImageList, Instances, BitMasks
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from .gt_generate import GenerateGT
from .loss import sigmoid_focal_loss, weighted_dice_loss
from .head import build_position_head, build_kernel_head, build_feature_encoder, build_thing_generator, build_stuff_generator
from .backbone_utils import build_semanticfpn, build_backbone
from .utils import topk_score, multi_apply
__all__ = ["PanopticFCN"]
@META_ARCH_REGISTRY.register()
class PanopticFCN(nn.Module):
"""
Implement PanopticFCN the paper :paper:`Fully Convolutional Networks for Panoptic Segmentation`.
"""
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": Instances
* "sem_seg": semantic segmentation ground truth.
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model, used in inference.
Returns:
list[dict]:
each dict is the results for one image. The dict contains the following keys:
* "instances": Instances results.
* "sem_seg": Semantic Segmentation results.
* "panoptic_seg": available when `MODEL.INFERENCE.COMBINE.ENABLE`.
See the return value of
:func:`combine_thing_and_stuff` for its format.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [self.normalizer(x) for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
encode_feat = self.semantic_fpn(features)
encode_feat = self.feature_encoder(encode_feat)
features_in = [features[_feat] for _feat in self.in_feature]
pred_centers, pred_regions, pred_weights = multi_apply(self.forward_single_level, features_in)
if self.training:
gt_dict = self.get_ground_truth.generate(batched_inputs, images, pred_weights, encode_feat)
return self.losses(pred_centers, pred_regions, pred_weights, encode_feat, gt_dict)
else:
return self.inference(batched_inputs, images, pred_centers, pred_regions, pred_weights, encode_feat)
    def losses(self, pred_centers, pred_regions, pred_weights, encode_feat, gt_dict):
        """
        Calculate losses of prediction with generated gt dict.
        Args:
            pred_centers: prediction for object centers
            pred_regions: prediction for stuff regions
            pred_weights: generated kernel weights for things and stuff
            encode_feat: encoded high-resolution feature
            gt_dict(dict): a dict contains all information of gt
                gt_dict = {
                    "center": gt gaussian scoremap for things,
                    "inst": gt instance target for things,
                    "index": gt index for things,
                    "index_mask": gt index mask for things,
                    "class": gt classes for things,
                    "sem_scores": gt semantic score map for stuff,
                    "sem_labels":gt semantic target for stuff,
                    "sem_index": gt index for stuff,
                    "sem_masks": gt index mask for stuff,
                }
        Returns:
            loss(dict): a dict contains all information of loss function
                loss = {
                    "loss_pos_th": position loss for things,
                    "loss_pos_st": position loss for stuff,
                    "loss_seg_th": segmentation loss for things,
                    "loss_seg_st": segmentation loss for stuff,
                }
        """
        # Flatten the spatial dims of the encoded feature so the generators
        # can apply kernel weights via matrix products.
        feat_shape = encode_feat.shape
        encode_feat = encode_feat.reshape(*feat_shape[:2], -1)
        # Per-FPN-level position losses plus the kernel features/values
        # gathered at GT locations; *_nums are the per-level instance counts.
        loss_pos_ths, loss_pos_sts, idx_feat_th, weighted_values, idx_feat_st, thing_nums, stuff_nums = \
            multi_apply(self.loss_single_level, pred_centers,
                        pred_regions, pred_weights,
                        gt_dict["center"], gt_dict["inst"],
                        gt_dict["index_mask"], gt_dict["class"],
                        gt_dict["sem_scores"], gt_dict["sem_masks"],
                        gt_dict["sem_index"])
        thing_num = sum(thing_nums)
        stuff_num = sum(stuff_nums)
        # Merge the per-level kernel features along their instance axes.
        idx_feat_th = torch.cat(idx_feat_th, dim=2)
        weighted_values = torch.cat(weighted_values, dim=1)
        idx_feat_st = torch.cat(idx_feat_st, dim=1)
        idx_feat_st = idx_feat_st.reshape(-1, *idx_feat_st.shape[2:])
        # Generate mask predictions from the gathered kernels.
        thing_pred, _ = self.thing_generator(encode_feat, feat_shape, idx_feat_th, thing_num)
        stuff_pred, _ = self.stuff_generator(encode_feat, feat_shape, idx_feat_st, stuff_num)
        # for thing
        # Truncate each image's GT to its per-level instance count before
        # concatenating, so GT aligns with the gathered predictions.
        thing_gt_idx = [_gt[:,:thing_nums[_idx]] for _idx, _gt in enumerate(gt_dict["index_mask"])]
        thing_gt_idx = torch.cat(thing_gt_idx, dim=1)
        thing_gt_idx = thing_gt_idx.reshape(-1).bool()
        thing_gt_num = int(thing_gt_idx.sum())
        thing_gt = [_gt[:,:thing_nums[_idx],...] for _idx, _gt in enumerate(gt_dict["inst"])]
        thing_gt = torch.cat(thing_gt, dim=1)
        loss_thing = weighted_dice_loss(thing_pred, thing_gt,
                                        gt_num=thing_gt_num,
                                        index_mask=thing_gt_idx,
                                        instance_num=thing_num,
                                        weighted_val=weighted_values,
                                        weighted_num=self.weighted_num,
                                        mode="thing",
                                        reduction="sum")
        # for stuff
        stuff_gt_idx = [_gt[:,:stuff_nums[_idx]] for _idx, _gt in enumerate(gt_dict["sem_index"])]
        stuff_gt_idx = torch.cat(stuff_gt_idx, dim=1)
        stuff_gt_idx = stuff_gt_idx.reshape(-1).bool()
        stuff_gt_num = int(stuff_gt_idx.sum())
        stuff_gt = [_gt[:,:stuff_nums[_idx],...] for _idx, _gt in enumerate(gt_dict["sem_labels"])]
        stuff_gt = torch.cat(stuff_gt, dim=1)
        loss_stuff = weighted_dice_loss(stuff_pred, stuff_gt,
                                        gt_num=stuff_gt_num,
                                        index_mask=stuff_gt_idx,
                                        instance_num=stuff_num,
                                        weighted_val=1.0,
                                        weighted_num=1,
                                        mode="stuff",
                                        reduction="sum")
        loss = {}
        # position loss
        # `max(..., 1)` guards against division by zero on images with no GT.
        loss["loss_pos_th"] = self.pos_weight * sum(loss_pos_ths) / max(thing_gt_num, 1)
        loss["loss_pos_st"] = self.pos_weight * sum(loss_pos_sts) / max(feat_shape[0],1)
        # segmentation loss
        loss["loss_seg_th"] = self.seg_weight * loss_thing / max(thing_gt_num, 1)
        loss["loss_seg_st"] = self.seg_weight * loss_stuff / max(stuff_gt_num, 1)
        return loss
@torch.no_grad()
@torch.no_grad()
def inference(self, batch_inputs, images, pred_centers, pred_regions, pred_weights, encode_feat):
"""
Panoptic FCN inference process.
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`
image: ImageList in detectron2.structures
pred_centers: prediction for object centers
pred_regions: prediction for stuff regions
pred_weights: generated kernel weights for things and stuff
encode_feat: encoded high-resolution feature
Returns:
processed_results(dict): a dict contains all predicted results
processed_results={
"sem_seg": prediction of stuff for semantic segmentation eval,
"instances": prediction of things for instance segmentation eval,
"panoptic_seg": prediction of both for panoptic segmentation eval.
}
"""
results = batch_inputs
processed_results = []
for img_idx, result_img in enumerate(results):
if "instances" in result_img.keys():
img_shape = result_img["instances"].image_size
else:
img_shape = result_img["image"].shape[-2:]
ori_shape = (result_img["height"], result_img["width"])
encode_feat = encode_feat[img_idx].unsqueeze(0)
feat_shape = encode_feat.shape
encode_feat = encode_feat.reshape(*feat_shape[:2], -1)
result_instance = None
pred_regions = [_pred[img_idx].unsqueeze(0) for _pred in pred_regions]
pred_weights = [_pred[img_idx].unsqueeze(0) for _pred in pred_weights]
pred_centers = [_pred[img_idx].unsqueeze(0) for _pred in pred_centers]
pool_size = [3,3,3,5,5]
idx_feat_th, class_ths, score_ths, thing_num, idx_feat_st, score_sts, class_sts, stuff_num = \
multi_apply(self.inference_single_level, pred_centers,\
pred_regions, pred_weights, pool_size)
thing_num = sum(thing_num)
if thing_num == 0:
result_instance = Instances(ori_shape, pred_masks=[], pred_boxes=[],
pred_classes=[], scores=[])
else:
class_ths = [_class for _class in class_ths if len(_class)>0]
score_ths = [_score for _score in score_ths if len(_score)>0]
idx_feat_th = [_feat for _feat in idx_feat_th if len(_feat)>0]
class_ths = torch.cat(class_ths, dim=0)
score_ths = torch.cat(score_ths, dim=0)
idx_feat_th = torch.cat(idx_feat_th, dim=2)
keep = torch.argsort(score_ths, descending=True)
idx_feat_th = idx_feat_th[:,:,keep]
score_ths = score_ths[keep]
class_ths = class_ths[keep]
stuff_num = sum(stuff_num)
if stuff_num == 0:
class_sts, idx_feat_st, score_sts = [], [], []
else:
score_sts = [_score for _score in score_sts if len(_score)>0]
class_sts = [_cate_sem for _cate_sem in class_sts if len(_cate_sem)>0]
idx_feat_st = [_feat for _feat in idx_feat_st if len(_feat)>0]
score_sts = torch.cat(score_sts, dim=0)
class_sts = torch.cat(class_sts, dim=0)
idx_feat_st = torch.cat(idx_feat_st, dim=0)
pred_thing, [class_ths, score_ths] = self.thing_generator(encode_feat, feat_shape, idx_feat_th, thing_num, class_ths, score_ths)
pred_stuff, [class_sts, score_sts] = self.stuff_generator(encode_feat, feat_shape, idx_feat_st, stuff_num, class_sts, score_sts)
pred_stuff = pred_stuff.sigmoid()
if result_instance is None:
result_instance, pred_mask, class_ths, score_ths = self.process_inst(
class_ths, score_ths, pred_thing, img_shape, ori_shape)
else:
pred_mask, class_ths, score_ths = None, None, None
if self.sem_with_thing or self.cfg.MODEL.POSITION_HEAD.STUFF.ALL_CLASSES:
sem_classes = self.sem_classes
else:
sem_classes = self.sem_classes + 1
pred_stuff = F.interpolate(pred_stuff, scale_factor=self.common_stride, mode="bilinear",
align_corners=False)[...,:img_shape[0],:img_shape[1]]
pred_stuff = F.interpolate(pred_stuff, size=ori_shape, mode="bilinear", align_corners=False)[0]
pred_sem_seg = torch.zeros(sem_classes, *pred_stuff.shape[-2:], device=self.device)
pred_sem_seg[class_sts] += pred_stuff
processed_results.append({"sem_seg": pred_sem_seg, "instances": result_instance})
if self.panoptic_combine:
result_panoptic = self.combine_thing_and_stuff(
[pred_mask, class_ths, score_ths],
pred_sem_seg.argmax(dim=0),
self.panoptic_overlap_thrs,
self.panoptic_stuff_limit,
self.panoptic_inst_thrs)
processed_results[-1]["panoptic_seg"] = result_panoptic
return processed_results
@torch.no_grad()
def process_inst(self, classes, scores, pred_inst, img_shape, ori_shape):
"""
Simple process generate prediction of Things.
Args:
classes: predicted classes of Things
scores: predicted scores of Things
pred_inst: predicted instances of Things
img_shape: input image shape
ori_shape: original image shape
Returns:
result_instance: preserved results for Things
pred_mask: preserved binary masks for Things
classes: preserved object classes
scores: processed object scores
"""
pred_inst = pred_inst.sigmoid()[0]
pred_mask = pred_inst > self.inst_thres
# object rescore.
sum_masks = pred_mask.sum((1, 2)).float() + 1e-6
seg_score = (pred_inst * pred_mask.float()).sum((1, 2)) / sum_masks
scores *= seg_score
keep = torch.argsort(scores, descending=True)
pred_inst = pred_inst[keep]
pred_mask = pred_mask[keep]
scores = scores[keep]
classes = classes[keep]
sum_masks = sum_masks[keep]
# object score filter.
keep = scores >= 0.05
if keep.sum() == 0:
result_instance = Instances(ori_shape, pred_masks=[], pred_boxes=[],
pred_classes=[], scores=[])
return result_instance, pred_mask, None, None
pred_inst = pred_inst[keep]
scores = scores[keep]
classes = classes[keep]
# sort and keep top_k
keep = torch.argsort(scores, descending=True)
keep = keep[:self.center_top_num]
pred_inst = pred_inst[keep]
scores = scores[keep].reshape(-1)
classes = classes[keep].reshape(-1).to(torch.int32)
pred_inst = F.interpolate(pred_inst.unsqueeze(0),
scale_factor=self.common_stride,
mode="bilinear",
align_corners=False)[...,:img_shape[0],:img_shape[1]]
pred_inst = F.interpolate(pred_inst,
size=ori_shape,
mode="bilinear",
align_corners=False)[0]
pred_mask = pred_inst > self.inst_thres
pred_bitinst = BitMasks(pred_mask)
result_instance = Instances(ori_shape,
pred_masks=pred_bitinst,
pred_boxes=pred_bitinst.get_bounding_boxes(),
pred_classes=classes,
scores=scores)
return result_instance, pred_mask, classes, scores
@torch.no_grad()
def combine_thing_and_stuff(
self,
thing_results,
stuff_results,
overlap_threshold,
stuff_area_limit,
inst_threshold,
):
"""
Implement a simple combining logic following
"combine_semantic_and_instance_predictions.py" in panopticapi
to produce panoptic segmentation outputs.
Args:
thing_results: prediction of Things
stuff_results: prediction of Stuff
overlap_threshold: overlap threshold for Things combination
stuff_area_limit: stuff area threshold for Stuff combination
inst_threshold: instances confidence threshold
Returns:
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
"""
pred_thing, thing_cate, thing_score = thing_results
panoptic_seg = torch.zeros_like(stuff_results, dtype=torch.int32)
current_segment_id = 0
segments_info = []
if thing_cate is not None:
print("thing_cate", thing_cate)
keep = thing_score >= inst_threshold
if keep.sum() > 0:
pred_thing = pred_thing[keep]
thing_cate = thing_cate[keep]
thing_score = thing_score[keep]
# Add instances one-by-one, check for overlaps with existing ones
for _idx, (_mask, _cate, _score) in enumerate(zip(pred_thing, thing_cate, thing_score)):
mask_area = _mask.sum().item()
intersect = _mask & (panoptic_seg > 0)
intersect_area = intersect.sum().item()
if mask_area==0 or intersect_area * 1.0 / mask_area > overlap_threshold:
continue
if intersect_area > 0:
_mask = _mask & (panoptic_seg == 0)
current_segment_id += 1
panoptic_seg[_mask] = current_segment_id
thing_category_id = _cate.item()
category_id = self.meta.thing_train_id2contiguous_id[thing_category_id]
# print("category_id_th", category_id)
segments_info.append(
{
"id": current_segment_id,
"isthing": True,
"score": _score.item(),
"category_id": category_id,
"instance_id": _idx,
})
# import pdb; pdb.set_trace()
stuff_labels = torch.unique(stuff_results)
for stuff_label in stuff_labels:
stuff_category_id = stuff_label.item()
# if stuff_category_id==0 or stuff_category_id==13: #this condition is experimental because we got key error:0 on the following line
# continue
category_id = self.meta.stuff_train_id2contiguous_id[stuff_category_id]
if self.cfg.MODEL.POSITION_HEAD.STUFF.WITH_THING:
if stuff_label == 0: # 0 is a special "thing" class
continue
if self.cfg.MODEL.POSITION_HEAD.STUFF.ALL_CLASSES:
if category_id in self.meta.thing_train_id2contiguous_id.values():
continue
mask = (stuff_results == stuff_label) & (panoptic_seg == 0)
mask_area = mask.sum()
if mask_area < stuff_area_limit:
continue
current_segment_id += 1
panoptic_seg[mask] = current_segment_id
segments_info.append(
{
"id": current_segment_id,
"isthing": False,
"category_id": category_id,
"area": mask_area.item(),
})
return panoptic_seg, segments_info
| 48.70364 | 144 | 0.59163 | import torch
from torch import nn
from torch.nn import functional as F
from PIL import Image
import numpy as np
import json
from detectron2.data import MetadataCatalog
from detectron2.structures import ImageList, Instances, BitMasks
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from .gt_generate import GenerateGT
from .loss import sigmoid_focal_loss, weighted_dice_loss
from .head import build_position_head, build_kernel_head, build_feature_encoder, build_thing_generator, build_stuff_generator
from .backbone_utils import build_semanticfpn, build_backbone
from .utils import topk_score, multi_apply
__all__ = ["PanopticFCN"]
@META_ARCH_REGISTRY.register()
class PanopticFCN(nn.Module):
"""
Implement PanopticFCN the paper :paper:`Fully Convolutional Networks for Panoptic Segmentation`.
"""
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
# parameters
self.cfg = cfg
self.ignore_val = cfg.MODEL.IGNORE_VALUE
self.common_stride = cfg.MODEL.SEMANTIC_FPN.COMMON_STRIDE
self.center_top_num = cfg.MODEL.POSITION_HEAD.THING.TOP_NUM
self.weighted_num = cfg.MODEL.POSITION_HEAD.THING.POS_NUM
self.center_thres = cfg.MODEL.POSITION_HEAD.THING.THRES
self.sem_thres = cfg.MODEL.POSITION_HEAD.STUFF.THRES
self.sem_classes = cfg.MODEL.POSITION_HEAD.STUFF.NUM_CLASSES
self.sem_with_thing = cfg.MODEL.POSITION_HEAD.STUFF.WITH_THING
self.in_feature = cfg.MODEL.FEATURE_ENCODER.IN_FEATURES
self.inst_scale = cfg.MODEL.KERNEL_HEAD.INSTANCE_SCALES
self.pos_weight = cfg.MODEL.LOSS_WEIGHT.POSITION
self.seg_weight = cfg.MODEL.LOSS_WEIGHT.SEGMENT
self.focal_loss_alpha = cfg.MODEL.LOSS_WEIGHT.FOCAL_LOSS_ALPHA
self.focal_loss_gamma = cfg.MODEL.LOSS_WEIGHT.FOCAL_LOSS_GAMMA
self.inst_thres = cfg.MODEL.INFERENCE.INST_THRES
self.panoptic_combine = cfg.MODEL.INFERENCE.COMBINE.ENABLE
self.panoptic_overlap_thrs = cfg.MODEL.INFERENCE.COMBINE.OVERLAP_THRESH
self.panoptic_stuff_limit = cfg.MODEL.INFERENCE.COMBINE.STUFF_AREA_LIMIT
self.panoptic_inst_thrs = cfg.MODEL.INFERENCE.COMBINE.INST_THRESH
# backbone
self.backbone = build_backbone(cfg)
self.semantic_fpn = build_semanticfpn(cfg, self.backbone.output_shape())
self.position_head = build_position_head(cfg)
self.kernel_head = build_kernel_head(cfg)
self.feature_encoder = build_feature_encoder(cfg)
self.thing_generator = build_thing_generator(cfg)
self.stuff_generator = build_stuff_generator(cfg)
self.get_ground_truth = GenerateGT(cfg)
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
dataset_names = self.cfg.DATASETS.TRAIN
self.meta = MetadataCatalog.get(dataset_names[0])
self.to(self.device)
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": Instances
* "sem_seg": semantic segmentation ground truth.
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model, used in inference.
Returns:
list[dict]:
each dict is the results for one image. The dict contains the following keys:
* "instances": Instances results.
* "sem_seg": Semantic Segmentation results.
* "panoptic_seg": available when `MODEL.INFERENCE.COMBINE.ENABLE`.
See the return value of
:func:`combine_thing_and_stuff` for its format.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [self.normalizer(x) for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
encode_feat = self.semantic_fpn(features)
encode_feat = self.feature_encoder(encode_feat)
features_in = [features[_feat] for _feat in self.in_feature]
pred_centers, pred_regions, pred_weights = multi_apply(self.forward_single_level, features_in)
if self.training:
gt_dict = self.get_ground_truth.generate(batched_inputs, images, pred_weights, encode_feat)
return self.losses(pred_centers, pred_regions, pred_weights, encode_feat, gt_dict)
else:
return self.inference(batched_inputs, images, pred_centers, pred_regions, pred_weights, encode_feat)
def forward_single_level(self, feature):
pred_center, pred_region = self.position_head(feature)
pred_weight = self.kernel_head(feature)
return pred_center, pred_region, pred_weight
def losses(self, pred_centers, pred_regions, pred_weights, encode_feat, gt_dict):
"""
Calculate losses of prediction with generated gt dict.
Args:
pred_centers: prediction for object centers
pred_regions: prediction for stuff regions
pred_weights: generated kernel weights for things and stuff
encode_feat: encoded high-resolution feature
gt_dict(dict): a dict contains all information of gt
gt_dict = {
"center": gt gaussian scoremap for things,
"inst": gt instance target for things,
"index": gt index for things,
"index_mask": gt index mask for things,
"class": gt classes for things,
"sem_scores": gt semantic score map for stuff,
"sem_labels":gt semantic target for stuff,
"sem_index": gt index for stuff,
"sem_masks": gt index mask for stuff,
}
Returns:
loss(dict): a dict contains all information of loss function
loss = {
"loss_pos_th": position loss for things,
"loss_pos_st": position loss for stuff,
"loss_seg_th": segmentation loss for things,
"loss_seg_st": segmentation loss for stuff,
}
"""
feat_shape = encode_feat.shape
encode_feat = encode_feat.reshape(*feat_shape[:2], -1)
loss_pos_ths, loss_pos_sts, idx_feat_th, weighted_values, idx_feat_st, thing_nums, stuff_nums = \
multi_apply(self.loss_single_level, pred_centers,
pred_regions, pred_weights,
gt_dict["center"], gt_dict["inst"],
gt_dict["index_mask"], gt_dict["class"],
gt_dict["sem_scores"], gt_dict["sem_masks"],
gt_dict["sem_index"])
thing_num = sum(thing_nums)
stuff_num = sum(stuff_nums)
idx_feat_th = torch.cat(idx_feat_th, dim=2)
weighted_values = torch.cat(weighted_values, dim=1)
idx_feat_st = torch.cat(idx_feat_st, dim=1)
idx_feat_st = idx_feat_st.reshape(-1, *idx_feat_st.shape[2:])
thing_pred, _ = self.thing_generator(encode_feat, feat_shape, idx_feat_th, thing_num)
stuff_pred, _ = self.stuff_generator(encode_feat, feat_shape, idx_feat_st, stuff_num)
# for thing
thing_gt_idx = [_gt[:,:thing_nums[_idx]] for _idx, _gt in enumerate(gt_dict["index_mask"])]
thing_gt_idx = torch.cat(thing_gt_idx, dim=1)
thing_gt_idx = thing_gt_idx.reshape(-1).bool()
thing_gt_num = int(thing_gt_idx.sum())
thing_gt = [_gt[:,:thing_nums[_idx],...] for _idx, _gt in enumerate(gt_dict["inst"])]
thing_gt = torch.cat(thing_gt, dim=1)
loss_thing = weighted_dice_loss(thing_pred, thing_gt,
gt_num=thing_gt_num,
index_mask=thing_gt_idx,
instance_num=thing_num,
weighted_val=weighted_values,
weighted_num=self.weighted_num,
mode="thing",
reduction="sum")
# for stuff
stuff_gt_idx = [_gt[:,:stuff_nums[_idx]] for _idx, _gt in enumerate(gt_dict["sem_index"])]
stuff_gt_idx = torch.cat(stuff_gt_idx, dim=1)
stuff_gt_idx = stuff_gt_idx.reshape(-1).bool()
stuff_gt_num = int(stuff_gt_idx.sum())
stuff_gt = [_gt[:,:stuff_nums[_idx],...] for _idx, _gt in enumerate(gt_dict["sem_labels"])]
stuff_gt = torch.cat(stuff_gt, dim=1)
loss_stuff = weighted_dice_loss(stuff_pred, stuff_gt,
gt_num=stuff_gt_num,
index_mask=stuff_gt_idx,
instance_num=stuff_num,
weighted_val=1.0,
weighted_num=1,
mode="stuff",
reduction="sum")
loss = {}
# position loss
loss["loss_pos_th"] = self.pos_weight * sum(loss_pos_ths) / max(thing_gt_num, 1)
loss["loss_pos_st"] = self.pos_weight * sum(loss_pos_sts) / max(feat_shape[0],1)
# segmentation loss
loss["loss_seg_th"] = self.seg_weight * loss_thing / max(thing_gt_num, 1)
loss["loss_seg_st"] = self.seg_weight * loss_stuff / max(stuff_gt_num, 1)
return loss
def loss_single_level(self, pred_center, pred_region, pred_weights, \
gt_center, gt_inst, gt_index_mask, gt_class, \
gt_sem_scores, gt_sem_masks, gt_sem_index):
# position loss for things
loss_pos_th = sigmoid_focal_loss(pred_center, gt_center,
mode="thing",
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum")
# position loss for stuff
loss_pos_st = sigmoid_focal_loss(pred_region, gt_sem_scores,
mode="stuff",
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum")
# generate guided center
batch_num, _, feat_h, feat_w = pred_center.shape
guided_inst = F.interpolate(gt_inst, size=(feat_h, feat_w), mode='bilinear', align_corners=False)
guidence = torch.zeros_like(guided_inst)
pred_select = []
for _idx in range(batch_num):
sub_pred = pred_center[_idx]
sub_class = gt_class[_idx].to(torch.int64)
sub_select = torch.index_select(sub_pred, dim=0, index=sub_class)
pred_select.append(sub_select.sigmoid())
pred_select = torch.stack(pred_select, dim=0)
keep = (guided_inst > 0.1) & (guided_inst < 255)
guidence[keep] = pred_select[keep]
weighted_values, guided_index = torch.topk(guidence.reshape(*guided_inst.shape[:2], -1),
k=self.weighted_num, dim=-1)
thing_num = int(max(gt_index_mask.sum(dim=1).max(), 1))
guided_index = guided_index[:,:thing_num, :]
guided_index = guided_index.reshape(batch_num, -1)
weighted_values = weighted_values[:,:thing_num, :]
# pred instance
weight_shape = pred_weights.shape
inst_w = pred_weights.reshape(*weight_shape[:2], -1)
idx_inst = guided_index.unsqueeze(1).expand(*weight_shape[:2], -1)
idx_feat_th = torch.gather(inst_w, dim=2, index=idx_inst)
idx_feat_th = idx_feat_th.reshape(*weight_shape[:2], thing_num, self.weighted_num)
# generate guided sem
stuff_num = int(max(gt_sem_index.sum(dim=1).max(), 1))
gt_sem_masks = gt_sem_masks[:, :stuff_num]
gt_sem_masks = gt_sem_masks.unsqueeze(2)
idx_feat_st = gt_sem_masks * pred_weights.unsqueeze(1)
idx_feat_st = idx_feat_st.reshape(-1, *weight_shape[-3:])
idx_feat_st = F.adaptive_avg_pool2d(idx_feat_st, output_size=1)
idx_feat_st = idx_feat_st.reshape(batch_num, -1, weight_shape[1], 1, 1)
return loss_pos_th, loss_pos_st, idx_feat_th, weighted_values, idx_feat_st, thing_num, stuff_num
@torch.no_grad()
def inference_single_level(self, pred_center, pred_region, pred_weights, pool_size):
# pred things
pred_center = pred_center.sigmoid()
center_pool = F.avg_pool2d(pred_center, kernel_size=pool_size,
stride=1, padding=(pool_size-1)//2)
pred_center = (pred_center + center_pool) / 2.0
fmap_max = F.max_pool2d(pred_center, 3, stride=1, padding=1)
keep = (fmap_max == pred_center).float()
pred_center *= keep
weight_shape = pred_weights.shape
center_shape = pred_center.shape
top_num = min(center_shape[-2]*center_shape[-1], self.center_top_num//2)
sub_score, sub_index, sub_class, ys, xs = \
topk_score(pred_center, K=top_num, score_shape=center_shape)
keep = sub_score > self.center_thres
score_th = sub_score[keep]
class_th = sub_class[keep]
index = sub_index[keep]
index = index.unsqueeze(0).to(device=self.device, dtype=torch.long)
thing_num = keep.sum()
if thing_num > 0:
inst_w = pred_weights.reshape(*weight_shape[:2], -1)
idx_inst = index.unsqueeze(1).expand(*weight_shape[:2], -1)
idx_feat_th = torch.gather(inst_w, dim=2, index=idx_inst)
idx_feat_th = idx_feat_th.unsqueeze(-1)
else:
idx_feat_th, class_th, score_th = [], [], []
# pred stuff
pred_region = pred_region.sigmoid()
pred_cate = pred_region.argmax(dim=1)
class_st, num_class_st = torch.unique(pred_cate, return_counts=True)
pred_st_mask = F.one_hot(pred_cate, num_classes=self.sem_classes)
pred_st_mask = pred_st_mask.permute(0, 3, 1, 2).contiguous()
score_st = (pred_region * pred_st_mask).reshape(1, self.sem_classes, -1)
score_st = (score_st.sum(dim=-1)[:, class_st] / num_class_st).squeeze(0)
pred_st_mask = pred_st_mask[:, class_st]
keep = score_st > self.sem_thres
stuff_num = keep.sum()
score_st = score_st[keep]
class_st = class_st[keep]
pred_st_mask = pred_st_mask[:, keep]
pred_st_mask = pred_st_mask.unsqueeze(2)
idx_feat_st = pred_st_mask * pred_weights.unsqueeze(1)
idx_feat_st = idx_feat_st.reshape(-1, *weight_shape[-3:])
idx_feat_st = F.adaptive_avg_pool2d(idx_feat_st, output_size=1)
if not self.cfg.MODEL.POSITION_HEAD.STUFF.ALL_CLASSES:
if not self.sem_with_thing:
class_st += 1
return idx_feat_th, class_th, score_th, thing_num, idx_feat_st, score_st, class_st, stuff_num
@torch.no_grad()
def inference(self, batch_inputs, images, pred_centers, pred_regions, pred_weights, encode_feat):
"""
Panoptic FCN inference process.
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`
image: ImageList in detectron2.structures
pred_centers: prediction for object centers
pred_regions: prediction for stuff regions
pred_weights: generated kernel weights for things and stuff
encode_feat: encoded high-resolution feature
Returns:
processed_results(dict): a dict contains all predicted results
processed_results={
"sem_seg": prediction of stuff for semantic segmentation eval,
"instances": prediction of things for instance segmentation eval,
"panoptic_seg": prediction of both for panoptic segmentation eval.
}
"""
results = batch_inputs
processed_results = []
for img_idx, result_img in enumerate(results):
if "instances" in result_img.keys():
img_shape = result_img["instances"].image_size
else:
img_shape = result_img["image"].shape[-2:]
ori_shape = (result_img["height"], result_img["width"])
encode_feat = encode_feat[img_idx].unsqueeze(0)
feat_shape = encode_feat.shape
encode_feat = encode_feat.reshape(*feat_shape[:2], -1)
result_instance = None
pred_regions = [_pred[img_idx].unsqueeze(0) for _pred in pred_regions]
pred_weights = [_pred[img_idx].unsqueeze(0) for _pred in pred_weights]
pred_centers = [_pred[img_idx].unsqueeze(0) for _pred in pred_centers]
pool_size = [3,3,3,5,5]
idx_feat_th, class_ths, score_ths, thing_num, idx_feat_st, score_sts, class_sts, stuff_num = \
multi_apply(self.inference_single_level, pred_centers,\
pred_regions, pred_weights, pool_size)
thing_num = sum(thing_num)
if thing_num == 0:
result_instance = Instances(ori_shape, pred_masks=[], pred_boxes=[],
pred_classes=[], scores=[])
else:
class_ths = [_class for _class in class_ths if len(_class)>0]
score_ths = [_score for _score in score_ths if len(_score)>0]
idx_feat_th = [_feat for _feat in idx_feat_th if len(_feat)>0]
class_ths = torch.cat(class_ths, dim=0)
score_ths = torch.cat(score_ths, dim=0)
idx_feat_th = torch.cat(idx_feat_th, dim=2)
keep = torch.argsort(score_ths, descending=True)
idx_feat_th = idx_feat_th[:,:,keep]
score_ths = score_ths[keep]
class_ths = class_ths[keep]
stuff_num = sum(stuff_num)
if stuff_num == 0:
class_sts, idx_feat_st, score_sts = [], [], []
else:
score_sts = [_score for _score in score_sts if len(_score)>0]
class_sts = [_cate_sem for _cate_sem in class_sts if len(_cate_sem)>0]
idx_feat_st = [_feat for _feat in idx_feat_st if len(_feat)>0]
score_sts = torch.cat(score_sts, dim=0)
class_sts = torch.cat(class_sts, dim=0)
idx_feat_st = torch.cat(idx_feat_st, dim=0)
pred_thing, [class_ths, score_ths] = self.thing_generator(encode_feat, feat_shape, idx_feat_th, thing_num, class_ths, score_ths)
pred_stuff, [class_sts, score_sts] = self.stuff_generator(encode_feat, feat_shape, idx_feat_st, stuff_num, class_sts, score_sts)
pred_stuff = pred_stuff.sigmoid()
if result_instance is None:
result_instance, pred_mask, class_ths, score_ths = self.process_inst(
class_ths, score_ths, pred_thing, img_shape, ori_shape)
else:
pred_mask, class_ths, score_ths = None, None, None
if self.sem_with_thing or self.cfg.MODEL.POSITION_HEAD.STUFF.ALL_CLASSES:
sem_classes = self.sem_classes
else:
sem_classes = self.sem_classes + 1
pred_stuff = F.interpolate(pred_stuff, scale_factor=self.common_stride, mode="bilinear",
align_corners=False)[...,:img_shape[0],:img_shape[1]]
pred_stuff = F.interpolate(pred_stuff, size=ori_shape, mode="bilinear", align_corners=False)[0]
pred_sem_seg = torch.zeros(sem_classes, *pred_stuff.shape[-2:], device=self.device)
pred_sem_seg[class_sts] += pred_stuff
processed_results.append({"sem_seg": pred_sem_seg, "instances": result_instance})
if self.panoptic_combine:
result_panoptic = self.combine_thing_and_stuff(
[pred_mask, class_ths, score_ths],
pred_sem_seg.argmax(dim=0),
self.panoptic_overlap_thrs,
self.panoptic_stuff_limit,
self.panoptic_inst_thrs)
processed_results[-1]["panoptic_seg"] = result_panoptic
return processed_results
@torch.no_grad()
def process_inst(self, classes, scores, pred_inst, img_shape, ori_shape):
"""
Simple process generate prediction of Things.
Args:
classes: predicted classes of Things
scores: predicted scores of Things
pred_inst: predicted instances of Things
img_shape: input image shape
ori_shape: original image shape
Returns:
result_instance: preserved results for Things
pred_mask: preserved binary masks for Things
classes: preserved object classes
scores: processed object scores
"""
pred_inst = pred_inst.sigmoid()[0]
pred_mask = pred_inst > self.inst_thres
# object rescore.
sum_masks = pred_mask.sum((1, 2)).float() + 1e-6
seg_score = (pred_inst * pred_mask.float()).sum((1, 2)) / sum_masks
scores *= seg_score
keep = torch.argsort(scores, descending=True)
pred_inst = pred_inst[keep]
pred_mask = pred_mask[keep]
scores = scores[keep]
classes = classes[keep]
sum_masks = sum_masks[keep]
# object score filter.
keep = scores >= 0.05
if keep.sum() == 0:
result_instance = Instances(ori_shape, pred_masks=[], pred_boxes=[],
pred_classes=[], scores=[])
return result_instance, pred_mask, None, None
pred_inst = pred_inst[keep]
scores = scores[keep]
classes = classes[keep]
# sort and keep top_k
keep = torch.argsort(scores, descending=True)
keep = keep[:self.center_top_num]
pred_inst = pred_inst[keep]
scores = scores[keep].reshape(-1)
classes = classes[keep].reshape(-1).to(torch.int32)
pred_inst = F.interpolate(pred_inst.unsqueeze(0),
scale_factor=self.common_stride,
mode="bilinear",
align_corners=False)[...,:img_shape[0],:img_shape[1]]
pred_inst = F.interpolate(pred_inst,
size=ori_shape,
mode="bilinear",
align_corners=False)[0]
pred_mask = pred_inst > self.inst_thres
pred_bitinst = BitMasks(pred_mask)
result_instance = Instances(ori_shape,
pred_masks=pred_bitinst,
pred_boxes=pred_bitinst.get_bounding_boxes(),
pred_classes=classes,
scores=scores)
return result_instance, pred_mask, classes, scores
    @torch.no_grad()
    def combine_thing_and_stuff(
        self,
        thing_results,
        stuff_results,
        overlap_threshold,
        stuff_area_limit,
        inst_threshold,
    ):
        """
        Implement a simple combining logic following
        "combine_semantic_and_instance_predictions.py" in panopticapi
        to produce panoptic segmentation outputs.
        Args:
            thing_results: prediction of Things
            stuff_results: prediction of Stuff
            overlap_threshold: overlap threshold for Things combination
            stuff_area_limit: stuff area threshold for Stuff combination
            inst_threshold: instances confidence threshold
        Returns:
            panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
            segments_info (list[dict]): Describe each segment in `panoptic_seg`.
                Each dict contains keys "id", "category_id", "isthing".
        """
        pred_thing, thing_cate, thing_score = thing_results
        # Segment id 0 means "unassigned"; real segments get ids from 1.
        panoptic_seg = torch.zeros_like(stuff_results, dtype=torch.int32)
        current_segment_id = 0
        segments_info = []
        if thing_cate is not None:
            # NOTE(review): leftover debug print -- consider removing.
            print("thing_cate", thing_cate)
            keep = thing_score >= inst_threshold
            if keep.sum() > 0:
                pred_thing = pred_thing[keep]
                thing_cate = thing_cate[keep]
                thing_score = thing_score[keep]
                # Add instances one-by-one, check for overlaps with existing ones
                for _idx, (_mask, _cate, _score) in enumerate(zip(pred_thing, thing_cate, thing_score)):
                    mask_area = _mask.sum().item()
                    intersect = _mask & (panoptic_seg > 0)
                    intersect_area = intersect.sum().item()
                    # Skip empty masks and masks mostly covered by earlier,
                    # higher-priority instances.
                    if mask_area==0 or intersect_area * 1.0 / mask_area > overlap_threshold:
                        continue
                    if intersect_area > 0:
                        # Keep only the not-yet-claimed pixels.
                        _mask = _mask & (panoptic_seg == 0)
                    current_segment_id += 1
                    panoptic_seg[_mask] = current_segment_id
                    thing_category_id = _cate.item()
                    # Map the model's contiguous training id back to the
                    # dataset category id.
                    category_id = self.meta.thing_train_id2contiguous_id[thing_category_id]
                    # print("category_id_th", category_id)
                    segments_info.append(
                        {
                            "id": current_segment_id,
                            "isthing": True,
                            "score": _score.item(),
                            "category_id": category_id,
                            "instance_id": _idx,
                        })
        # import pdb; pdb.set_trace()
        # Fill the remaining pixels with stuff segments, one per label.
        stuff_labels = torch.unique(stuff_results)
        for stuff_label in stuff_labels:
            stuff_category_id = stuff_label.item()
            # if stuff_category_id==0 or stuff_category_id==13: #this condition is experimental because we got key error:0 on the following line
            # continue
            category_id = self.meta.stuff_train_id2contiguous_id[stuff_category_id]
            if self.cfg.MODEL.POSITION_HEAD.STUFF.WITH_THING:
                if stuff_label == 0: # 0 is a special "thing" class
                    continue
            if self.cfg.MODEL.POSITION_HEAD.STUFF.ALL_CLASSES:
                # Skip labels that are actually thing categories when the
                # stuff head predicts over all classes.
                if category_id in self.meta.thing_train_id2contiguous_id.values():
                    continue
            # Only pixels not already claimed by an instance.
            mask = (stuff_results == stuff_label) & (panoptic_seg == 0)
            mask_area = mask.sum()
            # Drop stuff regions that are too small to be meaningful.
            if mask_area < stuff_area_limit:
                continue
            current_segment_id += 1
            panoptic_seg[mask] = current_segment_id
            segments_info.append(
                {
                    "id": current_segment_id,
                    "isthing": False,
                    "category_id": category_id,
                    "area": mask_area.item(),
                })
        return panoptic_seg, segments_info
| 8,239 | 0 | 106 |
801449851e6d92ac3bb3e4549173f3a906db55d5 | 337 | py | Python | core/util/graph.py | ongmingyang/some-max-cut | 7ebabd06d3e46789a3672bd516adc48953ba135e | [
"MIT"
] | 3 | 2018-03-16T17:25:23.000Z | 2021-04-27T21:42:31.000Z | core/util/graph.py | ongmingyang/some-max-cut | 7ebabd06d3e46789a3672bd516adc48953ba135e | [
"MIT"
] | null | null | null | core/util/graph.py | ongmingyang/some-max-cut | 7ebabd06d3e46789a3672bd516adc48953ba135e | [
"MIT"
] | null | null | null | #
# A graph G = (V,E) is a set of nodes V and a set of edges E
#
# Returns a list of edges sorted by weight in decreasing order
| 25.923077 | 64 | 0.667656 | #
# A graph G = (V,E) is a set of nodes V and a set of edges E
#
class Graph:
def __init__(self):
self.nodes = []
self.edges = {} # Dictionary of {edge: weight}
# Returns a list of edges sorted by weight in decreasing order
def get_sorted_edges(self, r=True):
return sorted(self.edges, key=self.edges.get, reverse=r)
| 144 | -9 | 70 |
2d9149a226a8f401e814a15c8d5900c3475a793b | 3,472 | py | Python | ca-hospitalizations/main.py | edublancas/kaggle | 368def08908bf8f7f88a4f5e0b6c7b40ca0d2b6b | [
"Apache-2.0"
] | null | null | null | ca-hospitalizations/main.py | edublancas/kaggle | 368def08908bf8f7f88a4f5e0b6c7b40ca0d2b6b | [
"Apache-2.0"
] | null | null | null | ca-hospitalizations/main.py | edublancas/kaggle | 368def08908bf8f7f88a4f5e0b6c7b40ca0d2b6b | [
"Apache-2.0"
] | null | null | null | # # California COVID-19 Hospitalizations
#
# COVID-19 hospitalizations in California by county.
#
# ## Requirements
# ! conda install pygraphviz --yes --quiet
# ! pip install jupyter pandas ploomber matplotlib --quiet
# ## Data sources
#
# COVID-19: https://data.chhs.ca.gov/dataset/california-covid-19-hospital-data-and-case-statistics
#
# Population: https://data.ca.gov/dataset/california-population-projection-by-county-age-gender-and-ethnicity
#
# ## Data cleaning
# +
import pandas as pd
from pathlib import Path
from IPython.display import Image
import matplotlib.pyplot as plt
import matplotlib as mpl
from ploomber import DAG
from ploomber.tasks import DownloadFromURL, PythonCallable
from ploomber.products import File
# +
# matplotlib config
plt.style.use('ggplot')
mpl.rcParams['axes.titlesize'] = 20
mpl.rcParams['axes.labelsize'] = 14
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['figure.figsize'] = [8.0, 8.0]
mpl.rcParams['figure.dpi'] = 120
# -
# we will save everything in output/
ROOT = Path('output/')
ROOT.mkdir(exist_ok=True)
# we use ploomber to get the organize our tasks
dag = DAG()
# +
# first two tasks just download the data
SOURCE = 'https://data.chhs.ca.gov/dataset/6882c390-b2d7-4b9a-aefa-2068cee63e47/resource/6cd8d424-dfaa-4bdd-9410-a3d656e1176e/download/covid19data.csv'
download = DownloadFromURL(SOURCE, File(ROOT / 'raw.csv'), dag, name='raw')
SOURCE_POP = 'https://data.ca.gov/dataset/7a8c03d3-ed86-498a-acdb-8ea09ccb4130/resource/2c217b79-4625-4ab2-86b3-6fc5d66f0409/download/population-estimates-and-projections-by-county-age-and-sex-california-1970-2050.csv'
download_pop = DownloadFromURL(SOURCE_POP, File(
ROOT / 'raw_pop'), dag, name='raw_pop')
# -
# we then join the downloaded data to normalize using population by county
def _join(upstream, product):
    """Join California COVID-19 hospitalizations with population data.

    Reads the raw case CSV and the population-projection CSV, keeps each
    county's most recent cumulative count, joins on the upper-cased county
    name, normalizes to cases per 100,000 residents (2020 population), and
    writes the result to ``product``.

    Args:
        upstream: mapping with 'raw' (cases CSV path) and 'raw_pop'
            (population CSV path) entries.
        product: output CSV path.
    """
    df = pd.read_csv(str(upstream['raw']))
    df['Most Recent Date'] = pd.to_datetime(df['Most Recent Date'])
    # For every county, find the row with the latest report date.
    idx_m_recent = df.groupby('County Name')['Most Recent Date'].idxmax()
    # idxmax() returns index *labels*, so select with .loc (not .iloc): the
    # two only coincide on a default RangeIndex, and .loc stays correct if
    # the frame is ever filtered or re-indexed upstream.
    m_recent_total = df.loc[idx_m_recent][['Most Recent Date', 'County Name',
                                           'Total Count Confirmed']]
    # The population data upper-cases county names; align the join key.
    m_recent_total['county'] = m_recent_total['County Name'].str.upper()
    pop = pd.read_csv(str(upstream['raw_pop']))
    # 2020 projected population per county (summed over age/sex strata).
    pop_by_county = pop[pop.year == 2020].groupby('county')[
        ['pop_total']].sum()
    m_recent = pop_by_county.merge(m_recent_total, on='county')
    m_recent['Total count per 100k population'] = (m_recent['Total Count Confirmed']
                                                  / m_recent['pop_total'] * 100_000)
    m_recent.to_csv(str(product), index=False)
# +
# wire the join task into the DAG; its product is the cleaned CSV
join = PythonCallable(_join, File(ROOT / 'joined.csv'), dag, name='joined')
# the joined data depends on the raw data
(download + download_pop) >> join
# -
# summary table of task status
dag.status()
# plot. NOTE: pygraphviz is required to plot, easiest way to install is via "conda install pygraphviz"
path = dag.plot()
Image(filename=path)
# run all tasks
dag.build()
# ## Hospitalizations per 100,000 people
# load joined data
m_recent = pd.read_csv(str(dag['joined']))
# top-10 counties by normalized case count
(m_recent[['County Name', 'Total count per 100k population']]
 .set_index('County Name')
 .sort_values(by='Total count per 100k population', ascending=False)
 .head(10)
 .plot(kind='bar', title='Normalized cases by county (top 10)'))
# # California COVID-19 Hospitalizations
#
# COVID-19 hospitalizations in California by county.
#
# ## Requirements
# ! conda install pygraphviz --yes --quiet
# ! pip install jupyter pandas ploomber matplotlib --quiet
# ## Data sources
#
# COVID-19: https://data.chhs.ca.gov/dataset/california-covid-19-hospital-data-and-case-statistics
#
# Population: https://data.ca.gov/dataset/california-population-projection-by-county-age-gender-and-ethnicity
#
# ## Data cleaning
# +
import pandas as pd
from pathlib import Path
from IPython.display import Image
import matplotlib.pyplot as plt
import matplotlib as mpl
from ploomber import DAG
from ploomber.tasks import DownloadFromURL, PythonCallable
from ploomber.products import File
# +
# matplotlib config
plt.style.use('ggplot')
mpl.rcParams['axes.titlesize'] = 20
mpl.rcParams['axes.labelsize'] = 14
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['figure.figsize'] = [8.0, 8.0]
mpl.rcParams['figure.dpi'] = 120
# -
# we will save everything in output/
ROOT = Path('output/')
ROOT.mkdir(exist_ok=True)
# we use ploomber to organize our tasks
dag = DAG()
# +
# first two tasks just download the data
SOURCE = 'https://data.chhs.ca.gov/dataset/6882c390-b2d7-4b9a-aefa-2068cee63e47/resource/6cd8d424-dfaa-4bdd-9410-a3d656e1176e/download/covid19data.csv'
download = DownloadFromURL(SOURCE, File(ROOT / 'raw.csv'), dag, name='raw')
SOURCE_POP = 'https://data.ca.gov/dataset/7a8c03d3-ed86-498a-acdb-8ea09ccb4130/resource/2c217b79-4625-4ab2-86b3-6fc5d66f0409/download/population-estimates-and-projections-by-county-age-and-sex-california-1970-2050.csv'
download_pop = DownloadFromURL(SOURCE_POP, File(
    ROOT / 'raw_pop'), dag, name='raw_pop')
# -
# we then join the downloaded data to normalize using population by county
def _join(upstream, product):
    """Join California COVID-19 hospitalizations with population data.

    Reads the raw case CSV and the population-projection CSV, keeps each
    county's most recent cumulative count, joins on the upper-cased county
    name, normalizes to cases per 100,000 residents (2020 population), and
    writes the result to ``product``.

    Args:
        upstream: mapping with 'raw' (cases CSV path) and 'raw_pop'
            (population CSV path) entries.
        product: output CSV path.
    """
    df = pd.read_csv(str(upstream['raw']))
    df['Most Recent Date'] = pd.to_datetime(df['Most Recent Date'])
    # For every county, find the row with the latest report date.
    idx_m_recent = df.groupby('County Name')['Most Recent Date'].idxmax()
    # idxmax() returns index *labels*, so select with .loc (not .iloc): the
    # two only coincide on a default RangeIndex, and .loc stays correct if
    # the frame is ever filtered or re-indexed upstream.
    m_recent_total = df.loc[idx_m_recent][['Most Recent Date', 'County Name',
                                           'Total Count Confirmed']]
    # The population data upper-cases county names; align the join key.
    m_recent_total['county'] = m_recent_total['County Name'].str.upper()
    pop = pd.read_csv(str(upstream['raw_pop']))
    # 2020 projected population per county (summed over age/sex strata).
    pop_by_county = pop[pop.year == 2020].groupby('county')[
        ['pop_total']].sum()
    m_recent = pop_by_county.merge(m_recent_total, on='county')
    m_recent['Total count per 100k population'] = (m_recent['Total Count Confirmed']
                                                  / m_recent['pop_total'] * 100_000)
    m_recent.to_csv(str(product), index=False)
# +
# wire the join task into the DAG; its product is the cleaned CSV
join = PythonCallable(_join, File(ROOT / 'joined.csv'), dag, name='joined')
# the joined data depends on the raw data
(download + download_pop) >> join
# -
# summary table of task status
dag.status()
# plot. NOTE: pygraphviz is required to plot, easiest way to install is via "conda install pygraphviz"
path = dag.plot()
Image(filename=path)
# run all tasks
dag.build()
# ## Hospitalizations per 100,000 people
# load joined data
m_recent = pd.read_csv(str(dag['joined']))
# top-10 counties by normalized case count
(m_recent[['County Name', 'Total count per 100k population']]
 .set_index('County Name')
 .sort_values(by='Total count per 100k population', ascending=False)
 .head(10)
 .plot(kind='bar', title='Normalized cases by county (top 10)'))
| 0 | 0 | 0 |
0f86dbae1d1bebee6773f04c485fa461ab6aa28e | 4,831 | py | Python | sastool/classes2/scan/specfile.py | awacha/sastool | c77c100d5b6e69a6327ece3936116207f3cdf961 | [
"BSD-3-Clause"
] | 1 | 2017-02-06T13:15:41.000Z | 2017-02-06T13:15:41.000Z | sastool/classes2/scan/specfile.py | awacha/sastool | c77c100d5b6e69a6327ece3936116207f3cdf961 | [
"BSD-3-Clause"
] | 1 | 2017-02-16T16:06:23.000Z | 2017-02-16T16:06:23.000Z | sastool/classes2/scan/specfile.py | awacha/sastool | c77c100d5b6e69a6327ece3936116207f3cdf961 | [
"BSD-3-Clause"
] | null | null | null | import datetime
import warnings
import weakref
import dateutil.parser
import numpy as np
| 38.959677 | 130 | 0.474022 | import datetime
import warnings
import weakref
import dateutil.parser
import numpy as np
class SpecScan(object):
    """A single scan (one ``#S`` block) inside a SPEC data file.

    Instances are created lazily from the file's table of contents; call
    :meth:`reload` to parse the scan header and its data table.
    """
    def __init__(self, specfile, index, linenumber, command, startdate=None, comment='', motor=''):
        # `specfile` is (a weak proxy to) the owning SpecFile; `linenumber`
        # is the 0-based offset of this scan's "#S" line within that file.
        self.specfile = specfile
        self.index = index
        self.linenumber = linenumber
        self.command = command
        self.motor = motor
        if startdate is None:
            startdate = datetime.datetime.now()
        self.startdate = startdate
        self.comment = comment
        self.data = None  # parsed lazily by reload()
        self.motorpositionsatstart = []
        self.columnnames = []
    def reload(self):
        """Re-read this scan's header and data table from the SPEC file."""
        with open(self.specfile.filename, 'rt', encoding='utf-8') as f:
            # Skip to the "#S" line recorded in the table of contents.
            for i in range(self.linenumber):
                f.readline()
            l = f.readline() # this is the #S line
            begin, index, command = l.split(None, 2)
            assert begin == '#S'
            assert int(index) == self.index
            assert command == self.command
            while l.startswith('#'):
                # read the header
                l = f.readline()
                if not l:
                    # EOF, happens when no scan data is recorded.
                    break
                elif not l.startswith('#'):
                    # this line is the first data line
                    break
                elif l.startswith('#D '):
                    self.startdate = dateutil.parser.parse(l.split(None, 1)[1])
                elif l.startswith('#C '):
                    self.comment = l.split(None, 1)[1]
                elif l.startswith('#T '):
                    # counting time, e.g. "#T 1 (Seconds)" -> 1.0, "Seconds"
                    self.countingtime = float(l.split(None, 2)[1])
                    self.countingunits = l.strip().split(None, 2)[2][1:-1]
                elif l.startswith('#P'):
                    # motor positions at scan start, spread over #P0, #P1, ...
                    self.motorpositionsatstart.extend([float(x) for x in l.split()[1:]])
                elif l.startswith('#N '):
                    self.expectedlength = int(l.split(None, 1)[1])
                elif l.startswith('#L '):
                    self.columnnames = l.split(None)[1:]
                else:
                    pass
                    # warnings.warn('Unknown line in scan #{:d} of scanfile {}: {}'.format(self.index, self.specfile.filename, l))
            # One float64 field per recorded column. `float` replaces the
            # deprecated `np.float` alias (removed in NumPy 1.24); the
            # resulting dtype is identical.
            self.dtype = np.dtype(list(zip(self.columnnames, [float] * len(self.columnnames))))
            if not l:
                # no data in the file
                self.data = np.zeros(0, dtype=self.dtype)
            else:
                self.data = []
                while l.strip():
                    self.data.append(tuple([float(x) for x in l.split()]))
                    l = f.readline()
                self.data = np.array(self.data, dtype=self.dtype)
class SpecFile(object):
    """A SPEC data file.

    Parses the global header and builds a table of contents (``toc``)
    mapping scan index -> lazily-loaded :class:`SpecScan`.
    """
    def __init__(self, filename):
        self.filename = filename
        self.toc = {}  # {scan index: SpecScan}
        self.reload()
    def get_scan(self, index):
        # Return the SpecScan with the given #S index (KeyError if absent).
        return self.toc[index]
    def max_scans(self):
        # Highest scan index present.
        # NOTE(review): raises ValueError when the file holds no scans --
        # confirm callers guard against empty files.
        return max(self.toc.keys())
    def reload(self):
        """Re-scan the file: parse the global header and rebuild the TOC."""
        with open(self.filename, 'rt', encoding='utf-8') as f:
            l = ''
            # read the header part
            lineindex = -1  # 0-based line number of the line held in `l`
            while not l.startswith('#S '):
                l = f.readline()
                lineindex += 1
                if l.startswith('#F '):
                    self.original_filename = l.split(None, 1)[1]
                elif l.startswith('#E '):
                    self.epoch = float(l.split(None, 1)[1])
                elif l.startswith('#D '):
                    self.datecreated = dateutil.parser.parse(l.split(None, 1)[1])
                elif l.startswith('#C '):
                    self.comment = l.split(None, 1)[1]
                elif l.startswith('#S '):
                    # first scan reached: header is over
                    break
                elif l.startswith('#'):
                    warnings.warn('Unknown line in the header of scan file {}: {}'.format(self.filename, l))
                elif not l.strip():
                    pass
                else:
                    # NOTE(review): any non-comment, non-blank line in the
                    # header aborts parsing; consider a warning instead.
                    assert False
            index = None
            # Scan section: index every "#S" block; data is parsed lazily by
            # SpecScan.reload(), only the TOC metadata is collected here.
            while True:
                if l.startswith('#S '):
                    begin, index, command = l.split(None, 2)
                    index = int(index)
                    # weakref.proxy avoids a file <-> scan reference cycle
                    self.toc[index] = SpecScan(weakref.proxy(self), index, lineindex, command)
                elif l.startswith('#D '):
                    self.toc[index].startdate = dateutil.parser.parse(l.split(None, 1)[1])
                elif l.startswith('#C '):
                    self.toc[index].comment = l.split(None, 1)[1]
                elif l.startswith('#L '):
                    # first column label of the data table is the scanned motor
                    self.toc[index].motor = l.split()[1]
                else:
                    pass
                l = f.readline()
                lineindex += 1
                if not l:
                    break
f16de43a8fd0c0148dd86cd9c8b0afec95bbef58 | 3,064 | py | Python | tools/docker/deepwalk-demo.py | wjlight/euler | 47dbcaa46254b4d58c6cb1ed846910fec607394a | [
"Apache-2.0"
] | null | null | null | tools/docker/deepwalk-demo.py | wjlight/euler | 47dbcaa46254b4d58c6cb1ed846910fec607394a | [
"Apache-2.0"
] | null | null | null | tools/docker/deepwalk-demo.py | wjlight/euler | 47dbcaa46254b4d58c6cb1ed846910fec607394a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import tensorflow as tf
import tf_euler
if __name__ == '__main__':
    print("begin....")
    tf_euler.initialize_embedded_graph('ppi')  # graph data directory
    # Sample a fixed batch of 128 nodes (any type) as walk roots.
    source = tf_euler.sample_node(128, tf_euler.ALL_NODE_TYPE)
    source.set_shape([128])
    # max_id=56944, embedding dim=256; edge types [0, 1] are walked.
    model = DeepWalk(tf_euler.ALL_NODE_TYPE, [0, 1], 56944, 256)
    _, loss, metric_name, metric = model(source)
    global_step = tf.train.get_or_create_global_step()
    train_op = tf.train.GradientDescentOptimizer(0.2).minimize(loss, global_step)
    tf.logging.set_verbosity(tf.logging.INFO)
    # Log loss/metric every 100 steps; stop after 2000 global steps.
    with tf.train.MonitoredTrainingSession(
        hooks=[
            tf.train.LoggingTensorHook({'step': global_step,
                                        'loss': loss, metric_name: metric}, 100),
            tf.train.StopAtStepHook(2000)
        ]) as sess:
        while not sess.should_stop():
            sess.run(train_op)
| 37.82716 | 81 | 0.686684 | # -*- coding: utf-8 -*-
import tensorflow as tf
import tf_euler
class DeepWalk(tf_euler.layers.Layer):
    """DeepWalk node-embedding model built on Euler graph ops.

    Random walks generate (source, context) pairs; a skip-gram objective
    with negative sampling trains two embedding tables (target/context).
    """
    def __init__(self, node_type, edge_type, max_id, dim,
                 num_negs=8, walk_len=3, left_win_size=1, right_win_size=1):
        super(DeepWalk, self).__init__()
        self.node_type = node_type
        self.edge_type = edge_type
        self.max_id = max_id
        self.num_negs = num_negs          # negatives per (src, pos) pair
        self.walk_len = walk_len          # steps per random walk
        self.left_win_size = left_win_size
        self.right_win_size = right_win_size
        # max_id + 1 rows: ids are used directly as embedding indices.
        self.target_encoder = tf_euler.layers.Embedding(max_id + 1, dim)
        self.context_encoder = tf_euler.layers.Embedding(max_id + 1, dim)
    def call(self, inputs):
        # inputs: 1-D tensor of node ids (the batch of walk roots).
        src, pos, negs = self.sampler(inputs)
        embedding = self.target_encoder(src)
        embedding_pos = self.context_encoder(pos)
        embedding_negs = self.context_encoder(negs)
        loss, mrr = self.decoder(embedding, embedding_pos, embedding_negs)
        # Return the *target* embeddings of the original inputs.
        embedding = self.target_encoder(inputs)
        return (embedding, loss, 'mrr', mrr)
    def sampler(self, inputs):
        """Generate skip-gram (src, pos) pairs plus sampled negatives."""
        batch_size = tf.size(inputs)
        # Unreachable steps are padded with a sentinel id (max_id + 1).
        path = tf_euler.random_walk(
            inputs, [self.edge_type] * self.walk_len,
            default_node=self.max_id + 1)
        pair = tf_euler.gen_pair(path, self.left_win_size, self.right_win_size)
        num_pairs = pair.shape[1]
        src, pos = tf.split(pair, [1, 1], axis=-1)
        # num_negs shared negatives per pair, drawn uniformly by node type.
        negs = tf_euler.sample_node(batch_size * num_pairs * self.num_negs,
                                    self.node_type)
        # Flatten to one row per (src, pos) pair.
        src = tf.reshape(src, [batch_size * num_pairs, 1])
        pos = tf.reshape(pos, [batch_size * num_pairs, 1])
        negs = tf.reshape(negs, [batch_size * num_pairs, self.num_negs])
        return src, pos, negs
    def decoder(self, embedding, embedding_pos, embedding_negs):
        """Skip-gram loss with negative sampling, plus an MRR metric."""
        logits = tf.matmul(embedding, embedding_pos, transpose_b=True)
        neg_logits = tf.matmul(embedding, embedding_negs, transpose_b=True)
        # Positives pushed toward label 1, negatives toward label 0.
        true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(logits), logits=logits)
        negative_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(neg_logits), logits=neg_logits)
        loss = tf.reduce_sum(true_xent) + tf.reduce_sum(negative_xent)
        mrr = tf_euler.metrics.mrr_score(logits, neg_logits)
        return loss, mrr
if __name__ == '__main__':
    print("begin....")
    tf_euler.initialize_embedded_graph('ppi')  # graph data directory
    # Sample a fixed batch of 128 nodes (any type) as walk roots.
    source = tf_euler.sample_node(128, tf_euler.ALL_NODE_TYPE)
    source.set_shape([128])
    # max_id=56944, embedding dim=256; edge types [0, 1] are walked.
    model = DeepWalk(tf_euler.ALL_NODE_TYPE, [0, 1], 56944, 256)
    _, loss, metric_name, metric = model(source)
    global_step = tf.train.get_or_create_global_step()
    train_op = tf.train.GradientDescentOptimizer(0.2).minimize(loss, global_step)
    tf.logging.set_verbosity(tf.logging.INFO)
    # Log loss/metric every 100 steps; stop after 2000 global steps.
    with tf.train.MonitoredTrainingSession(
        hooks=[
            tf.train.LoggingTensorHook({'step': global_step,
                                        'loss': loss, metric_name: metric}, 100),
            tf.train.StopAtStepHook(2000)
        ]) as sess:
        while not sess.should_stop():
            sess.run(train_op)
| 2,031 | 17 | 122 |
573e5bad0a5a0346c2041006a541f977c5263b28 | 659 | py | Python | trycode/trycode.py | GuardianWang/DM_hw_bagging | 3bccfb0c4ae32fdb757055d1e726ed585f9d2885 | [
"MIT"
] | null | null | null | trycode/trycode.py | GuardianWang/DM_hw_bagging | 3bccfb0c4ae32fdb757055d1e726ed585f9d2885 | [
"MIT"
] | null | null | null | trycode/trycode.py | GuardianWang/DM_hw_bagging | 3bccfb0c4ae32fdb757055d1e726ed585f9d2885 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from model.config import arguments
from model.model import BaseNet
from dataset.dataset import FlowerData
import matplotlib.pyplot as plt
# Disabled checkpoint-loading experiment, kept for reference:
# model = BaseNet(num_class=2)
# model = model.half()
# stat_dict = torch.load('./checkpoint/base_epoch%.4d.pth' % 100)
# model.load_state_dict(stat_dict, strict=False)
# Scratch check: two independent figures rendered by a single plt.show().
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
ax1.plot([1, 2], [3, 4])
ax2.plot([3, 4], [1, 2])
# fig1.show()
# fig2.show()
plt.show()
print()
| 19.382353 | 65 | 0.732929 | import numpy as np
import cv2
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from model.config import arguments
from model.model import BaseNet
from dataset.dataset import FlowerData
import matplotlib.pyplot as plt
# Disabled checkpoint-loading experiment, kept for reference:
# model = BaseNet(num_class=2)
# model = model.half()
# stat_dict = torch.load('./checkpoint/base_epoch%.4d.pth' % 100)
# model.load_state_dict(stat_dict, strict=False)
# Scratch check: two independent figures rendered by a single plt.show().
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
ax1.plot([1, 2], [3, 4])
ax2.plot([3, 4], [1, 2])
# fig1.show()
# fig2.show()
plt.show()
print()
| 0 | 0 | 0 |
3f48b44f39363f405aac8826e506e8771f05b074 | 8,734 | py | Python | lib/tray_icon.py | neurodiverseEsoteric/nimbus | a79a37d6001d84116831c9f3f99f5d9acfa7fd19 | [
"CC-BY-3.0"
] | 3 | 2015-11-04T10:48:12.000Z | 2020-07-12T06:46:27.000Z | lib/tray_icon.py | esotericDisciple/nimbus | a79a37d6001d84116831c9f3f99f5d9acfa7fd19 | [
"CC-BY-3.0"
] | null | null | null | lib/tray_icon.py | esotericDisciple/nimbus | a79a37d6001d84116831c9f3f99f5d9acfa7fd19 | [
"CC-BY-3.0"
] | 1 | 2021-05-05T13:56:49.000Z | 2021-05-05T13:56:49.000Z | #! /usr/bin/env python3
# ------------
# tray_icon.py
# ------------
# Author: Daniel Sim (foxhead128)
# License: See LICENSE.md for more details.
# Description: This module contains a system tray icon class, used by Nimbus
# as it runs in the background.
# Import everything we need.
import sys
import subprocess
import common
import browser
import translate
import settings
import session
from translate import tr
# Extremely specific imports from PyQt5/PySide.
# We give PyQt5 priority because it supports Qt5.
if not common.pyqt4:
from PyQt5.QtCore import pyqtSignal, Qt, QTimer, QSize
Signal = pyqtSignal
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QWidget, QApplication, QMenu, QAction, QSystemTrayIcon, QDesktopWidget, QMessageBox, QToolButton, QToolBar, QLabel
else:
try:
from PyQt4.QtCore import pyqtSignal, Qt, QTimer, QSize
Signal = pyqtSignal
from PyQt4.QtGui import QWidget, QCursor, QApplication, QMenu, QAction, QSystemTrayIcon, QDesktopWidget, QMessageBox, QToolButton, QToolBar, QLabel
except:
from PySide.QtCore import Signal, Qt, QTimer, QSize
from PySide.QtGui import QWidget, QCursor, QApplication, QMenu, QAction, QSystemTrayIcon, QDesktopWidget, QMessageBox, QToolButton, QToolBar, QLabel
# System tray icon.
| 39.520362 | 156 | 0.639112 | #! /usr/bin/env python3
# ------------
# tray_icon.py
# ------------
# Author: Daniel Sim (foxhead128)
# License: See LICENSE.md for more details.
# Description: This module contains a system tray icon class, used by Nimbus
# as it runs in the background.
# Import everything we need.
import sys
import subprocess
import common
import browser
import translate
import settings
import session
from translate import tr
# Extremely specific imports from PyQt5/PySide.
# We give PyQt5 priority because it supports Qt5.
if not common.pyqt4:
from PyQt5.QtCore import pyqtSignal, Qt, QTimer, QSize
Signal = pyqtSignal
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QWidget, QApplication, QMenu, QAction, QSystemTrayIcon, QDesktopWidget, QMessageBox, QToolButton, QToolBar, QLabel
else:
try:
from PyQt4.QtCore import pyqtSignal, Qt, QTimer, QSize
Signal = pyqtSignal
from PyQt4.QtGui import QWidget, QCursor, QApplication, QMenu, QAction, QSystemTrayIcon, QDesktopWidget, QMessageBox, QToolButton, QToolBar, QLabel
except:
from PySide.QtCore import Signal, Qt, QTimer, QSize
from PySide.QtGui import QWidget, QCursor, QApplication, QMenu, QAction, QSystemTrayIcon, QDesktopWidget, QMessageBox, QToolButton, QToolBar, QLabel
class BackgroundToolBar(QToolBar):
    """Frameless, always-on-top toolbar shown while the browser runs in the
    background. It can be dragged with the left mouse button and snaps to
    the screen edges / midpoints on release."""
    def __init__(self, *args, **kwargs):
        super(BackgroundToolBar, self).__init__(*args, **kwargs)
        self.setIconSize(QSize(22, 22))
        # Tracks whether show() has positioned the bar once already.
        self.shownOnce = False
        self.desktopWidget = QDesktopWidget()
        self.setWindowFlags(Qt.FramelessWindowHint|Qt.WindowStaysOnTopHint)
    def halfScreenHeight(self):
        # y coordinate that vertically centers the bar on screen.
        return int((self.desktopWidget.height()-self.height())/2)
    def halfScreenWidth(self):
        # x coordinate that horizontally centers the bar on screen.
        return int((self.desktopWidget.width()-self.width())/2)
    def show(self):
        super(BackgroundToolBar, self).show()
        if not self.shownOnce:
            # First show: dock at the right edge, vertically centered.
            self.move(self.desktopWidget.width()-self.width(), self.halfScreenHeight())
            self.shownOnce = True
    def mousePressEvent(self, ev):
        # Left button starts a drag; other buttons get default handling.
        if ev.button() != Qt.LeftButton:
            return QToolBar.mousePressEvent(self, ev)
        else:
            if not QApplication.instance().keyboardModifiers() in (Qt.ControlModifier, Qt.ShiftModifier, Qt.AltModifier):
                QApplication.setOverrideCursor(Qt.SizeAllCursor)
            # Remember where the drag started (cursor and window origin).
            self.mouseX = ev.globalX()
            self.origX = self.x()
            self.mouseY = ev.globalY()
            self.origY = self.y()
    def mouseMoveEvent(self, ev):
        # NOTE(review): mouseX/mouseY only exist after a left-button press;
        # a move event arriving first would raise AttributeError -- confirm
        # Qt never delivers moves without a prior press here.
        if self.mouseX and self.mouseY and not self.isMaximized():
            self.move(self.origX + ev.globalX() - self.mouseX,
                      self.origY + ev.globalY() - self.mouseY)
    def mouseReleaseEvent(self, ev):
        QApplication.restoreOverrideCursor()
        # Clamp back on-screen; x/y record whether a horizontal/vertical
        # edge was hit, which enables snapping to the screen midpoints.
        x = False
        y = False
        if self.x() + self.width() > self.desktopWidget.width():
            self.move(self.desktopWidget.width()-self.width(), self.y())
            x = True
        elif self.x() < 0:
            self.move(0, self.y())
            x = True
        # Snap vertically to center when released near the middle (±64 px).
        if x and self.halfScreenHeight() - 64 <= self.y() <= self.halfScreenHeight() + 64:
            self.move(self.x(), self.halfScreenHeight())
        if self.y() < 0:
            self.move(self.x(), 0)
            y = True
        elif self.y() + self.height() > self.desktopWidget.height():
            self.move(self.x(), self.desktopWidget.height()-self.height())
            y = True
        # Snap horizontally to center when released near the middle (±64 px).
        if y and self.halfScreenWidth() - 64 <= self.x() <= self.halfScreenWidth() + 64:
            self.move(self.halfScreenWidth(), self.y())
        return QToolBar.mouseReleaseEvent(self, ev)
# System tray icon.
class SystemTrayIcon(QSystemTrayIcon):
    """Tray icon that keeps Nimbus reachable while it runs in the background.

    Provides a context menu for window management, sessions, settings and
    quitting, relaying requests to the rest of the app via signals.
    """
    # Emitted when the user asks for a new browser window.
    newWindowRequested = Signal()
    # Emitted when the user asks to reopen the last closed window.
    # NOTE(review): not emitted anywhere in this class -- reopenWindow()
    # calls session.reopenWindow() directly; confirm the signal is needed.
    windowReopenRequested = Signal()
    def __init__(self, parent):
        super(SystemTrayIcon, self).__init__(common.app_icon, parent)
        # Set tooltip.
        self.setToolTip(common.app_name)
        # Invisible helper widget, used as a dialog parent (see about()).
        self.widget = QWidget(None)
        self.widget.resize(0, 0)
        self.widget.setWindowFlags(Qt.FramelessWindowHint)
        # Set context menu.
        self.menu = QMenu(None)
        self.setContextMenu(self.menu)
        # Clicking the tray icon also opens the menu.
        self.activated.connect(self.showMenu)
        # New window action
        newWindowAction = QAction(common.complete_icon("window-new"), tr("&New Window"), self)
        newWindowAction.triggered.connect(self.newWindowRequested.emit)
        self.menu.addAction(newWindowAction)
        # Reopen window action
        reopenWindowAction = QAction(common.complete_icon("reopen-window"), tr("R&eopen Window"), self)
        reopenWindowAction.triggered.connect(self.reopenWindow)
        self.menu.addAction(reopenWindowAction)
        self.menu.addSeparator()
        self.sessionManager = session.SessionManager(None)
        # Load session action
        loadSessionAction = QAction(common.complete_icon("document-open"), tr("&Load Session..."), self)
        loadSessionAction.triggered.connect(self.loadSession)
        self.menu.addAction(loadSessionAction)
        # Save session action
        saveSessionAction = QAction(common.complete_icon("document-save-as"), tr("Sa&ve Session..."), self)
        saveSessionAction.triggered.connect(self.saveSession)
        self.menu.addAction(saveSessionAction)
        self.menu.addSeparator()
        # Settings action
        settingsAction = QAction(common.complete_icon("preferences-system"), tr("&Settings..."), self)
        settingsAction.triggered.connect(self.openSettings)
        self.menu.addAction(settingsAction)
        # Clippings manager
        clippingsAction = QAction(common.complete_icon("edit-paste"), tr("&Manage Clippings..."), self)
        clippingsAction.triggered.connect(self.openClippings)
        self.menu.addAction(clippingsAction)
        self.menu.addSeparator()
        # About Nimbus action.
        aboutAction = QAction(common.complete_icon("help-about"), tr("A&bout Nimbus"), self)
        aboutAction.triggered.connect(self.about)
        self.menu.addAction(aboutAction)
        # Quit action
        quitAction = QAction(common.complete_icon("application-exit"), tr("Quit"), self)
        quitAction.triggered.connect(QApplication.quit)
        self.menu.addAction(quitAction)
        # Disabled fallback (string literal, never executed): a floating
        # BackgroundToolBar shown whenever no browser window exists.
        """if self.geometry().width() < 8:
            self.toolBar = BackgroundToolBar(None)
            self.toolBar.setWindowTitle(common.app_name)
            #self.toolBar.setStyleSheet("QToolBar{background:palette(window);border:0;}")
            self.button = QToolButton(self.toolBar)
            self.button.setIcon(common.app_icon)
            self.button.clicked.connect(self.showMenu)
            self.toolBar.addWidget(self.button)
            extender = QLabel(common.app_name, self.toolBar)
            self.toolBar.addWidget(extender)
            self.toolBar.hide()
            timer = QTimer(timeout=self.toggleButton, parent=self)
            timer.start(500)"""
    def toggleButton(self):
        # Show the fallback toolbar only while no browser windows are open.
        if len(browser.windows) == 0:
            self.toolBar.show()
        else:
            self.toolBar.hide()
    # Show menu.
    def showMenu(self, reason=None):
        self.menu.show()
        if reason == QSystemTrayIcon.Trigger or not reason:
            # Position at the cursor, clamped so the menu stays on screen.
            y = QDesktopWidget()
            self.menu.move(min(QCursor.pos().x(), y.width() - self.menu.width()), min(QCursor.pos().y(), y.height() - self.menu.height()))
            y.deleteLater()
    # About.
    def about(self):
        # Parent the dialog to the newest browser window, falling back to
        # the invisible helper widget when none exists.
        try: parent = browser.windows[-1]
        except:
            parent = self.widget
        self.widget.show()
        QMessageBox.about(parent, tr("About Nimbus"),\
                          "<h3>" + common.app_name + " " +\
                          common.app_version +\
                          "</h3>" +\
                          tr("A Qt-based web browser made in Python."))
        self.widget.hide()
    # Reopen window.
    def reopenWindow(self):
        session.reopenWindow()
    # Open settings dialog.
    def openSettings(self):
        settings.settingsDialog.show()
    # Open clippings manager.
    def openClippings(self):
        settings.clippingsManager.show()
    def loadSession(self):
        self.sessionManager.show()
    def saveSession(self):
        session.saveSessionManually()
    def showMessage(self, title, msg, icon=QSystemTrayIcon.Information, millisecondsTimeoutHint=10000):
        # On Linux, shell out to notify-send for native notifications.
        # NOTE(review): the `icon` argument is ignored on this path (always
        # dialog-information) -- confirm that is intended.
        if sys.platform.startswith("linux"):
            subprocess.Popen(["notify-send", "--expire-time", str(millisecondsTimeoutHint), "--icon=dialog-information", "%s\n\n%s" % (title, msg,)])
        else:
            super(SystemTrayIcon, self).showMessage(title, msg)
| 6,690 | 479 | 227 |
bd51debb9e1f1e2e12da27f3e43949379ad725e9 | 1,089 | py | Python | src/m1r_functions.py | wilcoxeb/02-ObjectsFunctionsAndMethods-1 | 9a834b48b50f30d46a82ec019e1928fc449416e3 | [
"MIT"
] | null | null | null | src/m1r_functions.py | wilcoxeb/02-ObjectsFunctionsAndMethods-1 | 9a834b48b50f30d46a82ec019e1928fc449416e3 | [
"MIT"
] | null | null | null | src/m1r_functions.py | wilcoxeb/02-ObjectsFunctionsAndMethods-1 | 9a834b48b50f30d46a82ec019e1928fc449416e3 | [
"MIT"
] | null | null | null | ###############################################################################
# Done: READ the code below. TRACE (by hand) the execution of the code,
# predicting what will get printed. Then run the code
# and compare your prediction to what actually was printed.
# Then mark this _TODO_ as DONE and commit-and-push your work.
#
###############################################################################
# Entry point: run the demo (`main` is defined alongside the greeting
# functions in the full exercise file).
main()
# The comments below are the author's hand-traced prediction of the
# program's output, written informally (without the exact capitalization
# and punctuation the print calls produce).
# hello snow white how are things?
# goodbye bashful see you later!
# Ciao
# Bai Bai
# hello grumpy how are things?
# hello sleepy how are things?
# hello magic mirror how are things?
# goodbye cruel queen see you later?
# Ciao
# Bai Bai
| 23.673913 | 79 | 0.577594 | ###############################################################################
# Done: READ the code below. TRACE (by hand) the execution of the code,
# predicting what will get printed. Then run the code
# and compare your prediction to what actually was printed.
# Then mark this _TODO_ as DONE and commit-and-push your work.
#
###############################################################################
def main():
    """Run the demo call sequence for the greeting functions."""
    hello("Snow White")
    goodbye("Bashful")
    for dwarf in ("Grumpy", "Sleepy"):
        hello(dwarf)
    hello_and_goodbye("Magic Mirror", "Cruel Queen")
def hello(friend):
    """Print a one-line greeting addressed to *friend*."""
    print(f"Hello, {friend} - how are things?")
def goodbye(friend):
    """Print a multi-line farewell addressed to *friend*."""
    farewell_lines = (f"Goodbye, {friend} - see you later!", ' Ciao!', ' Bai bai!')
    for line in farewell_lines:
        print(line)
def hello_and_goodbye(person1, person2):
hello(person1)
goodbye(person2)
main()
# hello snow white how are things?
# goodbye bashful see you later!
# Ciao
# Bai Bai
# hello grumpy how are things?
# hello sleepy how are things?
# hello magic mirror how are things?
# goodbye cruel queen see you later?
# Ciao
# Bai Bai
| 331 | 0 | 92 |
264c94a6d5920632135b94eadd0c9c666c966b32 | 3,264 | py | Python | ee/clickhouse/sql/retention/retention.py | lharress/posthog | 73809d54b14ffc1b6ad6f600e0e4f06ab3090cb1 | [
"MIT"
] | null | null | null | ee/clickhouse/sql/retention/retention.py | lharress/posthog | 73809d54b14ffc1b6ad6f600e0e4f06ab3090cb1 | [
"MIT"
] | null | null | null | ee/clickhouse/sql/retention/retention.py | lharress/posthog | 73809d54b14ffc1b6ad6f600e0e4f06ab3090cb1 | [
"MIT"
] | null | null | null | RETENTION_SQL = """
SELECT
datediff(%(period)s, {trunc_func}(toDateTime(%(start_date)s)), reference_event.event_date) as base_interval,
datediff(%(period)s, reference_event.event_date, {trunc_func}(toDateTime(event_date))) as intervals_from_base,
COUNT(DISTINCT event.target) count
FROM (
{returning_event_query}
) event
JOIN (
{target_event_query}
) reference_event
ON (event.target = reference_event.target)
WHERE {trunc_func}(event.event_date) > {trunc_func}(reference_event.event_date)
GROUP BY base_interval, intervals_from_base
ORDER BY base_interval, intervals_from_base
"""
RETENTION_BREAKDOWN_SQL = """
SELECT
target_event.breakdown_values AS breakdown_values,
datediff(
%(period)s,
target_event.event_date,
dateTrunc(%(period)s, toDateTime(returning_event.event_date))
) AS intervals_from_base,
COUNT(DISTINCT returning_event.target) AS count
FROM
({returning_event_query}) AS returning_event
JOIN ({target_event_query}) target_event
ON returning_event.target = target_event.target
WHERE
dateTrunc(%(period)s, returning_event.event_date) >
dateTrunc(%(period)s, target_event.event_date)
GROUP BY
breakdown_values,
intervals_from_base
ORDER BY
breakdown_values,
intervals_from_base
"""
REFERENCE_EVENT_SQL = """
SELECT DISTINCT
{trunc_func}(e.timestamp) as event_date,
pdi.person_id as person_id,
e.uuid as uuid,
e.event as event
from events e JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) pdi on e.distinct_id = pdi.distinct_id
where toDateTime(e.timestamp) >= toDateTime(%(reference_start_date)s) AND toDateTime(e.timestamp) <= toDateTime(%(reference_end_date)s)
AND e.team_id = %(team_id)s {target_query} {filters}
"""
REFERENCE_EVENT_UNIQUE_SQL = """
SELECT DISTINCT
min({trunc_func}(e.timestamp)) as event_date,
pdi.person_id as person_id,
argMin(e.uuid, {trunc_func}(e.timestamp)) as min_uuid,
argMin(e.event, {trunc_func}(e.timestamp)) as min_event
from events e JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) pdi on e.distinct_id = pdi.distinct_id
WHERE e.team_id = %(team_id)s {target_query} {filters}
GROUP BY person_id HAVING
event_date >= toDateTime(%(reference_start_date)s) AND event_date <= toDateTime(%(reference_end_date)s)
"""
RETENTION_PEOPLE_SQL = """
SELECT DISTINCT person_id
FROM events e join ({GET_TEAM_PERSON_DISTINCT_IDS}) pdi on e.distinct_id = pdi.distinct_id
where toDateTime(e.timestamp) >= toDateTime(%(start_date)s) AND toDateTime(e.timestamp) <= toDateTime(%(end_date)s)
AND e.team_id = %(team_id)s AND person_id IN (
SELECT person_id FROM ({reference_event_query}) as persons
) {target_query} {filters}
LIMIT 100 OFFSET %(offset)s
"""
INITIAL_INTERVAL_SQL = """
SELECT datediff(%(period)s, {trunc_func}(toDateTime(%(start_date)s)), event_date) event_date,
count(DISTINCT target) FROM (
{reference_event_sql}
) GROUP BY event_date ORDER BY event_date
"""
INITIAL_BREAKDOWN_INTERVAL_SQL = """
SELECT
target_event.breakdown_values AS breakdown_values,
count(DISTINCT target_event.target)
FROM ({reference_event_sql}) AS target_event
GROUP BY breakdown_values ORDER BY breakdown_values
"""
| 34.357895 | 135 | 0.736213 | RETENTION_SQL = """
SELECT
datediff(%(period)s, {trunc_func}(toDateTime(%(start_date)s)), reference_event.event_date) as base_interval,
datediff(%(period)s, reference_event.event_date, {trunc_func}(toDateTime(event_date))) as intervals_from_base,
COUNT(DISTINCT event.target) count
FROM (
{returning_event_query}
) event
JOIN (
{target_event_query}
) reference_event
ON (event.target = reference_event.target)
WHERE {trunc_func}(event.event_date) > {trunc_func}(reference_event.event_date)
GROUP BY base_interval, intervals_from_base
ORDER BY base_interval, intervals_from_base
"""
RETENTION_BREAKDOWN_SQL = """
SELECT
target_event.breakdown_values AS breakdown_values,
datediff(
%(period)s,
target_event.event_date,
dateTrunc(%(period)s, toDateTime(returning_event.event_date))
) AS intervals_from_base,
COUNT(DISTINCT returning_event.target) AS count
FROM
({returning_event_query}) AS returning_event
JOIN ({target_event_query}) target_event
ON returning_event.target = target_event.target
WHERE
dateTrunc(%(period)s, returning_event.event_date) >
dateTrunc(%(period)s, target_event.event_date)
GROUP BY
breakdown_values,
intervals_from_base
ORDER BY
breakdown_values,
intervals_from_base
"""
REFERENCE_EVENT_SQL = """
SELECT DISTINCT
{trunc_func}(e.timestamp) as event_date,
pdi.person_id as person_id,
e.uuid as uuid,
e.event as event
from events e JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) pdi on e.distinct_id = pdi.distinct_id
where toDateTime(e.timestamp) >= toDateTime(%(reference_start_date)s) AND toDateTime(e.timestamp) <= toDateTime(%(reference_end_date)s)
AND e.team_id = %(team_id)s {target_query} {filters}
"""
REFERENCE_EVENT_UNIQUE_SQL = """
SELECT DISTINCT
min({trunc_func}(e.timestamp)) as event_date,
pdi.person_id as person_id,
argMin(e.uuid, {trunc_func}(e.timestamp)) as min_uuid,
argMin(e.event, {trunc_func}(e.timestamp)) as min_event
from events e JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) pdi on e.distinct_id = pdi.distinct_id
WHERE e.team_id = %(team_id)s {target_query} {filters}
GROUP BY person_id HAVING
event_date >= toDateTime(%(reference_start_date)s) AND event_date <= toDateTime(%(reference_end_date)s)
"""
RETENTION_PEOPLE_SQL = """
SELECT DISTINCT person_id
FROM events e join ({GET_TEAM_PERSON_DISTINCT_IDS}) pdi on e.distinct_id = pdi.distinct_id
where toDateTime(e.timestamp) >= toDateTime(%(start_date)s) AND toDateTime(e.timestamp) <= toDateTime(%(end_date)s)
AND e.team_id = %(team_id)s AND person_id IN (
SELECT person_id FROM ({reference_event_query}) as persons
) {target_query} {filters}
LIMIT 100 OFFSET %(offset)s
"""
INITIAL_INTERVAL_SQL = """
SELECT datediff(%(period)s, {trunc_func}(toDateTime(%(start_date)s)), event_date) event_date,
count(DISTINCT target) FROM (
{reference_event_sql}
) GROUP BY event_date ORDER BY event_date
"""
INITIAL_BREAKDOWN_INTERVAL_SQL = """
SELECT
target_event.breakdown_values AS breakdown_values,
count(DISTINCT target_event.target)
FROM ({reference_event_sql}) AS target_event
GROUP BY breakdown_values ORDER BY breakdown_values
"""
| 0 | 0 | 0 |
bc8e5dc154ba5e95d7c731a718ebf72a337e32b1 | 3,056 | py | Python | pyspj/entry/cli/entry.py | HansBug/pyspj | ed776cf7d2d1766ee4c2152221d1d3dbdd18d93a | [
"Apache-2.0"
] | null | null | null | pyspj/entry/cli/entry.py | HansBug/pyspj | ed776cf7d2d1766ee4c2152221d1d3dbdd18d93a | [
"Apache-2.0"
] | null | null | null | pyspj/entry/cli/entry.py | HansBug/pyspj | ed776cf7d2d1766ee4c2152221d1d3dbdd18d93a | [
"Apache-2.0"
] | null | null | null | from platform import python_version
from typing import Optional
import click
from click.core import Context, Option
from .base import CONTEXT_SETTINGS, _DEFAULT_RESULT_TYPE, run_test
from ...config.meta import __TITLE__, __VERSION__
from ...models import ResultType
def pyspj_entry(name: str, spj, result_type: str = _DEFAULT_RESULT_TYPE,
version: Optional[str] = None, author: Optional[str] = None, email: Optional[str] = None):
"""
Create your pyspj CLI entry.
:param name: Name of the special judge.
:param spj: Special judge function or string.
:param result_type: Type of result, default is ``FREE``.
:param version: Version information, default is ``None``.
:param author: Author of this special judge, default is ``None``.
:param email: Author email of this special judge, default is ``None``.
:return: A click function, can be used directly to create a CLI program.
"""
result_type = ResultType.loads(result_type)
@click.command(context_settings=CONTEXT_SETTINGS,
help=f"{name.capitalize()} - test a pair of given input and output.")
@click.option('-v', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True,
help="Show special judge's version information.")
@click.option('-i', '--input', 'input_content', type=str, help='Input content of special judge.')
@click.option('-o', '--output', 'output_content', type=str, help='Output content of special judge')
@click.option('-I', '--input_file', type=click.Path(exists=True, dir_okay=False, readable=True),
help='Input file of special judge (if -i is given, this will be ignored).')
@click.option('-O', '--output_file', type=click.Path(exists=True, dir_okay=False, readable=True),
help='Output file of special judge (if -o is given, this will be ignored).')
@click.option('-V', '--value', type=str, multiple=True,
help='Attached values for special judge (do not named as "stdin" or "stdout").')
@click.option('-p', '--pretty', type=bool, is_flag=True,
help='Use pretty mode to print json result.')
return _built_cli
| 49.290323 | 112 | 0.649215 | from platform import python_version
from typing import Optional
import click
from click.core import Context, Option
from .base import CONTEXT_SETTINGS, _DEFAULT_RESULT_TYPE, run_test
from ...config.meta import __TITLE__, __VERSION__
from ...models import ResultType
def pyspj_entry(name: str, spj, result_type: str = _DEFAULT_RESULT_TYPE,
version: Optional[str] = None, author: Optional[str] = None, email: Optional[str] = None):
"""
Create your pyspj CLI entry.
:param name: Name of the special judge.
:param spj: Special judge function or string.
:param result_type: Type of result, default is ``FREE``.
:param version: Version information, default is ``None``.
:param author: Author of this special judge, default is ``None``.
:param email: Author email of this special judge, default is ``None``.
:return: A click function, can be used directly to create a CLI program.
"""
result_type = ResultType.loads(result_type)
def print_version(ctx: Context, param: Option, value: bool) -> None:
if not value or ctx.resilient_parsing:
return
if version:
click.echo(f'Special judge - {name}, version {version}.')
else:
click.echo(f'Special judge - {name}.')
if author:
if email:
click.echo(f'Developed by {author}, {email}.')
else:
click.echo(f'Developed by {author}.')
click.echo(f'Powered by {__TITLE__}, version {__VERSION__}.')
click.echo(f'Based on Python {python_version()}.')
ctx.exit()
@click.command(context_settings=CONTEXT_SETTINGS,
help=f"{name.capitalize()} - test a pair of given input and output.")
@click.option('-v', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True,
help="Show special judge's version information.")
@click.option('-i', '--input', 'input_content', type=str, help='Input content of special judge.')
@click.option('-o', '--output', 'output_content', type=str, help='Output content of special judge')
@click.option('-I', '--input_file', type=click.Path(exists=True, dir_okay=False, readable=True),
help='Input file of special judge (if -i is given, this will be ignored).')
@click.option('-O', '--output_file', type=click.Path(exists=True, dir_okay=False, readable=True),
help='Output file of special judge (if -o is given, this will be ignored).')
@click.option('-V', '--value', type=str, multiple=True,
help='Attached values for special judge (do not named as "stdin" or "stdout").')
@click.option('-p', '--pretty', type=bool, is_flag=True,
help='Use pretty mode to print json result.')
def _built_cli(input_content, output_content,
input_file, output_file, value, pretty):
return run_test(input_content, output_content, input_file, output_file, value, result_type, spj, pretty)
return _built_cli
| 789 | 0 | 53 |
5ab8e6b588336a97cc9361777872cd9fe768a34e | 1,341 | py | Python | Vendetron 3000.py | oglivie6720/Python-PC3 | 55d30326baa0efb5c61f4508aef96aa47ca1211e | [
"MIT"
] | null | null | null | Vendetron 3000.py | oglivie6720/Python-PC3 | 55d30326baa0efb5c61f4508aef96aa47ca1211e | [
"MIT"
] | null | null | null | Vendetron 3000.py | oglivie6720/Python-PC3 | 55d30326baa0efb5c61f4508aef96aa47ca1211e | [
"MIT"
] | null | null | null | print(''' Vendetron 3000
El mejor programa para los vendedores en Panama!!
Izzzz Criminal!!!
''')
#Listas []
#Duplas ()
#Diccionarios {}
ventas_globales = 0.0
lista_vendedores = []
while True:
registro = {}
nombre = input("\nNombre: ") #Se introduce el nombre del vendedor.
while True:
try:
venta_mensual = float(input("Venta Mensual: ")) #Se va a leer las ventas.
break
except:
print("Tas metiendo demencia vivo!!!")
if venta_mensual > 100_000.00:
print("Eres un vendedor estrella!!!")
elif venta_mensual > 90000.00:
print("Te falta poco para la meta!!!")
elif venta_mensual > 50000:
print("Sobreviviste :)")
elif venta_mensual > 0.00:
print("Larga a vender mas sinverguenza")
else:
print("Que haces aqui? DESPEDIDO!!!")
registro["nombre"] = nombre
registro["venta"] = venta_mensual
lista_vendedores.append(registro)
opcion = input("Desea continuar (S/N)")
if opcion == 'N' or opcion == 'n':
break
print("Lista de Vendedores: ")
for x in lista_vendedores:
print(str(x["nombre"]) + ", ")
ventas_globales += float(x["venta"])
print("Ventas Globales:", str(ventas_globales))
print("Venta Maxima:" + str(max([x['venta'] for x in lista_vendedores])))
| 27.9375 | 85 | 0.609247 | print(''' Vendetron 3000
El mejor programa para los vendedores en Panama!!
Izzzz Criminal!!!
''')
#Listas []
#Duplas ()
#Diccionarios {}
ventas_globales = 0.0
lista_vendedores = []
while True:
registro = {}
nombre = input("\nNombre: ") #Se introduce el nombre del vendedor.
while True:
try:
venta_mensual = float(input("Venta Mensual: ")) #Se va a leer las ventas.
break
except:
print("Tas metiendo demencia vivo!!!")
if venta_mensual > 100_000.00:
print("Eres un vendedor estrella!!!")
elif venta_mensual > 90000.00:
print("Te falta poco para la meta!!!")
elif venta_mensual > 50000:
print("Sobreviviste :)")
elif venta_mensual > 0.00:
print("Larga a vender mas sinverguenza")
else:
print("Que haces aqui? DESPEDIDO!!!")
registro["nombre"] = nombre
registro["venta"] = venta_mensual
lista_vendedores.append(registro)
opcion = input("Desea continuar (S/N)")
if opcion == 'N' or opcion == 'n':
break
print("Lista de Vendedores: ")
for x in lista_vendedores:
print(str(x["nombre"]) + ", ")
ventas_globales += float(x["venta"])
print("Ventas Globales:", str(ventas_globales))
print("Venta Maxima:" + str(max([x['venta'] for x in lista_vendedores])))
| 0 | 0 | 0 |
7af1daaac6d64cccfcd170c171b4967f17369f26 | 4,179 | py | Python | test/chemistry/test_driver_gaussian_forces.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | [
"Apache-2.0"
] | 504 | 2018-12-15T16:34:03.000Z | 2022-03-26T11:24:53.000Z | test/chemistry/test_driver_gaussian_forces.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | [
"Apache-2.0"
] | 746 | 2018-12-16T16:44:42.000Z | 2021-07-10T16:59:43.000Z | test/chemistry/test_driver_gaussian_forces.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | [
"Apache-2.0"
] | 421 | 2018-12-22T14:49:00.000Z | 2022-03-04T09:47:07.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Gaussian Forces Driver """
import unittest
from test.chemistry import QiskitChemistryTestCase
from qiskit.chemistry.drivers import GaussianForcesDriver, Molecule
from qiskit.chemistry import QiskitChemistryError
class TestDriverGaussianForces(QiskitChemistryTestCase):
"""Gaussian Forces Driver tests."""
def test_driver_jcf(self):
""" Test the driver works with job control file """
try:
driver = GaussianForcesDriver(
['#p B3LYP/6-31g Freq=(Anharm) Int=Ultrafine SCF=VeryTight',
'',
'CO2 geometry optimization B3LYP/6-31g',
'',
'0 1',
'C -0.848629 2.067624 0.160992',
'O 0.098816 2.655801 -0.159738',
'O -1.796073 1.479446 0.481721',
'',
''
])
result = driver.run()
self._check_driver_result(result)
except QiskitChemistryError:
self.skipTest('GAUSSIAN driver does not appear to be installed')
def test_driver_molecule(self):
""" Test the driver works with Molecule """
try:
driver = GaussianForcesDriver(
molecule=Molecule(geometry=[('C', [-0.848629, 2.067624, 0.160992]),
('O', [0.098816, 2.655801, -0.159738]),
('O', [-1.796073, 1.479446, 0.481721])],
multiplicity=1,
charge=0),
basis='6-31g')
result = driver.run()
self._check_driver_result(result)
except QiskitChemistryError:
self.skipTest('GAUSSIAN driver does not appear to be installed')
def test_driver_logfile(self):
""" Test the driver works with logfile (Gaussian does not need to be installed) """
driver = GaussianForcesDriver(
logfile=self.get_resource_path('test_driver_gaussian_log.txt'))
result = driver.run()
self._check_driver_result(result)
if __name__ == '__main__':
unittest.main()
| 39.056075 | 91 | 0.512084 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Gaussian Forces Driver """
import unittest
from test.chemistry import QiskitChemistryTestCase
from qiskit.chemistry.drivers import GaussianForcesDriver, Molecule
from qiskit.chemistry import QiskitChemistryError
class TestDriverGaussianForces(QiskitChemistryTestCase):
"""Gaussian Forces Driver tests."""
def test_driver_jcf(self):
""" Test the driver works with job control file """
try:
driver = GaussianForcesDriver(
['#p B3LYP/6-31g Freq=(Anharm) Int=Ultrafine SCF=VeryTight',
'',
'CO2 geometry optimization B3LYP/6-31g',
'',
'0 1',
'C -0.848629 2.067624 0.160992',
'O 0.098816 2.655801 -0.159738',
'O -1.796073 1.479446 0.481721',
'',
''
])
result = driver.run()
self._check_driver_result(result)
except QiskitChemistryError:
self.skipTest('GAUSSIAN driver does not appear to be installed')
def test_driver_molecule(self):
""" Test the driver works with Molecule """
try:
driver = GaussianForcesDriver(
molecule=Molecule(geometry=[('C', [-0.848629, 2.067624, 0.160992]),
('O', [0.098816, 2.655801, -0.159738]),
('O', [-1.796073, 1.479446, 0.481721])],
multiplicity=1,
charge=0),
basis='6-31g')
result = driver.run()
self._check_driver_result(result)
except QiskitChemistryError:
self.skipTest('GAUSSIAN driver does not appear to be installed')
def test_driver_logfile(self):
""" Test the driver works with logfile (Gaussian does not need to be installed) """
driver = GaussianForcesDriver(
logfile=self.get_resource_path('test_driver_gaussian_log.txt'))
result = driver.run()
self._check_driver_result(result)
def _check_driver_result(self, watson):
expected = [[352.3005875, 2, 2],
[-352.3005875, -2, -2],
[631.6153975, 1, 1],
[-631.6153975, -1, -1],
[115.653915, 4, 4],
[-115.653915, -4, -4],
[115.653915, 3, 3],
[-115.653915, -3, -3],
[-15.341901966295344, 2, 2, 2],
[-88.2017421687633, 1, 1, 2],
[42.40478531359112, 4, 4, 2],
[26.25167512727164, 4, 3, 2],
[2.2874639206341865, 3, 3, 2],
[0.4207357291666667, 2, 2, 2, 2],
[4.9425425, 1, 1, 2, 2],
[1.6122932291666665, 1, 1, 1, 1],
[-4.194299375, 4, 4, 2, 2],
[-4.194299375, 3, 3, 2, 2],
[-10.20589125, 4, 4, 1, 1],
[-10.20589125, 3, 3, 1, 1],
[2.2973803125, 4, 4, 4, 4],
[2.7821204166666664, 4, 4, 4, 3],
[7.329224375, 4, 4, 3, 3],
[-2.7821200000000004, 4, 3, 3, 3],
[2.2973803125, 3, 3, 3, 3]
]
for i, entry in enumerate(watson.data):
msg = "mode[{}]={} does not match expected {}".format(i, entry, expected[i])
self.assertAlmostEqual(entry[0], expected[i][0], msg=msg)
self.assertListEqual(entry[1:], expected[i][1:], msg=msg)
if __name__ == '__main__':
unittest.main()
| 1,502 | 0 | 27 |
62bfe85bbec69aee93797af981fbec0a80142b2f | 1,342 | py | Python | pyfdas/mavlogparse.py | dimasad/pyfdas3 | d495a7dbfa3f8e96ac9c216e7317c164aa907da1 | [
"MIT"
] | null | null | null | pyfdas/mavlogparse.py | dimasad/pyfdas3 | d495a7dbfa3f8e96ac9c216e7317c164aa907da1 | [
"MIT"
] | null | null | null | pyfdas/mavlogparse.py | dimasad/pyfdas3 | d495a7dbfa3f8e96ac9c216e7317c164aa907da1 | [
"MIT"
] | null | null | null | """MAVLink log parsing utilities."""
import argparse
from pymavlink.dialects.v10 import ceaufmg as mavlink
from pymavlink import mavutil
import numpy as np
def main():
"""Parse a MAVLink log."""
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument("--condition", default=None,
help="message filter condition")
parser.add_argument("--no-timestamps", dest="notimestamps",
action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--dialect", default="ceaufmg", help="MAVLink dialect")
parser.add_argument("log", metavar="LOG")
args = parser.parse_args()
conn = mavutil.mavlink_connection(args.log, dialect=args.dialect,
notimestamps=args.notimestamps)
conn._link = None
while True:
msg = conn.recv_match(condition=args.condition)
if msg is None:
break
elif msg.get_type() == 'BAD_DATA':
continue
else:
header = msg.get_header()
timestamp = msg._timestamp or 0
fields = [getattr(msg, name) for name in msg.fieldnames]
print(header.msgId, header.srcSystem, header.srcComponent,
timestamp, *fields)
if __name__ == '__main__':
main()
| 32.731707 | 80 | 0.616244 | """MAVLink log parsing utilities."""
import argparse
from pymavlink.dialects.v10 import ceaufmg as mavlink
from pymavlink import mavutil
import numpy as np
def main():
"""Parse a MAVLink log."""
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument("--condition", default=None,
help="message filter condition")
parser.add_argument("--no-timestamps", dest="notimestamps",
action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--dialect", default="ceaufmg", help="MAVLink dialect")
parser.add_argument("log", metavar="LOG")
args = parser.parse_args()
conn = mavutil.mavlink_connection(args.log, dialect=args.dialect,
notimestamps=args.notimestamps)
conn._link = None
while True:
msg = conn.recv_match(condition=args.condition)
if msg is None:
break
elif msg.get_type() == 'BAD_DATA':
continue
else:
header = msg.get_header()
timestamp = msg._timestamp or 0
fields = [getattr(msg, name) for name in msg.fieldnames]
print(header.msgId, header.srcSystem, header.srcComponent,
timestamp, *fields)
if __name__ == '__main__':
main()
| 0 | 0 | 0 |
008e14ae0353e4965925df736cb8c4b511de10f5 | 2,761 | py | Python | kra/models/Workload.py | smpio/kube-resource-analyzer | d214b7c32bc5e404ac951f66cf0914fcae1f580f | [
"MIT"
] | null | null | null | kra/models/Workload.py | smpio/kube-resource-analyzer | d214b7c32bc5e404ac951f66cf0914fcae1f580f | [
"MIT"
] | 50 | 2021-05-26T14:15:09.000Z | 2021-07-24T12:08:14.000Z | kra/models/Workload.py | smpio/kube-resource-analyzer | d214b7c32bc5e404ac951f66cf0914fcae1f580f | [
"MIT"
] | null | null | null | import enum
import itertools
from collections import defaultdict
from django.db import models
from django.core.validators import MinValueValidator
from utils.django.models import EnumField
| 31.375 | 88 | 0.629844 | import enum
import itertools
from collections import defaultdict
from django.db import models
from django.core.validators import MinValueValidator
from utils.django.models import EnumField
class WorkloadKind(enum.IntEnum):
ReplicaSet = 1
Deployment = 2
DaemonSet = 3
CronJob = 4
StatefulSet = 5
Job = 6
class WorkloadManager(models.Manager):
def get_queryset(self):
return WorkloadQuerySet(self.model, using=self._db)
class Workload(models.Model):
kind = EnumField(enum_class=WorkloadKind)
namespace = models.CharField(max_length=255)
name = models.CharField(max_length=255)
affinity = models.JSONField(blank=True, null=True)
# UNUSED
priority = models.FloatField(validators=[MinValueValidator(0)], default=1)
auto_downgrade = models.BooleanField(default=False)
min_auto_downgrade_interval_sec = models.PositiveIntegerField(blank=True, null=True)
objects = WorkloadManager()
class Meta:
unique_together = ('kind', 'namespace', 'name')
def __str__(self):
return f'{self.kind.name} {self.namespace}/{self.name}'
class WorkloadQuerySet(models.QuerySet):
def _fetch_all(self):
from kra.models import ResourceUsage
super()._fetch_all()
step = self._hints.get('_prefetch_resource_usage_buckets_step', None)
if step is None:
return
workloads = self._result_cache
container_ids = \
itertools.chain.from_iterable(
itertools.chain.from_iterable(
(c.id for c in pod.container_set.all()) for pod in wl.pod_set.all()
) for wl in workloads
)
qs = ResourceUsage.objects.filter(container_id__in=container_ids)\
.annotate(
ts=models.Func(
models.Value(f'{step} seconds'), 'measured_at',
function='time_bucket',
output_field=models.DateTimeField()
),
)\
.values('container_id', 'ts')\
.order_by('container_id', 'ts')\
.annotate(
memory_mi=models.Max('memory_mi'),
cpu_m_seconds=models.Max('cpu_m_seconds'),
)
buckets_by_container_id = defaultdict(list)
for b in qs:
buckets_by_container_id[b.pop('container_id')].append(b)
for wl in workloads:
for pod in wl.pod_set.all():
for c in pod.container_set.all():
c.resource_usage_buckets = buckets_by_container_id[c.id]
def prefetch_resource_usage_buckets(self, step):
clone = self._chain()
clone._hints['_prefetch_resource_usage_buckets_step'] = step
return clone
| 1,670 | 725 | 171 |
3e9b3ff5ed654035e7a6bee703c25cc1ba08b55d | 470 | py | Python | bioprinter/__init__.py | Edinburgh-Genome-Foundry/bioprinter | 782ac94cd18c62b92683e502ad0e350706c091c1 | [
"MIT"
] | 7 | 2016-04-03T18:59:00.000Z | 2021-11-12T14:24:52.000Z | bioprinter/__init__.py | Edinburgh-Genome-Foundry/bioprinter | 782ac94cd18c62b92683e502ad0e350706c091c1 | [
"MIT"
] | 4 | 2019-02-04T13:21:50.000Z | 2021-10-04T16:51:55.000Z | bioprinter/__init__.py | Edinburgh-Genome-Foundry/bioprinter | 782ac94cd18c62b92683e502ad0e350706c091c1 | [
"MIT"
] | 3 | 2016-06-18T23:03:00.000Z | 2021-11-12T14:24:54.000Z | """Bioprinter: print images with colored bacteria and yeast.
This implements the `bioprint` function, which takes an image and writes a CSV
file that the Labcyte Echo dispenser can use to print the pictures on a plate
using yeast, coli, ...
Written by Valentin for the Edinburgh Genome Foundry.
Original idea and Matlab code by Mike Shen:
https://github.com/mshen5/BioPointillism
"""
# __all__ = []
from .bioprinter import bioprint
from .version import __version__
| 27.647059 | 78 | 0.776596 | """Bioprinter: print images with colored bacteria and yeast.
This implements the `bioprint` function, which takes an image and writes a CSV
file that the Labcyte Echo dispenser can use to print the pictures on a plate
using yeast, coli, ...
Written by Valentin for the Edinburgh Genome Foundry.
Original idea and Matlab code by Mike Shen:
https://github.com/mshen5/BioPointillism
"""
# __all__ = []
from .bioprinter import bioprint
from .version import __version__
| 0 | 0 | 0 |
4f2497b38e8d9de04844fee5f2a9d23ece916756 | 738 | py | Python | src/utils/activation_function.py | amitasviper/simply_ml | 0179dff472e27bb7611015fe7c4d24099f0df559 | [
"MIT"
] | null | null | null | src/utils/activation_function.py | amitasviper/simply_ml | 0179dff472e27bb7611015fe7c4d24099f0df559 | [
"MIT"
] | null | null | null | src/utils/activation_function.py | amitasviper/simply_ml | 0179dff472e27bb7611015fe7c4d24099f0df559 | [
"MIT"
] | null | null | null | import numpy as np | 21.085714 | 42 | 0.696477 | import numpy as np
class ActivationFunction(object):
def __call__(self, x):
raise NotImplementedError()
def gradient(self, x):
raise NotImplementedError()
class Sigmoid(ActivationFunction):
def __call__(self, x):
return (1 / (1 + np.exp(-x)))
def gradient(self, x):
value = self.__call__(x)
return value * (1 - value)
class ReLU(ActivationFunction):
def __call__(self, x):
return np.maximum(x, 0)
def gradient(self, x):
return np.minimum(1, self.__call__(x))
class Tanh(ActivationFunction):
def __call__(self, x):
return np.tanh(x)
def gradient(self, x):
return (1 - (x ** 2))
class SoftMax(ActivationFunction):
def __call__(self, x):
exp_raised = np.exp(x)
return (exp_raised / np.sum(exp_raised)) | 336 | 58 | 326 |
70969568bafbe7c12ea43e20fd711079e8da4ad8 | 4,532 | py | Python | codes/STAGE II _ Window method Supervised autoencoder with fine tuning/script.py | gunjanmahindre/NNensemble | 00254d8da3ab81d57ba88607a72d39daf23c1022 | [
"MIT"
] | 1 | 2021-03-18T02:03:22.000Z | 2021-03-18T02:03:22.000Z | codes/STAGE II _ Window method Supervised autoencoder with fine tuning/script.py | gunjanmahindre/NNensemble | 00254d8da3ab81d57ba88607a72d39daf23c1022 | [
"MIT"
] | null | null | null | codes/STAGE II _ Window method Supervised autoencoder with fine tuning/script.py | gunjanmahindre/NNensemble | 00254d8da3ab81d57ba88607a72d39daf23c1022 | [
"MIT"
] | null | null | null | """
AUTOMATION SCRIPT: Supervided Pretrained Autoencoders for Inference in Networks
@author: Gunjan Mahindre
Version: 1.0
Date last modified: Sept. 27 2020
Description:
Run the main autoencoder code for various percentages of deletion
Run the code over "iter" number of iterations
Calculates Mean error, Absolute Hop Distance Error (AHDE) averaged over all iterations.
"""
"""
1. create windows
iterate through the windows
2. create these networks.. do 3 Power law
3. train
4. test - only over observed entries - on virgili network
% = 60, 80, 90, 99, 99.5, 99.9
plot : each window as seperate graph..
cross check whether the window of actual average node degree performs best..
"""
# IMPORT MODULES REQUIRED
# One-off Colab driver: sweeps deletion fractions, runs three window options
# per fraction via main_code (star-imported below), and saves error metrics.
print ('here 0')
import os
import random
import numpy as np
import pandas as pd
# import RobustDeepAutoencoder as auto
from RobustDeepAutoencoder import *
from RobustDeepAutoencoder import RDAE
import DeepAE as DAE
import networkx as nx
from evaluate import eval_result
# # ---------------------------------------------------
# # 1. create windows
# windows = [[5,7,9]]
# print ('here 1')
# w = windows[0]
# print ("current window:---------------------------------------------------------------------------------------- ", w)
# # create Directory for this window----------------
# directory = str(w[0]) + '_' + str(w[1]) + '_' + str(w[2])
# # Parent Directory path
# parent_dir = "/content/drive/MyDrive/PhD work/Projects/ensemble/virgili results/fine tuning/"
# # Path
# path = os.path.join(parent_dir, directory)
# Create the directory
# os.mkdir(path)
# print("Directory '% s' created" % directory)
# # ---------------------------------------------------
# Output directory for results and location of the input network dataset.
path = '/content/drive/MyDrive/PhD work/Projects/ensemble/virgili results/fine tuning/'
# path = '/content/drive/MyDrive/PhD work/Projects/ensemble/train bombing results/fine tuning/'
# data_path = '/content/drive/MyDrive/PhD work/data/undirected networks/train bombing/'
data_path = '/content/drive/MyDrive/PhD work/data/undirected networks/virgili emails/'
print ("RESULTS FOR SUPERVISED AUTOENCODERS ")
# Accumulators: one entry per deletion fraction in frac_list.
mean_results = []
abs_results = []
m_STD_results = []
a_STD_results = []
frac_list = [20, 40, 60, 80, 90, 99, 99.5, 99.9]
# frac_list = [20]
print ('here 2')
# Sweep deletion percentages; for each, run the three window options.
for fraction in frac_list:
    # main_code(fraction, w)
    print ("Fraction--------------------------------", fraction)
    # for the given fraction----
    # run option 1
    # window for this option
    w1 = [4,5,6]
    print ("current window:---------------------------------------------------------------------------------------- ", w1)
    hadamard_test1 = main_code(fraction, w1, path, data_path)
    hadamard_test1 = np.array(hadamard_test1)
    # save the corrected result matrix
    print('done with option 1')
    # # run option 2
    # # window for this option
    w2 = [6,7,8]
    print ("current window:---------------------------------------------------------------------------------------- ", w2)
    hadamard_test2 = main_code(fraction, w2, path, data_path)
    hadamard_test2 = np.array(hadamard_test2)
    # # save the corrected result matrix
    print('done with option 2')
    # # check if the same entries are being deleted.. so that we can average these entries===they ARE same :)
    # if np.sum(hadamard_test1 - hadamard_test2) == 0:
    # print ('same')
    # # run option 3
    # # window for this option
    w3 = [8,9,10]
    print ("current window:---------------------------------------------------------------------------------------- ", w3)
    hadamard_test3 = main_code(fraction, w3, path, data_path)
    hadamard_test3 = np.array(hadamard_test3)
    # # save the corrected result matrix
    print('done with option 3')
    # average the three result matrices
    # evaluate this final result
    # NOTE(review): only hadamard_test1 is passed to eval_result here;
    # hadamard_test2/hadamard_test3 are computed but unused -- confirm intended.
    [mean_err, abs_err, mean_std, abs_std] = eval_result(path, w1, w2, w3, hadamard_test1, fraction, data_path)
    print(mean_err, abs_err, mean_std, abs_std)
    # # append the result to our variables :)
    mean_results.append(mean_err)
    abs_results.append(abs_err)
    m_STD_results.append(mean_std)
    a_STD_results.append(abs_std)
    # save each result in a text file
    filename = '/mean_error.txt'
    np.savetxt(path + filename, mean_results)
    filename = '/abs_error.txt'
    np.savetxt(path + filename, abs_results)
    filename = '/mean_STD.txt'
    np.savetxt(path + filename, m_STD_results)
    filename = '/abs_STD.txt'
    np.savetxt(path + filename, a_STD_results)
print (frac_list)
exit() | 30.829932 | 120 | 0.618491 | """
AUTOMATION SCRIPT: Supervided Pretrained Autoencoders for Inference in Networks
@author: Gunjan Mahindre
Version: 1.0
Date last modified: Sept. 27 2020
Description:
Run the main autoencoder code for various percentages of deletion
Run the code over "iter" number of iterations
Calculates Mean error, Absolute Hop Distance Error (AHDE) averaged over all iterations.
"""
"""
1. create windows
iterate through the windows
2. create these networks.. do 3 Power law
3. train
4. test - only over observed entries - on virgili network
% = 60, 80, 90, 99, 99.5, 99.9
plot : each window as seperate graph..
cross check whether the window of actual average node degree performs best..
"""
# IMPORT MODULES REQUIRED
print ('here 0')
import os
import random
import numpy as np
import pandas as pd
# import RobustDeepAutoencoder as auto
from RobustDeepAutoencoder import *
from RobustDeepAutoencoder import RDAE
import DeepAE as DAE
import networkx as nx
from evaluate import eval_result
# # ---------------------------------------------------
# # 1. create windows
# windows = [[5,7,9]]
# print ('here 1')
# w = windows[0]
# print ("current window:---------------------------------------------------------------------------------------- ", w)
# # create Directory for this window----------------
# directory = str(w[0]) + '_' + str(w[1]) + '_' + str(w[2])
# # Parent Directory path
# parent_dir = "/content/drive/MyDrive/PhD work/Projects/ensemble/virgili results/fine tuning/"
# # Path
# path = os.path.join(parent_dir, directory)
# Create the directory
# os.mkdir(path)
# print("Directory '% s' created" % directory)
# # ---------------------------------------------------
path = '/content/drive/MyDrive/PhD work/Projects/ensemble/virgili results/fine tuning/'
# path = '/content/drive/MyDrive/PhD work/Projects/ensemble/train bombing results/fine tuning/'
# data_path = '/content/drive/MyDrive/PhD work/data/undirected networks/train bombing/'
data_path = '/content/drive/MyDrive/PhD work/data/undirected networks/virgili emails/'
print ("RESULTS FOR SUPERVISED AUTOENCODERS ")
mean_results = []
abs_results = []
m_STD_results = []
a_STD_results = []
frac_list = [20, 40, 60, 80, 90, 99, 99.5, 99.9]
# frac_list = [20]
print ('here 2')
for fraction in frac_list:
# main_code(fraction, w)
print ("Fraction--------------------------------", fraction)
# for the given fraction----
# run option 1
# window for this option
w1 = [4,5,6]
print ("current window:---------------------------------------------------------------------------------------- ", w1)
hadamard_test1 = main_code(fraction, w1, path, data_path)
hadamard_test1 = np.array(hadamard_test1)
# save the corrected result matrix
print('done with option 1')
# # run option 2
# # window for this option
w2 = [6,7,8]
print ("current window:---------------------------------------------------------------------------------------- ", w2)
hadamard_test2 = main_code(fraction, w2, path, data_path)
hadamard_test2 = np.array(hadamard_test2)
# # save the corrected result matrix
print('done with option 2')
# # check if the same entries are being deleted.. so that we can average these entries===they ARE same :)
# if np.sum(hadamard_test1 - hadamard_test2) == 0:
# print ('same')
# # run option 3
# # window for this option
w3 = [8,9,10]
print ("current window:---------------------------------------------------------------------------------------- ", w3)
hadamard_test3 = main_code(fraction, w3, path, data_path)
hadamard_test3 = np.array(hadamard_test3)
# # save the corrected result matrix
print('done with option 3')
# average the three result matrices
# evaluate this final result
[mean_err, abs_err, mean_std, abs_std] = eval_result(path, w1, w2, w3, hadamard_test1, fraction, data_path)
print(mean_err, abs_err, mean_std, abs_std)
# # append the result to our variables :)
mean_results.append(mean_err)
abs_results.append(abs_err)
m_STD_results.append(mean_std)
a_STD_results.append(abs_std)
# save each result in a text file
filename = '/mean_error.txt'
np.savetxt(path + filename, mean_results)
filename = '/abs_error.txt'
np.savetxt(path + filename, abs_results)
filename = '/mean_STD.txt'
np.savetxt(path + filename, m_STD_results)
filename = '/abs_STD.txt'
np.savetxt(path + filename, a_STD_results)
print (frac_list)
exit() | 0 | 0 | 0 |
c54199e4889fdf9160eb19f066fbca9a98989a9d | 234 | py | Python | bitbots_test/src/bitbots_test/known_tags.py | bit-bots/bitbots_tools | 9cbd0798dc57a36397a68c994f08cafa5961e1c5 | [
"MIT"
] | null | null | null | bitbots_test/src/bitbots_test/known_tags.py | bit-bots/bitbots_tools | 9cbd0798dc57a36397a68c994f08cafa5961e1c5 | [
"MIT"
] | 35 | 2019-09-11T11:43:18.000Z | 2021-11-24T18:48:54.000Z | bitbots_test/src/bitbots_test/known_tags.py | bit-bots/bitbots_tools | 9cbd0798dc57a36397a68c994f08cafa5961e1c5 | [
"MIT"
] | 1 | 2019-10-27T10:44:16.000Z | 2019-10-27T10:44:16.000Z | """Known test tags so that common tags can safely reused in :func:`bitbots_test.decorators.tag`"""
# Reusable tag constants; the bare string after each one is its attribute doc.
INTERACTIVE = "interactive"
"Tests which require user interaction in order to be run"
WEBOTS = "webots"
"Tests which require webots"
| 29.25 | 98 | 0.760684 | """Known test tags so that common tags can safely reused in :func:`bitbots_test.decorators.tag`"""
INTERACTIVE = "interactive"
"Tests which require user interaction in order to be run"
WEBOTS = "webots"
"Tests which require webots"
| 0 | 0 | 0 |
1f11efecdcd35b002f6df2d8527818c7dedbf9d2 | 578 | py | Python | LeetCode_MinmValueToGetPositiveStep.py | amukher3/Problem_solutions | 8fa6014a91f295d08cafb989024caa91d99211d9 | [
"Apache-2.0"
] | 1 | 2021-12-28T08:58:51.000Z | 2021-12-28T08:58:51.000Z | LeetCode_MinmValueToGetPositiveStep.py | amukher3/Coding | a330cb04b5dd5cc1c3cf69249417a71586441bc7 | [
"Apache-2.0"
] | null | null | null | LeetCode_MinmValueToGetPositiveStep.py | amukher3/Coding | a330cb04b5dd5cc1c3cf69249417a71586441bc7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 20 01:59:55 2020
@author: abhi0
"""
| 23.12 | 53 | 0.396194 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 20 01:59:55 2020
@author: abhi0
"""
class Solution:
def minStartValue(self, nums: List[int]) -> int:
startVal=1
cnt=startVal
flag=0
while True:
flag=0
for i in range(len(nums)):
startVal=startVal+nums[i]
if startVal<1:
flag=1
break
if flag==0:
return cnt
else:
cnt+=1
startVal=cnt | 444 | -6 | 51 |
963c76c18f9f0ad015cc3196ceac0d538e33c06e | 1,040 | py | Python | rtlog.py | dawson2016/python-test | 54c3cf24a58fc324b89ea896ecc32f57ead01756 | [
"Apache-2.0"
] | 4 | 2019-04-02T05:26:30.000Z | 2019-04-10T10:32:14.000Z | rtlog.py | dawson2016/python-test | 54c3cf24a58fc324b89ea896ecc32f57ead01756 | [
"Apache-2.0"
] | 1 | 2020-12-10T06:45:29.000Z | 2020-12-10T06:45:29.000Z | rtlog.py | dawson2016/python-test | 54c3cf24a58fc324b89ea896ecc32f57ead01756 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#coding:utf-8
#realtime time log
# Tails the nginx access log (Python 2 script) and publishes parsed fields
# of each new entry to the Redis channel "fm110".
import time
import redis
import json
import urllib2
rc = redis.Redis(host='192.168.1.15',port=6379)
# Open the log and seek to its end so only newly appended lines are handled.
f = open("/var/log/nginx/access.log", "r")
f.seek(0, 2)
while True:
    offset = f.tell()
    line = f.readline()
    if not line:
        # No new data yet: rewind to the last offset and poll again shortly.
        f.seek(offset)
        time.sleep(0.1)
    else:
        #ip = re.search(r'(?<![\.\d])(?:\d{1,3}\.){3}\d{1,3}(?![\.\d])',line).group()
        # Split the access-log line by position (combined log format assumed
        # -- TODO confirm the nginx log_format matches these indices).
        ip=line.split(' ')[0]
        actime=line.split(' ')[3].split('/')[2].split(':',1)[1]
        web=line.split(' ')[6]
        res=line.split('"')[3]
        client=line.split('"')[5]
        #print ip,actime,web
        # NOTE(review): ipool() is not defined in this (filtered) copy of the
        # script, so this line raises NameError at runtime; the unfiltered
        # copy of the file defines it.
        rc.publish("fm110",[ipool(ip),actime,web,res,client])
f.close()  # unreachable: the while True loop above never exits
| 28.888889 | 85 | 0.542308 | #!/usr/bin/env python
#coding:utf-8
#realtime time log
import time
import redis
import json
import urllib2
rc = redis.Redis(host='192.168.1.15',port=6379)
def ipool(ip):
    """Looks up the city for an IP address via Sina's iplookup web service.

    Python 2 code (urllib2).  Returns the reported city name, or u'局域网'
    (LAN) when the service reports ret == -1 (e.g. a private address).
    """
    url = 'http://int.dpool.sina.com.cn/iplookup/iplookup.php?format=json&ip='+ip
    response = urllib2.urlopen(url)
    try:
        postdata = response.read()
    finally:
        # BUG FIX: the HTTP response was never closed before, leaking a
        # connection per lookup inside the tail loop.
        response.close()
    jsondata = json.loads(postdata)
    if jsondata['ret'] != -1:
        city = jsondata['city']
    else:
        city = u'局域网'
    return city
f = open("/var/log/nginx/access.log", "r")
f.seek(0, 2)
while True:
offset = f.tell()
line = f.readline()
if not line:
f.seek(offset)
time.sleep(0.1)
else:
#ip = re.search(r'(?<![\.\d])(?:\d{1,3}\.){3}\d{1,3}(?![\.\d])',line).group()
ip=line.split(' ')[0]
actime=line.split(' ')[3].split('/')[2].split(':',1)[1]
web=line.split(' ')[6]
res=line.split('"')[3]
client=line.split('"')[5]
#print ip,actime,web
rc.publish("fm110",[ipool(ip),actime,web,res,client])
f.close()
| 302 | 0 | 22 |
c700867cacd8437692838065bcb4d5e7f800382e | 563 | py | Python | aiogcd/orm/properties/booleanvalue.py | yesway/aiogcd | 285997e4cdcee8de5355e4208977859c1ec88690 | [
"MIT"
] | null | null | null | aiogcd/orm/properties/booleanvalue.py | yesway/aiogcd | 285997e4cdcee8de5355e4208977859c1ec88690 | [
"MIT"
] | null | null | null | aiogcd/orm/properties/booleanvalue.py | yesway/aiogcd | 285997e4cdcee8de5355e4208977859c1ec88690 | [
"MIT"
] | null | null | null | """booleanvalue.py
Created on: May 19, 2017
Author: Jeroen van der Heijden <jeroen@transceptor.technology>
"""
from .value import Value
| 26.809524 | 72 | 0.616341 | """booleanvalue.py
Created on: May 19, 2017
Author: Jeroen van der Heijden <jeroen@transceptor.technology>
"""
from .value import Value
class BooleanValue(Value):
    """A Value property that only accepts Python bool instances."""
    def check_value(self, value):
        """Raises TypeError unless `value` is a bool."""
        if isinstance(value, bool):
            return
        message = (
            'Expecting an value of type \'bool\' for property {!r} '
            'but received type {!r}.')
        raise TypeError(message.format(self.name, value.__class__.__name__))
    def set_value(self, model, value):
        """Validates `value` as a bool, then stores it via the base class."""
        self.check_value(value)
        super().set_value(model, value)
| 339 | 5 | 77 |
a584933a7021424e15d0803815cf2820e7948a91 | 14,430 | py | Python | components/geometry.py | lietk12/robotic-package-sorting | 081bf93a9efda772d63c4286bb83739d08da7a41 | [
"BSD-2-Clause"
] | null | null | null | components/geometry.py | lietk12/robotic-package-sorting | 081bf93a9efda772d63c4286bb83739d08da7a41 | [
"BSD-2-Clause"
] | null | null | null | components/geometry.py | lietk12/robotic-package-sorting | 081bf93a9efda772d63c4286bb83739d08da7a41 | [
"BSD-2-Clause"
] | null | null | null | """Support for 2-D geometric operations.
Mathematics for poses, frames, and coordinate transformations derived from Peter Corke's
"Robotics, Vision, and Control: Fundamental Algorithms in MATLAB".
"""
from collections import namedtuple
from itertools import chain
import numpy as np
from components.messaging import Broadcaster
from components.util import between, within, iter_first_not_none, min_first
# Coord should be a numpy array representing a column vector.
# Angle should be in radians from the frame's +x axis.
Pose = namedtuple("Pose", ["Coord", "Angle"])
# Angles
def normalize_angle(angle):
    """Wraps an angle in radians into the half-open interval (-pi, pi].
    This matches the output range of the two-argument arctangent."""
    wrapped = angle % -(2 * np.pi)
    half_turns = int(wrapped / np.pi)
    return wrapped - (2 * np.pi * half_turns)
def positive_angle(angle):
    """Wraps an angle in radians into the interval [0, 2 * pi)."""
    full_turn = 2 * np.pi
    return angle % full_turn
# Vector representations
def to_vector(*values):
    """Stacks the given scalar values into a single column vector."""
    rows = [[component] for component in values]
    return np.array(rows)
def vector_to_tuple(vector):
    """Flattens a column vector into a plain tuple of its entries."""
    entries = [row[0] for row in vector]
    return tuple(entries)
def vectors_to_flat(vectors):
    """Flattens an iterable of column vectors into one tuple of alternating coordinates."""
    per_vector = (vector_to_tuple(column) for column in vectors)
    return tuple(chain.from_iterable(per_vector))
def homogeneous_form(vector):
    """Appends a unit coordinate to a 2-D column vector, giving its homogeneous form."""
    return np.vstack((vector, [1]))
def point_form(homogeneous_vector):
    """Drops the homogeneous coordinate, returning the 2-D column vector part."""
    return homogeneous_vector[:2, :1]
def direction_vector(angle):
    """Unit column vector pointing `angle` radians from the +x axis."""
    cosine = np.cos(angle)
    sine = np.sin(angle)
    return to_vector(cosine, sine)
def to_angle(direction):
    """Converts a direction (column) vector into an angle in radians."""
    y_component = direction[1]
    x_component = direction[0]
    return np.arctan2(y_component, x_component)[0]
# Transformation matrices
def rotation_matrix(angle):
    """Builds the 2-D rotation matrix for a counter-clockwise turn of `angle` radians."""
    cosine = np.cos(angle)
    sine = np.sin(angle)
    return np.array([[cosine, -sine], [sine, cosine]])
def transformation(pose, x_scale=1, y_scale=1):
    """Homogeneous matrix mapping coordinates in `pose`'s frame to its reference frame.

    Optional per-axis scale factors are applied on top of the rotation.
    """
    scaling = np.array([[x_scale, 0], [0, y_scale]])
    scaled_rotation = np.dot(scaling, rotation_matrix(pose.Angle))
    top_rows = np.hstack([scaled_rotation, pose.Coord])
    return np.vstack([top_rows, [0, 0, 1]])
def transformation_inverse(pose, x_scale=1, y_scale=1):
    """Homogeneous matrix mapping reference-frame coordinates into `pose`'s frame."""
    scaling = np.array([[x_scale, 0], [0, y_scale]])
    inverse_rotation = np.dot(scaling, rotation_matrix(pose.Angle)).transpose()
    inverse_translation = -1 * np.dot(inverse_rotation, pose.Coord)
    top_rows = np.hstack([inverse_rotation, inverse_translation])
    return np.vstack([top_rows, [0, 0, 1]])
def compose(transformation_one, transformation_two):
    """Composes two homogeneous transformations via matrix multiplication."""
    composed = np.dot(transformation_one, transformation_two)
    return composed
# Transformations
def transform(matrix, frame_coords):
    """Applies a homogeneous transformation matrix to a plain 2-D column vector."""
    lifted = homogeneous_form(frame_coords)
    return point_form(np.dot(matrix, lifted))
def transform_x(matrix, frame_x):
    """Maps an x-coordinate in the frame to the x-coordinate in the parent's frame."""
    mapped = transform(matrix, to_vector(frame_x, 0))
    return mapped[0][0]
def transform_y(matrix, frame_y):
    """Maps a y-coordinate in the frame to the y-coordinate in the parent's frame."""
    mapped = transform(matrix, to_vector(0, frame_y))
    return mapped[1][0]
def transform_all(matrix, vectors):
    """Applies the transformation to each vector, returning the results as a tuple."""
    return tuple(transform(matrix, each) for each in vectors)
def rotate_pose(pose, rotation_center, angle):
    """Rotates a pose (position and heading) about `rotation_center` by `angle` radians."""
    offset = pose.Coord - rotation_center
    heading_tip = direction_vector(pose.Angle) + offset
    rotation = transformation(Pose(to_vector(0, 0), angle))
    rotated_offset = transform(rotation, offset)
    rotated_tip = transform(rotation, heading_tip)
    rotated_angle = to_angle(rotated_tip - rotated_offset)
    return Pose(rotated_offset + rotation_center, rotated_angle)
# Geometric primitives
def line_intersection(first_point, first_direction, second_point, second_direction):
    """Intersects two lines, each given by a point and a direction vector.

    Returns (t1, t2) such that the crossing point is point_i + t_i * direction_i,
    or None when the lines are parallel or collinear.  Algorithm after Gareth
    Rees's answer at http://stackoverflow.com/questions/563198
    """
    denominator = np.cross(first_direction.flatten(), second_direction.flatten())
    if denominator == 0:
        return None  # parallel or collinear: no unique crossing point
    separation = second_point - first_point
    along_second = float(np.cross(separation, first_direction, axis=0)) / denominator
    along_first = float(np.cross(separation, second_direction, axis=0)) / denominator
    return (along_first, along_second)
def ray_segment_intersection(ray_point, ray_angle, segment_left, segment_right):
    """Distance along a ray to a line segment, or None when they do not meet.

    Algorithm after Gareth Rees's answer at
    http://stackoverflow.com/questions/14307158
    """
    hit = line_intersection(ray_point, direction_vector(ray_angle),
                            segment_left, segment_right - segment_left)
    if hit is None:
        return None
    ray_distance, segment_fraction = hit
    if ray_distance < 0 or not between(0, 1, segment_fraction):
        return None
    return ray_distance
def perpendicular_to_line(point, line_left, line_right):
    """Vector from `point` to its orthogonal projection onto the line through the endpoints.

    Formula from Pablo's answer at http://stackoverflow.com/questions/5227373
    """
    unit_direction = (line_right - line_left) / float(np.linalg.norm(line_right - line_left))
    projected_length = np.vdot(point - line_left, unit_direction)
    foot_of_perpendicular = line_left + unit_direction * projected_length
    return foot_of_perpendicular - point
def segment_transformation(from_left, from_right, to_left, to_right):
    """Computes how to move the "from" segment onto the line through the "to" segment.

    The rotation is the minimum angle that makes the "from" segment parallel
    to the "to" line, taken about the "from" segment's midpoint; the
    translation is the shortest vector that then makes the rotated segment
    collinear with the "to" line.

    Arguments:
        All arguments are column vectors in a common parent frame.
        from_left: "left" endpoint of the segment to be transformed.
        from_right: "right" endpoint of the segment to be transformed.
        to_left: "left" endpoint of the segment defining the target line;
            pairs up with from_left after the rotation.
        to_right: "right" endpoint of the segment defining the target line;
            pairs up with from_right after the rotation.

    Return:
        A 3-tuple (center_of_rotation, rotation_angle, translation_vector).
    """
    pivot = 0.5 * (from_right + from_left)  # rotation happens about the midpoint
    angle_change = to_angle(to_right - to_left) - to_angle(from_right - from_left)
    shift = perpendicular_to_line(pivot, to_left, to_right)
    return (pivot, angle_change, shift)
class Frame(object):
    """Mix-in that lets an object act as a 2-D coordinate frame.

    Subclasses supply a pose (and optionally per-axis scaling) relative to a
    parent frame; this mix-in derives the forward and inverse homogeneous
    transformation matrices from them.
    """
    def get_transformation(self):
        """Matrix taking coordinates in this frame to the parent frame."""
        return transformation(self.get_pose(), *self._get_scaling())
    def get_transformation_inverse(self):
        """Matrix taking parent-frame coordinates into this frame."""
        return transformation_inverse(self.get_pose(), *self._get_scaling())
    # Hooks for subclasses
    def get_pose(self):
        """Pose of this Frame relative to its parent Frame (abstract)."""
        pass
    def _get_scaling(self):
        """(x, y) scale factors relative to the parent Frame; identity by default."""
        return (1, 1)
class MobileFrame(Frame, Broadcaster):
    """Interface for a mobile Frame; combines Frame with Broadcaster."""
    # Abstract methods
    def reset_pose(self):
        """Resets the frame to its initial pose. Subclasses must override."""
        pass
class Rectangle(Frame):
    """Models a rectangular shape."""
    # NOTE(review): relies on self._center, self._sides (dict of side name ->
    # endpoint pair) and self.__bounds being set by an __init__ that is not
    # visible here -- confirm their layout against the constructor.
    # Implementation of parent abstract methods
    def get_center(self):
        """Returns the center of the Rectangle."""
        return self._center
    def get_corners(self):
        """Returns a 4-tuple of the corners as column vectors."""
        return (self._sides["East"][0], self._sides["North"][0],
                self._sides["West"][0], self._sides["South"][0])
    def get_side(self, side_name):
        """Returns the specified side."""
        return self._sides[side_name]
    def in_rectangle(self, coords):
        """Checks whether the coordinate, given as a column vector, is in the Rectangle."""
        # Work in the rectangle's own frame so the bounds are axis-aligned.
        transformed = transform(self.get_transformation_inverse(), coords)
        point = vector_to_tuple(transformed)
        return (within(self.__bounds[0], self.__bounds[2], point[0])
                and within(self.__bounds[1], self.__bounds[3], point[1]))
    def nearest_side(self, coords):
        """Finds the nearest side to the coordinate given in the parent frame as a column vector.
        Returns the side as the name of the nearest side.
        To identify the nearest side, uses the algorithm outlined in Raymond Manzoni's answer at
        http://math.stackexchange.com/questions/194550/
        """
        transformed = transform(self.get_transformation_inverse(), coords)
        point = vector_to_tuple(transformed)
        # Slope of the rectangle's diagonals; the diagonals partition the
        # plane into the four regions nearest to each side.
        slope = abs(float(self.__bounds[3] - self.__bounds[1])
                    / (self.__bounds[2] - self.__bounds[0]))
        if point[1] >= slope * abs(point[0]):
            return "North"
        elif point[1] <= -slope * abs(point[0]):
            return "South"
        elif slope * point[0] > abs(point[1]):
            return "East"
        elif slope * point[0] < -abs(point[1]):
            return "West"
        # NOTE(review): a point exactly on a diagonal with x != 0 can fall
        # through all four branches and return None implicitly.
    def ray_distance_to(self, ray_point, ray_angle, side=None):
        """Returns the distance to the Rectangle from the given ray, if the ray intersects.
        The ray should be given in the parent frame as a column vector and an angle.
        Returns a 2-tuple of the actual distance and the name of the intersecting side.
        If a side is specified, finds the ray distance to that side, rather than the distance
        to the first side the ray intersects.
        """
        matrix = self.get_transformation()
        if side is not None:
            distance = ray_segment_intersection(ray_point, ray_angle,
                                                *transform_all(matrix, self.get_side(side)))
            return (distance, side)
        # Intersect the ray with every side and keep the nearest hit.
        distances = tuple((ray_segment_intersection(ray_point, ray_angle,
                                                    *transform_all(matrix, side)), side_name)
                          for (side_name, side) in self._sides.items())
        try:
            return min_first(iter_first_not_none(distances))
        except ValueError:
            # No side was hit at all.
            return (None, None)
    def point_distance_to(self, point):
        """Returns the distance to the Rectangle from the given point in the parent frame.
        Returns a 2-tuple of the distance between the point and the nearest side (as a line)
        and the name of the nearest side.
        Uses the formula presented in Wolfram MathWorld's "Point-Line Distance--2-Dimensional"
        """
        transformed = transform(self.get_transformation_inverse(), point)
        # NOTE(review): nearest_side documents a parent-frame argument but is
        # given the already-transformed point here, so it transforms twice --
        # correct only when the rectangle's transformation is the identity.
        side_name = self.nearest_side(transformed)
        side = self.get_side(side_name)
        distance = (np.linalg.norm(np.cross((side[1] - side[0]).flatten(),
                                            (side[0] - transformed).flatten()))
                    / np.linalg.norm(side[1] - side[0]))
        return (distance, side_name)
def ray_distance_to(rectangles, coords, angle):
    """Finds the first Rectangle hit by a ray and the distance along the ray.

    Arguments:
        rectangles: an iterable of Rectangles to check.
        coords: ray origin as a column vector in the rectangles' parent frame.
        angle: ray direction in radians in the rectangles' parent frame.

    Return:
        A 3-tuple (distance, side name, rectangle id) for the first rectangle
        the ray hits, or None when the ray hits nothing.
    """
    candidates = [target.ray_distance_to(coords, angle) + (target.get_id(),)
                  for target in rectangles]
    try:
        return min_first(iter_first_not_none(candidates))
    except ValueError:
        return None
| 49.587629 | 103 | 0.681566 | """Support for 2-D geometric operations.
Mathematics for poses, frames, and coordinate transformations derived from Peter Corke's
"Robotics, Vision, and Control: Fundamental Algorithms in MATLAB".
"""
from collections import namedtuple
from itertools import chain
import numpy as np
from components.messaging import Broadcaster
from components.util import between, within, iter_first_not_none, min_first
# Coord should be a numpy array representing a column vector.
# Angle should be in radians from the frame's +x axis.
Pose = namedtuple("Pose", ["Coord", "Angle"])
# Angles
def normalize_angle(angle):
"""Converts an angle in radians to an angle with -pi < value <= pi.
This encompasses the output range of the arctan function."""
negative = angle % -(2 * np.pi)
return negative - (2 * np.pi * int(negative / np.pi))
def positive_angle(angle):
"""Converts an angle in radians to an angle with 0 <= value < 2 * pi."""
return angle % (2 * np.pi)
# Vector representations
def to_vector(*values):
"""Converts the input values into a column vector."""
return np.array([[value] for value in values])
def vector_to_tuple(vector):
"""Converts a column vector into a tuple."""
return tuple(row[0] for row in vector)
def vectors_to_flat(vectors):
"""Converts iterable of column vectors to flat tuple of alternating coords."""
return tuple(chain.from_iterable(vector_to_tuple(vector) for vector in vectors))
def homogeneous_form(vector):
"""Returns the homogeneous form of a 2-D column vector."""
return np.vstack([vector, [1]])
def point_form(homogeneous_vector):
"""Returns the 2-D column vector of two elements from the homogeneous form."""
return homogeneous_vector[0:2, 0:1]
def direction_vector(angle):
"""Converts an angle from the +x axis into a unit direction vector."""
return to_vector(np.cos(angle), np.sin(angle))
def to_angle(direction):
"""Convers a direction vector into an angle in radians."""
return np.arctan2(direction[1], direction[0])[0]
# Transformation matrices
def rotation_matrix(angle):
"""Converts an angle from the +x axis into a 2-D rotation matrix."""
return np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
def transformation(pose, x_scale=1, y_scale=1):
"""Returns the homogeneous transformation matrix of a frame to its reference."""
scale_mat = np.array([[x_scale, 0], [0, y_scale]])
rot_mat = rotation_matrix(pose.Angle)
rot_scale_mat = np.dot(scale_mat, rot_mat)
transl = pose.Coord
return np.vstack([np.hstack([rot_scale_mat, transl]), [0, 0, 1]])
def transformation_inverse(pose, x_scale=1, y_scale=1):
"""Returns the homogeneous transformation matrix into a frame from its reference."""
scale_mat = np.array([[x_scale, 0], [0, y_scale]])
rot_mat = rotation_matrix(pose.Angle)
rot_scale_mat = np.dot(scale_mat, rot_mat).transpose()
transl = pose.Coord
return np.vstack([np.hstack([rot_scale_mat, -1 * np.dot(rot_scale_mat, transl)]), [0, 0, 1]])
def compose(transformation_one, transformation_two):
"""Returns the transformation that is the composition of the two inputs."""
return np.dot(transformation_one, transformation_two)
# Transformations
def transform(matrix, frame_coords):
"""Transforms the non-homogeneous 2-D column vector using the homogeneous transformation matrix."""
return point_form(np.dot(matrix, homogeneous_form(frame_coords)))
def transform_x(matrix, frame_x):
"""Converts x-coord in the frame to x-coord in the parent's frame."""
return transform(matrix, to_vector(frame_x, 0))[0][0]
def transform_y(matrix, frame_y):
"""Converts y-coord in the frame to y-coord in the parent's frame."""
return transform(matrix, to_vector(0, frame_y))[1][0]
def transform_all(matrix, vectors):
"""Transforms every vector in a tuple into the parent's frame."""
return tuple(transform(matrix, vector) for vector in vectors)
def rotate_pose(pose, rotation_center, angle):
"""Rotates the pose about the specified point by the specified angle."""
center_to_pose = pose.Coord - rotation_center
center_to_direction = direction_vector(pose.Angle) + center_to_pose
rotation = transformation(Pose(to_vector(0, 0), angle))
transformed_pose = transform(rotation, center_to_pose)
transformed_direction = transform(rotation, center_to_direction)
transformed_angle = to_angle(transformed_direction - transformed_pose)
return Pose(transformed_pose + rotation_center, transformed_angle)
# Geometric primitives
def line_intersection(first_point, first_direction, second_point, second_direction):
"""Finds the intersection (if any) between two lines defined by their points and directions.
Uses the algorithm outlined in Gareth Rees's answer at
http://stackoverflow.com/questions/563198
"""
cross_direction = np.cross(first_direction.flatten(), second_direction.flatten())
difference_point = second_point - first_point
if cross_direction == 0:
return None # Lines are collinear or parallel
second_location = float(np.cross(difference_point, first_direction, axis=0)) / cross_direction
first_location = float(np.cross(difference_point, second_direction, axis=0)) / cross_direction
return (first_location, second_location)
def ray_segment_intersection(ray_point, ray_angle, segment_left, segment_right):
"""Finds the intersection (if any) between the ray and the segment defined by two endpoints.
Uses the algorithm outlined in Gareth Rees's answer at
http://stackoverflow.com/questions/14307158
"""
intersection = line_intersection(ray_point, direction_vector(ray_angle),
segment_left, segment_right - segment_left)
if intersection is None or intersection[0] < 0 or not between(0, 1, intersection[1]):
return None
else:
return intersection[0]
def perpendicular_to_line(point, line_left, line_right):
"""Finds the vector from the point to the nearest point on the line.
Uses the formula from Pablo's answer at http://stackoverflow.com/questions/5227373
"""
line_direction = line_right - line_left
line_direction = line_direction / float(np.linalg.norm(line_direction))
vector_projection = line_direction * np.vdot((point - line_left), line_direction)
return line_left + vector_projection - point
def segment_transformation(from_left, from_right, to_left, to_right):
    """Finds a transformation to move the "from" segment so that it overlaps the "to" line.

    Rotates the "from" segment about its midpoint through the minimum angle that makes
    it parallel to the line through the "to" endpoints (so its "left" end is closer to
    to_left and its "right" end closer to to_right), then translates it the minimum
    distance needed to make it collinear with that line.

    Arguments:
        All arguments must be given as points (column vectors) in a common parent frame.
        from_left: "left" end of the line segment to be transformed.
        from_right: "right" end of the line segment to be transformed.
        to_left: "left" end of the line segment defining the target line.
        to_right: "right" end of the line segment defining the target line.

    Return:
        A 3-tuple of the center of rotation, the angle to rotate about that point,
        and a vector of the subsequent translation.
    """
    pivot = 0.5 * (from_left + from_right)  # rotate about the segment's midpoint
    rotation_angle = to_angle(to_right - to_left) - to_angle(from_right - from_left)
    translation = perpendicular_to_line(pivot, to_left, to_right)
    return (pivot, rotation_angle, translation)
class Frame(object):
    """Mix-in to support coordinate transformations from a frame."""

    def __init__(self):
        super(Frame, self).__init__()

    def get_transformation(self):
        """Returns the transformation matrix for efficient composition of transformations."""
        return transformation(self.get_pose(), *self._get_scaling())

    def get_transformation_inverse(self):
        """Returns the inverse transformation matrix."""
        return transformation_inverse(self.get_pose(), *self._get_scaling())

    # Abstract methods
    def get_pose(self):
        """Returns the pose of the Frame relative to its parent Frame."""
        pass

    def _get_scaling(self):
        """Returns a 2-tuple of the x and y scaling relative to its parent Frame."""
        return (1, 1)
class MobileFrame(Frame, Broadcaster):
    """Interface for a mobile Frame.

    A Frame whose pose can change over time. Also mixes in Broadcaster
    (defined elsewhere) — presumably so pose changes can be announced to
    observers; verify against the Broadcaster API.
    """
    def __init__(self):
        super(MobileFrame, self).__init__()
    # Abstract methods
    def reset_pose(self):
        """Resets the frame to its initial pose."""
        pass
class Rectangle(Frame):
    """Models a rectangular shape.

    The rectangle is centered at (center_x, center_y) in its parent frame and may be
    rotated by an angle. Corners and sides are stored in the rectangle's own frame
    (origin at the center, axes aligned with the sides) and are mapped to/from the
    parent frame with the transformations provided by Frame.
    """
    def __init__(self, center_x, center_y, x_length, y_length, angle=0):
        """Initializes the Rectangle.

        Arguments:
            center_x, center_y: center of the rectangle in the parent frame.
            x_length, y_length: side lengths along the rectangle's own x and y axes.
            angle: rotation relative to the parent frame, in radians (default 0).
        """
        super(Rectangle, self).__init__()
        # Frame-local bounding box stored as (min_x, min_y, max_x, max_y).
        self.__bounds = (-0.5 * x_length, -0.5 * y_length, 0.5 * x_length, 0.5 * y_length)
        center = to_vector(center_x, center_y)
        self._center = center
        self._angle = angle
        delta_x = to_vector(0.5 * x_length, 0)
        delta_y = to_vector(0, 0.5 * y_length)
        # Each side is a (left, right) pair of frame-local corner vectors, with the
        # endpoints wound counter-clockwise around the rectangle.
        self._sides = {
            "East": (delta_x - delta_y, delta_x + delta_y),
            "North": (delta_x + delta_y, -delta_x + delta_y),
            "West": (-delta_x + delta_y, -delta_x - delta_y),
            "South": (-delta_x - delta_y, delta_x - delta_y)
        }
    # Implementation of parent abstract methods
    def get_pose(self):
        """Returns the pose of the Rectangle relative to its parent Frame."""
        return Pose(self._center, self._angle)
    def get_center(self):
        """Returns the center of the Rectangle as a column vector in the parent frame."""
        return self._center
    def get_corners(self):
        """Returns a 4-tuple of the corners as frame-local column vectors."""
        return (self._sides["East"][0], self._sides["North"][0],
                self._sides["West"][0], self._sides["South"][0])
    def get_side(self, side_name):
        """Returns the specified side as a (left, right) pair of frame-local endpoints."""
        return self._sides[side_name]
    def in_rectangle(self, coords):
        """Checks whether the coordinate, given as a column vector, is in the Rectangle.

        The coordinate is given in the parent frame and mapped into the rectangle's own
        frame before being compared against the axis-aligned bounds.
        """
        transformed = transform(self.get_transformation_inverse(), coords)
        point = vector_to_tuple(transformed)
        return (within(self.__bounds[0], self.__bounds[2], point[0])
                and within(self.__bounds[1], self.__bounds[3], point[1]))
    def nearest_side(self, coords):
        """Finds the nearest side to the coordinate given in the parent frame as a column vector.

        Returns the side as the name of the nearest side. The four wedge tests below are
        exhaustive, so one of the branches always fires.

        To identify the nearest side, uses the algorithm outlined in Raymond Manzoni's
        answer at http://math.stackexchange.com/questions/194550/
        """
        transformed = transform(self.get_transformation_inverse(), coords)
        point = vector_to_tuple(transformed)
        # Slope of the rectangle's diagonals; the diagonals split the plane into four
        # wedges, one per side.
        slope = abs(float(self.__bounds[3] - self.__bounds[1])
                    / (self.__bounds[2] - self.__bounds[0]))
        if point[1] >= slope * abs(point[0]):
            return "North"
        elif point[1] <= -slope * abs(point[0]):
            return "South"
        elif slope * point[0] > abs(point[1]):
            return "East"
        elif slope * point[0] < -abs(point[1]):
            return "West"
    def ray_distance_to(self, ray_point, ray_angle, side=None):
        """Returns the distance to the Rectangle from the given ray, if the ray intersects.

        The ray should be given in the parent frame as a column vector and an angle.
        Returns a 2-tuple of the actual distance and the name of the intersecting side;
        if the ray misses, the distance (and, when no side was specified, the side name)
        is None.

        If a side is specified, finds the ray distance to that side, rather than the
        distance to the first side the ray intersects.
        """
        matrix = self.get_transformation()
        if side is not None:
            distance = ray_segment_intersection(ray_point, ray_angle,
                                                *transform_all(matrix, self.get_side(side)))
            return (distance, side)
        # Test all four sides; min_first presumably selects the entry with the smallest
        # distance among the non-None hits — verify against its definition.
        distances = tuple((ray_segment_intersection(ray_point, ray_angle,
                                                    *transform_all(matrix, endpoints)), side_name)
                          for (side_name, endpoints) in self._sides.items())
        try:
            return min_first(iter_first_not_none(distances))
        except ValueError:
            return (None, None)
    def point_distance_to(self, point):
        """Returns the distance to the Rectangle from the given point in the parent frame.

        Returns a 2-tuple of the distance between the point and the nearest side (as a
        line) and the name of the nearest side.

        Uses the formula presented in Wolfram MathWorld's "Point-Line Distance--2-Dimensional"
        """
        transformed = transform(self.get_transformation_inverse(), point)
        # Bug fix: nearest_side expects parent-frame coordinates and applies the inverse
        # transformation itself, so pass the original point. Previously the already
        # transformed coordinates were passed, applying the inverse transformation twice
        # (harmless only for a rectangle at the identity pose).
        side_name = self.nearest_side(point)
        side = self.get_side(side_name)
        # Point-line distance: |cross(line_vector, line_start - point)| / |line_vector|,
        # computed with the frame-local side endpoints and frame-local point.
        distance = (np.linalg.norm(np.cross((side[1] - side[0]).flatten(),
                                            (side[0] - transformed).flatten()))
                    / np.linalg.norm(side[1] - side[0]))
        return (distance, side_name)
def ray_distance_to(rectangles, coords, angle):
    """Determines the first rectangle hit by the specified ray, and the distance along the ray.

    Arguments:
        rectangles: an iterable of Rectangles to check.
        coords: the origin of the ray, as a column vector, in the parent frame of the rectangles.
        angle: the direction of the ray, in radians, in the parent frame of the rectangles.

    Return:
        If a rectangle is hit by the ray, a 3-tuple of the distance to that rectangle, the
        name of the side of the rectangle hit by the ray, and the id of the rectangle.
        Otherwise, returns None.
    """
    candidates = []
    for rect in rectangles:
        hit = rect.ray_distance_to(coords, angle)
        # Tag each (distance, side_name) result with the rectangle's id.
        candidates.append(hit + (rect.get_id(),))
    try:
        return min_first(iter_first_not_none(candidates))
    except ValueError:
        return None
| 781 | 0 | 104 |
150bbc387918aeb44a52ab2c036778839349a901 | 852 | py | Python | aiommy/responses.py | candyboober/aiommy | 94ec95e9e22ed5b2ad411cd6e9cae554c4d6478f | [
"MIT"
] | 2 | 2017-11-19T19:12:22.000Z | 2017-11-19T19:14:41.000Z | aiommy/responses.py | dennypenta/aiommy | 94ec95e9e22ed5b2ad411cd6e9cae554c4d6478f | [
"MIT"
] | null | null | null | aiommy/responses.py | dennypenta/aiommy | 94ec95e9e22ed5b2ad411cd6e9cae554c4d6478f | [
"MIT"
] | null | null | null | from aiohttp.web import HTTPException
from aiommy.json import dumps
JSON_ERROR_KEY = 'error'
| 26.625 | 69 | 0.537559 | from aiohttp.web import HTTPException
from aiommy.json import dumps
JSON_ERROR_KEY = 'error'
class JsonResponse(HTTPException):
def __init__(self, data=None, status=200, dumps=dumps, **kwargs):
if not data:
text = None
self.empty_body = True
else:
text = dumps(data)
self.status_code = status
HTTPException.__init__(self, text=text,
content_type='application/json',
**kwargs)
class JsonErrorResponse(JsonResponse):
def __init__(self, msg=None, status=400, dumps=dumps, **kwargs):
data = None
if msg:
data = {JSON_ERROR_KEY: msg}
super().__init__(data=data,
status=status,
dumps=dumps,
**kwargs)
| 628 | 30 | 98 |
2e4ef8803172c533c29b71e9ffcbbc695a6417f3 | 117 | py | Python | Mundo01/Python/aula09-027.py | molonti/CursoemVideo---Python | 4f6a7af648f7f619d11e95fa3dc7a33b28fcfa11 | [
"MIT"
] | null | null | null | Mundo01/Python/aula09-027.py | molonti/CursoemVideo---Python | 4f6a7af648f7f619d11e95fa3dc7a33b28fcfa11 | [
"MIT"
] | null | null | null | Mundo01/Python/aula09-027.py | molonti/CursoemVideo---Python | 4f6a7af648f7f619d11e95fa3dc7a33b28fcfa11 | [
"MIT"
] | null | null | null | nome = input('Digite seu nome inteiro: ').split()
print('Primeiro nome: ', nome[0])
print('Último nome: ', nome[-1])
| 29.25 | 49 | 0.649573 | nome = input('Digite seu nome inteiro: ').split()
print('Primeiro nome: ', nome[0])
print('Último nome: ', nome[-1])
| 0 | 0 | 0 |