hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2cd413ab0694b52c176757f5f81d49bb4eb4ab50 | 2,142 | py | Python | w02-calling-functions/team-discount/teach_stretch.py | carloswm85/2021-cs111-programming-with-functions | 73cc376e3f0de60aa0150d33ec95568d217096ec | [
"Unlicense"
] | null | null | null | w02-calling-functions/team-discount/teach_stretch.py | carloswm85/2021-cs111-programming-with-functions | 73cc376e3f0de60aa0150d33ec95568d217096ec | [
"Unlicense"
] | null | null | null | w02-calling-functions/team-discount/teach_stretch.py | carloswm85/2021-cs111-programming-with-functions | 73cc376e3f0de60aa0150d33ec95568d217096ec | [
"Unlicense"
] | null | null | null | """
You work for a retail store that wants to increase sales on Tuesday and
Wednesday, which are the store's slowest sales days. On Tuesday and
Wednesday, if a customer's subtotal is greater than $50, the store will
discount the customer's purchase by 10%.
"""
# Import the datetime module so that
# it can be used in this program.
from datetime import datetime
# The discount rate is 10% and the sales tax rate is 6%.
DISC_RATE = 0.10
SALES_TAX_RATE = 0.06
subtotal = 0
done = False
while not done:
# Get the price from the user.
text = input("Please enter the price: ")
if text.lower() == "done":
done = True
else:
price = float(text)
# Get the quantity from the user.
quantity = int(input("Plesae enter the quantity: "))
subtotal += price * quantity
# Print a blank line.
print()
# Round the subtotal to two digits after
# the decimal and print the subtotal.
subtotal = round(subtotal, 2)
print(f"Subtotal: {subtotal}")
print()
# Call the now() method to get the current date and
# time as a datetime object from the computer's clock.
current_date_and_time = datetime.now()
# Call the isoweekday() method to get the day of
# the week from the current_date_and_time object.
weekday = current_date_and_time.isoweekday()
# If the subtotal is greater than $50 and
# today is Tuesday or Wednesday, compute the discount.
if weekday == 2 or weekday == 3:
if subtotal < 50:
insufficient = 50 - subtotal
print(f"To receive the discount, add {insufficient} to your order.")
else:
discount = round(subtotal * DISC_RATE, 2)
print(f"Discount amount: {discount}")
subtotal -= discount
# Compute the sales tax. Notice that we compute the sales tax
# after computing the discount because the customer does not
# pay sales tax on the full price but on the discounted price.
sales_tax = round(subtotal * SALES_TAX_RATE, 2)
print(f"Sales tax amount: {sales_tax}")
# Compute the total by adding the subtotal and the sales tax.
total = subtotal + sales_tax
# Display the total for the user to see.
print(f"Total: {total:.2f}")
| 30.169014 | 76 | 0.699813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,354 | 0.63212 |
2cd504ebd0e349ea8bbd9fdb492b7ad1e64929f7 | 194 | py | Python | online assessment interview/SE Big Data role/mongoTest1.py | NirmalSilwal/Python- | 6d23112db8366360f0b79bdbf21252575e8eab3e | [
"MIT"
] | 32 | 2020-04-05T08:29:40.000Z | 2022-01-08T03:10:00.000Z | online assessment interview/SE Big Data role/mongoTest1.py | NirmalSilwal/Python- | 6d23112db8366360f0b79bdbf21252575e8eab3e | [
"MIT"
] | 3 | 2021-06-02T04:09:11.000Z | 2022-03-02T14:55:03.000Z | online assessment interview/SE Big Data role/mongoTest1.py | NirmalSilwal/Python- | 6d23112db8366360f0b79bdbf21252575e8eab3e | [
"MIT"
] | 3 | 2020-07-13T05:44:04.000Z | 2021-03-03T07:07:58.000Z | import pymongo
connection = pymongo.MongoClient("localhost", 27017)
database = connection['mydb_01']
collection = database['mycol_01']
data = {'Name' : "Akshay"}
collection.insert_one(data) | 17.636364 | 52 | 0.742268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.226804 |
2cd52f5ec1b4d2a4d6e332b743eb08c5891cf5ce | 13,410 | py | Python | training/loss/styleganV.py | maua-maua-maua/nvGAN | edea24c58646780c9fb8ea942e49708ce9d62421 | [
"MIT"
] | null | null | null | training/loss/styleganV.py | maua-maua-maua/nvGAN | edea24c58646780c9fb8ea942e49708ce9d62421 | [
"MIT"
] | null | null | null | training/loss/styleganV.py | maua-maua-maua/nvGAN | edea24c58646780c9fb8ea942e49708ce9d62421 | [
"MIT"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import random
import numpy as np
import torch
import torch.nn.functional as F
from torch_utils import misc, training_stats
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
class Loss:
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain): # to be overridden by subclass
raise NotImplementedError()
#----------------------------------------------------------------------------
class StyleGANVLoss(Loss):
def __init__(self, device, G_mapping, G_synthesis, D, augment_pipe=None, style_mixing_prob=0,
r1_gamma=0, pl_batch_shrink=2, pl_decay=0.01, pl_weight=2, video_consistent_aug=True,
sync_batch_start_time=False, motion_reg=0, motion_reg_num_frames=128, motion_reg_batch_size=256,
predict_dists_weight=0):
super().__init__()
self.device = device
self.G_mapping = G_mapping
self.G_synthesis = G_synthesis
self.D = D
self.augment_pipe = augment_pipe
self.style_mixing_prob = style_mixing_prob
self.r1_gamma = r1_gamma
self.pl_batch_shrink = pl_batch_shrink
self.pl_decay = pl_decay
self.pl_weight = pl_weight
self.pl_mean = torch.zeros([], device=device)
self.video_consistent_aug = video_consistent_aug
self.sync_batch_start_time = sync_batch_start_time
self.motion_reg = motion_reg
self.motion_reg_num_frames = motion_reg_num_frames
self.motion_reg_batch_size = motion_reg_batch_size
self.predict_dists_weight = predict_dists_weight
def run_G(self, z, c, t, l, sync):
with misc.ddp_sync(self.G_mapping, sync):
ws = self.G_mapping(z, c, l=l)
if self.style_mixing_prob > 0:
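            # Style mixing regularization: with probability
            # style_mixing_prob, replace the w vectors after a random
            # cutoff index with those produced from a fresh latent code.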
with torch.autograd.profiler.record_function('style_mixing'):
cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
ws[:, cutoff:] = self.G_mapping(torch.randn_like(z), c, l=l, skip_w_avg_update=True)[:, cutoff:]
with misc.ddp_sync(self.G_synthesis, sync):
out = self.G_synthesis(ws, t=t, c=c, l=l)
return out, ws
def run_D(self, img, c, t, sync):
if self.augment_pipe is not None:
if self.video_consistent_aug:
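                # Stack all frames of a clip along the channel axis so the
                # augmentation pipeline applies identical transforms to every
                # frame, keeping the augmented video temporally consistent.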
nf, ch, h, w = img.shape
f = self.G_synthesis.motion_encoder.num_frames_per_motion
n = nf // f
img = img.view(n, f * ch, h, w) # [n, f * ch, h, w]
img = self.augment_pipe(img) # [n, f * ch, h, w]
if self.video_consistent_aug:
img = img.view(n * f, ch, h, w) # [n * f, ch, h, w]
with misc.ddp_sync(self.D, sync):
outputs = self.D(img, c, t)
return outputs
def accumulate_gradients(self, phase, real_img, real_c, real_t, gen_z, gen_c, gen_t, gen_l, sync, gain):
assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']
do_Gmain = (phase in ['Gmain', 'Gboth'])
do_Dmain = (phase in ['Dmain', 'Dboth'])
do_Gpl = (phase in ['Greg', 'Gboth']) and (self.pl_weight != 0)
do_Dr1 = (phase in ['Dreg', 'Dboth']) and (self.r1_gamma != 0)
real_img = real_img.view(-1, *real_img.shape[2:]) # [batch_size * num_frames, c, h, w]
if self.sync_batch_start_time:
# Syncing the batch to the same start time
if self.sync_batch_start_time == 'random':
offset = gen_t[random.randint(0, len(gen_t) - 1), 0] # [1]
elif self.sync_batch_start_time == 'zero':
offset = 0 # [1]
elif self.sync_batch_start_time == 'min':
offset = gen_t.min() # [1]
else:
offset = None
            if offset is not None:
gen_t = (gen_t - gen_t[:, [0]]) + offset # [batch_size, nf]
# Gmain: Maximize logits for generated images.
if do_Gmain:
with torch.autograd.profiler.record_function('Gmain_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, gen_t, gen_l, sync=(sync and not do_Gpl)) # [batch_size * num_frames, c, h, w]
D_out_gen = self.run_D(gen_img, gen_c, gen_t, sync=False) # [batch_size]
training_stats.report('Loss/scores/fake', D_out_gen['image_logits'])
training_stats.report('Loss/signs/fake', D_out_gen['image_logits'].sign())
loss_Gmain = F.softplus(-D_out_gen['image_logits']) # -log(sigmoid(y))
if 'video_logits' in D_out_gen:
loss_Gmain_video = F.softplus(-D_out_gen['video_logits']).mean() # -log(sigmoid(y)) # [1]
training_stats.report('Loss/scores/fake_video', D_out_gen['video_logits'])
training_stats.report('Loss/G/loss_video', loss_Gmain_video)
else:
loss_Gmain_video = 0.0 # [1]
training_stats.report('Loss/G/loss', loss_Gmain)
with torch.autograd.profiler.record_function('Gmain_backward'):
(loss_Gmain + loss_Gmain_video).mean().mul(gain).backward()
if self.motion_reg > 0.0:
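                # Motion diversity regularizer: the loss below is the negative
                # log-variance of the predicted periods/phases across the
                # batch, so minimizing it pushes that variance up.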
with torch.autograd.profiler.record_function('Gmotion_reg_forward'):
w = torch.zeros(self.motion_reg_batch_size, self.G_mapping.w_dim, device=self.device) # [batch_size, w_dim]
c = torch.zeros(self.motion_reg_batch_size, self.G_mapping.c_dim) # [batch_size, c_dim]
l = torch.zeros(self.motion_reg_batch_size) # [batch_size]
                    t = torch.linspace(0, self.G_synthesis.motion_encoder.max_num_frames, self.motion_reg_num_frames, device=self.device).unsqueeze(0).repeat_interleave(self.motion_reg_batch_size, dim=0) # [batch_size, num_frames]
                    time_emb_coefs = self.G_synthesis.motion_encoder(c=c, t=t, l=l, w=w, return_time_embs_coefs=True) # {...}
periods = time_emb_coefs['periods'].view(self.motion_reg_batch_size, self.motion_reg_num_frames, -1) # [batch_size, num_frames, num_feats * num_fourier_feats]
phases = time_emb_coefs['phases'].view(self.motion_reg_batch_size, self.motion_reg_num_frames, -1) # [batch_size, num_frames, num_feats * num_fourier_feats]
periods_logvar = -(periods.var(dim=0) + 1e-8).log() # [num_frames, num_feats * num_fourier_feats]
phases_logvar = -(phases.var(dim=0) + 1e-8).log() # [num_frames, num_feats * num_fourier_feats]
loss_Gmotion_reg = (periods_logvar.mean() + phases_logvar.mean()) * self.motion_reg # [1]
dummy = time_emb_coefs['time_embs'].sum() * 0.0 # [1] <- for DDP consistency
training_stats.report('Loss/G/motion_reg', loss_Gmotion_reg)
with torch.autograd.profiler.record_function('Gmotion_reg_backward'):
(loss_Gmotion_reg + dummy).mul(gain).backward()
# Gpl: Apply path length regularization.
if do_Gpl:
with torch.autograd.profiler.record_function('Gpl_forward'):
batch_size = gen_z.shape[0] // self.pl_batch_shrink
gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size], gen_t[:batch_size], gen_l[:batch_size], sync=sync) # [batch_size * num_frames, c, h, w]
pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients():
pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0]
pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
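                # Track an exponential moving average of the path lengths and
                # penalize each sample's squared deviation from it.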
pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
self.pl_mean.copy_(pl_mean.detach())
pl_penalty = (pl_lengths - pl_mean).square()
training_stats.report('Loss/pl_penalty', pl_penalty)
loss_Gpl = pl_penalty * self.pl_weight
training_stats.report('Loss/G/reg', loss_Gpl)
with torch.autograd.profiler.record_function('Gpl_backward'):
loss_Gpl.mean().mul(gain).backward()
# Dmain: Minimize logits for generated images.
loss_Dgen = 0
if do_Dmain:
with torch.autograd.profiler.record_function('Dgen_forward'):
with torch.no_grad():
gen_img, _gen_ws = self.run_G(gen_z, gen_c, gen_t, gen_l, sync=False) # [batch_size * num_frames, c, h, w]
D_out_gen = self.run_D(gen_img, gen_c, gen_t, sync=False) # Gets synced by loss_Dreal.
training_stats.report('Loss/scores/fake', D_out_gen['image_logits'])
training_stats.report('Loss/signs/fake', D_out_gen['image_logits'].sign())
loss_Dgen = F.softplus(D_out_gen['image_logits']) # -log(1 - sigmoid(y))
if self.predict_dists_weight > 0.0:
t_diffs_gen = gen_t[:, 1] - gen_t[:, 0] # [batch_size]
loss_Dgen_dist_preds = F.cross_entropy(D_out_gen['dist_preds'], t_diffs_gen.long()) # [batch_size]
training_stats.report('Loss/D/dist_preds_gen', loss_Dgen_dist_preds)
else:
loss_Dgen_dist_preds = 0.0
if 'video_logits' in D_out_gen:
loss_Dgen_video = F.softplus(D_out_gen['video_logits']).mean() # [1]
training_stats.report('Loss/scores/fake_video', D_out_gen['video_logits'])
else:
loss_Dgen_video = 0.0 # [1]
with torch.autograd.profiler.record_function('Dgen_backward'):
(loss_Dgen + loss_Dgen_video + loss_Dgen_dist_preds).mean().mul(gain).backward()
# Dmain: Maximize logits for real images.
# Dr1: Apply R1 regularization.
if do_Dmain or do_Dr1:
name = 'Dreal_Dr1' if do_Dmain and do_Dr1 else 'Dreal' if do_Dmain else 'Dr1'
with torch.autograd.profiler.record_function(name + '_forward'):
real_img_tmp = real_img.detach().requires_grad_(do_Dr1)
D_out_real = self.run_D(real_img_tmp, real_c, real_t, sync=sync)
training_stats.report('Loss/scores/real', D_out_real['image_logits'])
training_stats.report('Loss/signs/real', D_out_real['image_logits'].sign())
loss_Dreal = 0
loss_Dreal_dist_preds = 0
loss_Dreal_video = 0.0 # [1]
if do_Dmain:
loss_Dreal = F.softplus(-D_out_real['image_logits']) # -log(sigmoid(y))
training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
if 'video_logits' in D_out_gen:
loss_Dreal_video = F.softplus(-D_out_real['video_logits']).mean() # [1]
training_stats.report('Loss/scores/real_video', D_out_real['video_logits'])
training_stats.report('Loss/D/loss_video', loss_Dgen_video + loss_Dreal_video)
if self.predict_dists_weight > 0.0:
t_diffs_real = real_t[:, 1] - real_t[:, 0] # [batch_size]
loss_Dreal_dist_preds = F.cross_entropy(D_out_real['dist_preds'], t_diffs_real.long()) # [batch_size]
training_stats.report('Loss/D/dist_preds_real', loss_Dreal_dist_preds)
loss_Dr1 = 0
if do_Dr1:
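                    # R1: penalize the squared gradient magnitude of the
                    # discriminator's logits w.r.t. the real images.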
with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
r1_grads = torch.autograd.grad(outputs=[D_out_real['image_logits'].sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
r1_penalty = r1_grads.square().sum([1,2,3])
loss_Dr1 = r1_penalty * (self.r1_gamma / 2) # [batch_size * num_frames_per_sample]
loss_Dr1 = loss_Dr1.view(-1, len(real_img_tmp) // len(D_out_real['image_logits'])).mean(dim=1) # [batch_size]
training_stats.report('Loss/r1_penalty', r1_penalty)
training_stats.report('Loss/D/reg', loss_Dr1)
dummy_video_logits = (D_out_real["video_logits"].sum() * 0.0) if "video_logits" in D_out_real else 0.0
with torch.autograd.profiler.record_function(name + '_backward'):
(D_out_real["image_logits"] * 0 + dummy_video_logits + loss_Dreal + loss_Dreal_video + loss_Dr1 + loss_Dreal_dist_preds).mean().mul(gain).backward()
#----------------------------------------------------------------------------
| 58.304348 | 220 | 0.602908 | 12,574 | 0.937658 | 0 | 0 | 0 | 0 | 0 | 0 | 2,803 | 0.209023 |
2cd748d2051b8e82062e8e8a4558cedcec3899bb | 701 | py | Python | spec/__init__.py | deep-spin/spec-blackboxnlp | 23db7a559e09ff7f63ede06b04cad226432b90db | [
"MIT"
] | 2 | 2020-11-26T07:46:48.000Z | 2021-07-28T08:06:58.000Z | spec/__init__.py | deep-spin/spec-blackboxnlp | 23db7a559e09ff7f63ede06b04cad226432b90db | [
"MIT"
] | null | null | null | spec/__init__.py | deep-spin/spec-blackboxnlp | 23db7a559e09ff7f63ede06b04cad226432b90db | [
"MIT"
] | null | null | null | """
SpEC
~~~~~~~~~~~~~~~~~~~
Sparsity, Explainability, and Communication
:copyright: (c) 2019 by Marcos Treviso
:licence: MIT, see LICENSE for more details
"""
# Generate your own AsciiArt at:
# patorjk.com/software/taag/#f=Calvin%20S&t=SpEC
__banner__ = """
_____ _____ _____
| __|___| __| |
|__ | . | __| --|
|_____| _|_____|_____|
|_|
"""
__prog__ = "spec"
__title__ = 'SpEC'
__summary__ = 'Sparsity, Explainability, and Communication'
__uri__ = 'https://github.com/mtreviso/spec'
__version__ = '0.0.1'
__author__ = 'Marcos V. Treviso and Andre F. T. Martins'
__email__ = 'marcostreviso@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2019 Marcos Treviso'
| 21.90625 | 59 | 0.663338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 554 | 0.7903 |
2cd7bb87b2723af7e7c3023b2524d9f7f046ca6e | 722 | py | Python | BurstPaperWallet/initialize.py | MrPilotMan/BurstPaperWallet | 5a98a646487d6049f455680fe26ff10185b6d097 | [
"Apache-2.0"
] | 5 | 2018-07-21T09:05:35.000Z | 2018-09-18T16:36:52.000Z | BurstPaperWallet/initialize.py | MrPilotMan/Burst-Paper-Wallet | 5a98a646487d6049f455680fe26ff10185b6d097 | [
"MIT"
] | null | null | null | BurstPaperWallet/initialize.py | MrPilotMan/Burst-Paper-Wallet | 5a98a646487d6049f455680fe26ff10185b6d097 | [
"MIT"
] | null | null | null | from BurstPaperWallet.api import brs_api
from BurstPaperWallet.api import passphrase_url_transform as transform
def initialize(account, old_passphrase, fee=735000):
url = "sendMoney&recipient={}&secretPhrase={}&amountNQT=1&feeNQT={}&recipientPublicKey={}&deadline=1440"\
.format(account["reed solomon"], transform(old_passphrase), fee, account["public key"])
print(brs_api(url))
def check_balance(reed_solomon):
url = "getGuaranteedBalance&account={}".format(reed_solomon)
balance = brs_api(url)
return balance["guaranteedBalanceNQT"]
def adjust_fee(balance, fee):
if fee is None:
fee = 735000
if int(balance) >= fee:
return fee
else:
return balance
| 27.769231 | 109 | 0.710526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.247922 |
2cd8482be7ce334c43cdcfd74e894afdafd98102 | 157 | py | Python | planning/data/__init__.py | XinyuHua/pair-emnlp2020 | 45f8b8ea3752dfb43aa75914afab1b29b2f10c50 | [
"MIT"
] | 20 | 2020-10-10T05:38:14.000Z | 2022-02-15T01:07:39.000Z | planning/data/__init__.py | XinyuHua/pair-emnlp2020 | 45f8b8ea3752dfb43aa75914afab1b29b2f10c50 | [
"MIT"
] | 4 | 2020-10-20T03:29:41.000Z | 2021-04-23T16:10:34.000Z | planning/data/__init__.py | XinyuHua/pair-emnlp2020 | 45f8b8ea3752dfb43aa75914afab1b29b2f10c50 | [
"MIT"
] | 2 | 2021-07-06T01:20:01.000Z | 2021-08-19T05:26:24.000Z | from .dictionary import BertDictionary
from .text_planning_dataset import TextPlanningDataset
__all__ = [
'BertDictionary',
'TextPlanningDataset',
] | 22.428571 | 54 | 0.789809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.235669 |
2cd953c3506112aa015b7a530b4dc9235de2bd55 | 3,850 | py | Python | vaemodel.py | iakash2604/Music-AI-IIT_Delhi | ef564c39a141c828d58536da621ffcc8cec41f9d | [
"MIT"
] | 1 | 2018-04-12T01:57:47.000Z | 2018-04-12T01:57:47.000Z | vaemodel.py | iakash2604/Music-AI-IIT_Delhi | ef564c39a141c828d58536da621ffcc8cec41f9d | [
"MIT"
] | null | null | null | vaemodel.py | iakash2604/Music-AI-IIT_Delhi | ef564c39a141c828d58536da621ffcc8cec41f9d | [
"MIT"
] | null | null | null | import numpy as np
import os
import keras
from keras import regularizers, losses
from keras.models import Sequential, Model
from keras.layers import Lambda, Input, Dense, Dropout, Reshape, BatchNormalization, Softmax, Concatenate
from keras.utils import plot_model
import keras.backend as K
class multiVAE:
def __init__(self, sampleLen, numUnits, enc_denseLayerSizes, enc_denseLayerActivations, enc_dropouts, enc_batchnorms, dec_denseLayerSizes, dec_denseLayerActivations, dec_dropouts, dec_batchnorms, inf_layerSize):
self.sampleLen = sampleLen
self.numUnits = numUnits
self.enc_denseLayerSizes = enc_denseLayerSizes
self.enc_denseLayerActivations = enc_denseLayerActivations
self.enc_dropouts = enc_dropouts
self.enc_batchnorms = enc_batchnorms
self.dec_denseLayerSizes = dec_denseLayerSizes
self.dec_denseLayerActivations = dec_denseLayerActivations
self.dec_dropouts = dec_dropouts
self.dec_batchnorms = dec_batchnorms
self.inf_layerSize = inf_layerSize
self.trainModel = None
self.inf_layer = None
self.genModel = [None]*self.numUnits
def sample_z(self, args):
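        # Reparameterization trick: z = mean + sigma * eps with eps ~ N(0, I),
        # so gradients can flow through the sampling step. NOTE: the batch
        # size is hard-coded to 32 below, so this assumes batches of exactly
        # 32 samples (K.shape(mean)[0] would generalize it).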
mean, log_sigma = args
eps = K.random_normal(shape=(32, self.inf_layerSize), mean=0., stddev=1.)
return mean + K.exp(log_sigma / 2) * eps
def createInputList(self):
m = [None]*self.numUnits
for i, _ in enumerate(m):
m[i] = Input(shape = (self.sampleLen, ), name = 'input'+str(i+1))
return m
def encoder(self, m_i, i):
temp = len(self.enc_dropouts)
for l, act, drop, bn, j in zip(self.enc_denseLayerSizes, self.enc_denseLayerActivations, self.enc_dropouts, self.enc_batchnorms, range(temp)):
m_i = Dense(l, activation=act, name=str(i+1)+'enc_dense'+str(j+1))(m_i)
m_i = Dropout(drop, name=str(i+1)+'enc_dropout'+str(j+1))(m_i)
if(bn):
m_i = BatchNormalization(name=str(i+1)+'enc_batchnorm'+str(j+1))(m_i)
return m_i
def decoder(self, z_i, i):
temp = len(self.dec_dropouts)
for l, act, drop, bn, j in zip(self.dec_denseLayerSizes, self.dec_denseLayerActivations, self.dec_dropouts, self.dec_batchnorms, range(temp)):
z_i = Dense(l, activation=act, name=str(i+1)+'dec_dense'+str(j+1))(z_i)
z_i = Dropout(drop, name=str(i+1)+'dec_dropout'+str(j+1))(z_i)
if(bn):
z_i = BatchNormalization(name=str(i+1)+'dec_batchnorm'+str(j+1))(z_i)
return z_i
def createFullNetwork(self):
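        # Each of the numUnits inputs gets its own encoder; the encoder
        # outputs are concatenated and mapped to a shared Gaussian latent
        # (mean, log_sigma), from which a single z is sampled and fed to
        # numUnits separate decoders.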
m = self.createInputList()
y = [None]*self.numUnits
m_ = [None]*self.numUnits
#use list comprehension to make this better
for i, m_i in enumerate(m):
y[i] = self.encoder(m_i, i)
# y_len = y[0].get_shape()[1:].as_list()[0]
z_in = Concatenate()(y)
mean = Dense(self.inf_layerSize, activation='linear', name='mean')(z_in)
log_sigma = Dense(self.inf_layerSize, activation='linear', name='stddev')(z_in)
z_out = Lambda(self.sample_z, name='inf_layer')([mean, log_sigma])
# self.inf_layer = Input(shape = z_out.get_shape().as_list())
# self.createDecoderModel(z_out)
for i in range(self.numUnits):
m_[i] = self.decoder(z_out, i)
self.trainModel = Model(inputs=m, outputs=m_)
plot_model(self.trainModel, to_file='multiVAE.png')
def trainFullNetwork(self):
return None
def extractGenModel(self):
temp = len(self.dec_batchnorms)
self.inf_layer = Input(shape = self.trainModel.get_layer('inf_layer').output_shape, name='gen_input')
for i in range(self.numUnits):
temp_layer = self.inf_layer
for j, bn in zip(range(temp), self.dec_batchnorms):
temp_layer = self.trainModel.get_layer(str(i+1)+'dec_dense'+str(j+1))(temp_layer)
temp_layer = self.trainModel.get_layer(str(i+1)+'dec_dropout'+str(j+1))(temp_layer)
if(bn):
temp_layer = self.trainModel.get_layer(str(i+1)+'dec_batchnorm'+str(j+1))(temp_layer)
self.genModel[i] = Model(inputs=self.inf_layer, outputs=temp_layer)
plot_model(self.genModel[i], to_file='genModel'+str(i+1)+'.png')
| 39.690722 | 212 | 0.72961 | 3,557 | 0.923896 | 0 | 0 | 0 | 0 | 0 | 0 | 396 | 0.102857 |
2cda4cc79b80b9a4194f66d4309cdecba7f0a2e3 | 608 | py | Python | PycharmProjects/untitled1/printGraph.py | jiankangliu/baseOfPython | a10e81c79bc6fc3807ca8715fb1be56df527742c | [
"MIT"
] | null | null | null | PycharmProjects/untitled1/printGraph.py | jiankangliu/baseOfPython | a10e81c79bc6fc3807ca8715fb1be56df527742c | [
"MIT"
] | null | null | null | PycharmProjects/untitled1/printGraph.py | jiankangliu/baseOfPython | a10e81c79bc6fc3807ca8715fb1be56df527742c | [
"MIT"
] | null | null | null | # 第一行一个*,第二行两个*........ 共十行
# 打印乘法口诀
n = 1
while n <= 10:
n1 = n
while n1:
print("*", end = "")
n1 -= 1
print()
n += 1
n2 = 1
n3 = 1
while n2 < 10:
while n3 <= n2:
print(f"{n3}*{n2}={n3*n2}",end="\t")
n3 += 1
print()
n2 += 1
n3 = 1
n4 = 0
while n4 < 40:
n5 = 0
if n4 < 20:
while n5 < n4:
print(' ',end='')
n5 += 1
print('*')
else:
n5 = 39 - n4
while n5:
print(' ',end='')
n5 -= 1
print('*')
n4 += 1
| 16 | 45 | 0.320724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.189815 |
2cda9fc4c3b7e52ae6618733b4ca5902d28c7ffa | 59 | py | Python | src/models/exif_sc/__init__.py | lemonwaffle/nisemono | f2b32dbff63ea6de47460713aac8a768ff59f126 | [
"MIT"
] | 7 | 2021-07-08T05:17:19.000Z | 2021-12-29T05:45:24.000Z | src/models/exif_sc/__init__.py | yizhe-ang/fake-detection-lab | f2b32dbff63ea6de47460713aac8a768ff59f126 | [
"MIT"
] | null | null | null | src/models/exif_sc/__init__.py | yizhe-ang/fake-detection-lab | f2b32dbff63ea6de47460713aac8a768ff59f126 | [
"MIT"
] | null | null | null | from .exif_sc import EXIF_SC
from .networks import EXIF_Net | 29.5 | 30 | 0.847458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2cdb285432c69bfe1c1fbf5662fc9c8781add56e | 766 | py | Python | tests/fixtures.py | luisfmcalado/coinoxr | e7cf95d717aa9b58e458332bfd6fd2d4172d175f | [
"MIT"
] | 2 | 2020-09-05T20:48:54.000Z | 2022-03-28T11:00:15.000Z | tests/fixtures.py | luisfmcalado/coinoxr | e7cf95d717aa9b58e458332bfd6fd2d4172d175f | [
"MIT"
] | null | null | null | tests/fixtures.py | luisfmcalado/coinoxr | e7cf95d717aa9b58e458332bfd6fd2d4172d175f | [
"MIT"
] | null | null | null | import pytest
from tests.stub_client import StubHttpClient
from coinoxr.requestor import Requestor
from coinoxr.response import Response
def content(file):
return StubHttpClient.json(file)["content"]
@pytest.fixture
def client():
client = StubHttpClient()
client.add_app_id("fake_app_id")
client.add_date("2012-07-10")
client.add_date("2012-07-12")
return client
@pytest.fixture
def client_get_mock(mocker):
def client_get_mock(status_code, json):
response = Response(status_code, json)
client = mocker.Mock(StubHttpClient)
client.get = mocker.Mock(return_value=response)
return client
return client_get_mock
@pytest.fixture
def requestor(client):
return Requestor("fake_app_id", client)
| 21.885714 | 55 | 0.73107 | 0 | 0 | 0 | 0 | 550 | 0.718016 | 0 | 0 | 59 | 0.077023 |
2cdbad0e2ea4c368579e59aa921da21b9efa73a0 | 605 | py | Python | CPJIntroduction/CPJIntroduction/app.py | zhaishuai/CPJIntroduction | ee0dafc1f9982708a75d7186cce1c1bfac7419e8 | [
"MIT"
] | null | null | null | CPJIntroduction/CPJIntroduction/app.py | zhaishuai/CPJIntroduction | ee0dafc1f9982708a75d7186cce1c1bfac7419e8 | [
"MIT"
] | null | null | null | CPJIntroduction/CPJIntroduction/app.py | zhaishuai/CPJIntroduction | ee0dafc1f9982708a75d7186cce1c1bfac7419e8 | [
"MIT"
] | null | null | null | #!flask/bin/python
# coding=utf-8
from flask import Flask, jsonify
app = Flask(__name__)
tasks = {
"event_id" : "1.9",
"introductions" : [
{
"title" : "情怀",
"details" : "各种无敌, 各种牛人, 各种挑战, 等你来战",
"image" : "hello.png",
"background_image" : "backgroundImage.png"
},
{
"title" : "钉子",
"details" : "各种硬, 各种尖, 各种钻, 钉子精神",
"image" : "hello.png",
"background_image" : "backgroundImage.png"
}]
}
@app.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
return jsonify(tasks)
if __name__ == '__main__':
app.run(debug=True) | 20.862069 | 51 | 0.565289 | 0 | 0 | 0 | 0 | 94 | 0.137628 | 0 | 0 | 376 | 0.550512 |
2cdd770cb59585fff2f8363916717357018b2efd | 34,050 | py | Python | hcpre/duke_siemens/util_dicom_siemens.py | beOn/hcpre | 8c56d4f72c06abcb5d2d2b64e7e37fee040f2be4 | [
"BSD-3-Clause"
] | 10 | 2016-09-17T09:28:16.000Z | 2019-07-31T18:40:12.000Z | hcpre/duke_siemens/util_dicom_siemens.py | beOn/hcpre | 8c56d4f72c06abcb5d2d2b64e7e37fee040f2be4 | [
"BSD-3-Clause"
] | 4 | 2017-10-30T19:02:40.000Z | 2018-01-14T00:28:46.000Z | hcpre/duke_siemens/util_dicom_siemens.py | beOn/hcpre | 8c56d4f72c06abcb5d2d2b64e7e37fee040f2be4 | [
"BSD-3-Clause"
] | 5 | 2015-03-30T17:41:32.000Z | 2020-10-15T13:17:22.000Z | """
Routines for extracting data from Siemens DICOM files.
The simplest way to read a file is to call read(filename). If you like you
can also call lower level functions like read_data().
Except for the map of internal data types to numpy type strings (which
doesn't require an import of numpy), this code is deliberately ignorant of
numpy. It returns native Python types that are easy to convert into
numpy types.
"""
# Python modules
from __future__ import division
import struct
import exceptions
import math
# 3rd party modules
import dicom
# Our modules
import util_mrs_file
import constants
TYPE_NONE = 0
TYPE_IMAGE = 1
TYPE_SPECTROSCOPY = 2
# Change to True to enable the assert() statements sprinkled through the code
ASSERTIONS_ENABLED = False
# THese are some Siemens-specific tags
TAG_CONTENT_TYPE = (0x0029, 0x1008)
TAG_SPECTROSCOPY_DATA = (0x7fe1, 0x1010)
# I (Philip) ported much of the private tag parsing code from the IDL routines
# dicom_fill_rsp.pro and dicom_fill_util.pro, except for the CSA header
# parsing which is a port of C++ code in the GDCM project.
# Since a lot (all?) of the Siemens format is undocumented, there are magic
# numbers and logic in here that I can't explain. Sorry! Where appropriate
# I have copied or paraphrased comments from the IDL code; they're marked
# with [IDL]. Unmarked comments are mine. Where ambiguous, I labelled my
# comments with [PS] (Philip Semanchuk).
def read(filename, ignore_data=False):
""" This is the simplest (and recommended) way for our code to read a
Siemens DICOM file.
It returns a tuple of (parameters, data). The parameters are a dict.
The data is in a Python list.
"""
# Since a DICOM file is params + data together, it's not so simple to
# ignore the data part. The best we can do is tell PyDicom to apply
# lazy evaluation which is probably less efficient in the long run.
defer_size = 4096 if ignore_data else 0
    dataset = dicom.read_file(filename, defer_size=defer_size)
params = read_parameters_from_dataset(dataset)
data = read_data_from_dataset(dataset)
return params, data
def read_parameters(filename):
return read_parameters_from_dataset(dicom.read_file(filename))
def read_data(filename):
return read_data_from_dataset(dicom.read_file(filename))
def read_data_from_dataset(dataset):
"""Given a PyDicom dataset, returns the data in the Siemens DICOM
spectroscopy data tag (0x7fe1, 0x1010) as a list of complex numbers.
"""
data = _get(dataset, TAG_SPECTROSCOPY_DATA)
if data:
# Big simplifying assumptions --
# 1) Data is a series of complex numbers organized as ririri...
# where r = real and i = imaginary.
# 2) Each real & imaginary number is a 4 byte float.
# 3) Data is little endian.
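        # Example (a sketch): 16 bytes of tag data unpack to four floats
        # r0, i0, r1, i1, which collapse to [complex(r0, i0), complex(r1, i1)].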
data = struct.unpack("<%df" % (len(data) / 4), data)
data = util_mrs_file.collapse_complexes(data)
else:
data = [ ]
return data
def read_parameters_from_dataset(dataset):
"""Given a PyDicom dataset, returns a fairly extensive subset of the
parameters therein as a dictionary.
"""
params = { }
# The code below refers to slice_index as a variable, but here it is
# hardcoded to one. It could vary, in theory, but in practice I don't
# know how it would actually be used. How would the slice index or
# indices be passed? How would the data be returned? For now, I'll
# leave the slice code active but hardcode the index to 1.
slice_index = 1
# [PS] - Even after porting this code I still can't figure out what
# ptag_img and ptag_ser stand for, so I left the names as is.
ptag_img = { }
ptag_ser = { }
# (0x0029, 0x__10) is one of several possibilities
# - SIEMENS CSA NON-IMAGE, CSA Data Info
# - SIEMENS CSA HEADER, CSA Image Header Info
# - SIEMENS CSA ENVELOPE, syngo Report Data
# - SIEMENS MEDCOM HEADER, MedCom Header Info
# - SIEMENS MEDCOM OOG, MedCom OOG Info (MEDCOM Object Oriented Graphics)
# Pydicom identifies it as "CSA Image Header Info"
for tag in ( (0x0029, 0x1010), (0x0029, 0x1210), (0x0029, 0x1110) ):
tag_data = dataset.get(tag, None)
if tag_data:
break
if tag_data:
ptag_img = _parse_csa_header(tag_data.value)
# [IDL] Access the SERIES Shadow Data
# [PS] I don't know what makes this "shadow" data.
for tag in ( (0x0029, 0x1020), (0x0029, 0x1220), (0x0029, 0x1120) ):
tag_data = dataset.get(tag, None)
if tag_data:
break
if tag_data:
ptag_ser = _parse_csa_header(tag_data.value)
# [IDL] "MrProtocol" (VA25) and "MrPhoenixProtocol" (VB13) are special
# elements that contain many parameters.
if ptag_ser.get("MrProtocol", ""):
prot_ser = _parse_protocol_data(ptag_ser["MrProtocol"])
if ptag_ser.get("MrPhoenixProtocol", ""):
prot_ser = _parse_protocol_data(ptag_ser["MrPhoenixProtocol"])
# [IDL] Determine if file is SVS,SI,EPSI, or OTHER
# [PS] IDL code doesn't match comments. Possibilities appear to
# include EPSI, SVS, CSI, JPRESS and SVSLIP2. "OTHER" isn't
# considered.
# EPSI = Echo-Planar Spectroscopic Imaging
# SVS = Single voxel spectroscopy
# CSI = Chemical Shift Imaging
# JPRESS = J-resolved spectroscopy
# SVSLIP2 = No idea!
is_epsi = False
is_svs = False
is_csi = False
is_jpress = False
is_svslip2 = False
# [IDL] Protocol name
parameter_filename = _extract_from_quotes(prot_ser.get("tProtocolName", ""))
parameter_filename = parameter_filename.strip()
# [IDL] Sequence file name
sequence_filename = _extract_from_quotes(prot_ser.get("tSequenceFileName", ""))
sequence_filename = sequence_filename.strip()
sequence_filename2 = ptag_img.get("SequenceName", "")
sequence_filename2 = sequence_filename2.strip()
parameter_filename_lower = parameter_filename.lower()
sequence_filename_lower = sequence_filename.lower()
sequence_filename2_lower = sequence_filename2.lower()
is_epsi = ("epsi" in (parameter_filename_lower, sequence_filename_lower))
is_svs = ("svs" in (parameter_filename_lower, sequence_filename_lower,
sequence_filename2_lower))
if "fid" in (parameter_filename_lower, sequence_filename_lower):
if "csi" in (parameter_filename_lower, sequence_filename_lower):
is_csi = True
else:
is_svs = True
if "csi" in (parameter_filename_lower, sequence_filename_lower):
is_csi = True
is_jpress = ("jpress" in (parameter_filename_lower,
sequence_filename_lower))
is_svslip2 = ("svs_li2" in (parameter_filename_lower,
sequence_filename2_lower))
# Patient Info
params["patient_name"] = _get(dataset, (0x0010, 0x0010), "")
params["patient_id"] = _get(dataset, (0x0010, 0x0020))
params["patient_birthdate"] = _get(dataset, (0x0010, 0x0030))
params["patient_sex"] = _get(dataset, (0x0010, 0x0040), "")
# [PS] Siemens stores the age as nnnY where 'n' is a digit, e.g. 042Y
params["patient_age"] = \
int(_get(dataset, (0x0010, 0x1010), "000Y")[:3])
params["patient_weight"] = round(_get(dataset, (0x0010, 0x1030), 0))
params["study_code"] = _get(dataset, (0x0008, 0x1030), "")
# Identification info
params["bed_move_fraction"] = 0.0
s = _get(dataset, (0x0008, 0x0080), "")
if s:
s = " " + s
s += _get(dataset, (0x0008, 0x1090), "")
params["institution_id"] = s
params["parameter_filename"] = parameter_filename
params["study_type"] = "spec"
# DICOM date format is YYYYMMDD
params["bed_move_date"] = _get(dataset, (0x0008, 0x0020), "")
params["measure_date"] = params["bed_move_date"]
# DICOM time format is hhmmss.fraction
params["bed_move_time"] = _get(dataset, (0x0008, 0x0030), "")
params["comment_1"] = _get(dataset, (0x0008, 0x0031), "")
if not params["comment_1"]:
params["comment_1"] = _get(dataset, (0x0020, 0x4000), "")
# DICOM time format is hhmmss.fraction
params["measure_time"] = _get(dataset, (0x0008, 0x0032), "")
params["sequence_filename"] = ptag_img.get("SequenceName", "")
params["sequence_type"] = ptag_img.get("SequenceName", "")
# Measurement info
params["echo_position"] = "0.0"
params["image_contrast_mode"] = "unknown"
params["kspace_mode"] = "unknown"
params["measured_slices"] = "1"
params["saturation_bands"] = "0"
# Seems to me that a quantity called "NumberOfAverages" would be an
# int, but it is stored as a float, e.g. "128.0000" which makes
# Python's int() choke unless I run it through float() first.
params["averages"] = int(_float(ptag_img.get("NumberOfAverages", "")))
params["flip_angle"] = _float(ptag_img.get("FlipAngle", ""))
# [PS] DICOM stores frequency as MHz, we store it as Hz. Mega = 1x10(6)
params["frequency"] = float(ptag_img.get("ImagingFrequency", 0)) * 1e6
inversion_time = float(ptag_img.get("InversionTime", 0))
params["inversion_time_1"] = inversion_time
params["number_inversions"] = 1 if inversion_time else 0
params["measured_echoes"] = ptag_img.get("EchoTrainLength", "1")
params["nucleus"] = ptag_img.get("ImagedNucleus", "")
params["prescans"] = prot_ser.get("sSpecPara.lPreparingScans", 0)
# Gain
gain = prot_ser.get("sRXSPEC.lGain", None)
if gain == 0:
gain = "-20.0"
elif gain == 1:
gain = "0.0"
else:
gain = ""
params["receiver_gain"] = gain
params["ft_scale_factor"] = \
float(prot_ser.get("sRXSPEC.aFFT_SCALE[0].flFactor", 0))
# Receiver Coil
coil = prot_ser.get("sCOIL_SELECT_MEAS.asList[0].sCoilElementID.tCoilID", "")
params["receiver_coil"] = _extract_from_quotes(coil)
# [IDL] differs in EPSI
params["repetition_time_1"] = float(prot_ser.get("alTR[0]", 0)) * 0.001
sweep_width = ""
remove_oversample_flag = prot_ser.get("sSpecPara.ucRemoveOversampling", "")
remove_oversample_flag = (remove_oversample_flag.strip() == "0x1")
readout_os = float(ptag_ser.get("ReadoutOS", 1.0))
dwelltime = float(ptag_img.get("RealDwellTime", 1.0)) * 1e-9
if dwelltime:
sweep_width = 1 / dwelltime
if not remove_oversample_flag:
sweep_width *= readout_os
sweep_width = str(sweep_width)
params["transmitter_voltage"] = \
prot_ser.get("sTXSPEC.asNucleusInfo[0].flReferenceAmplitude", "0.0")
params["total_duration"] = \
prot_ser.get("lTotalScanTimeSec", "0.0")
prefix = "sSliceArray.asSlice[%d]." % slice_index
image_parameters = (
("image_dimension_line", "dPhaseFOV"),
("image_dimension_column", "dReadoutFOV"),
("image_dimension_partition", "dThickness"),
("image_position_sagittal", "sPosition.dSag"),
("image_position_coronal", "sPosition.dCor"),
("image_position_transverse", "sPosition.dTra"),
)
for key, name in image_parameters:
params[key] = float(prot_ser.get(prefix + name, "0.0"))
# [IDL] Image Normal/Column
image_orientation = ptag_img.get("ImageOrientationPatient", "")
if not image_orientation:
slice_orientation_pitch = ""
slice_distance = ""
else:
# image_orientation is a list of strings, e.g. --
# ['-1.00000000', '0.00000000', '0.00000000', '0.00000000',
# '1.00000000', '0.00000000']
# [IDL] If the data we are processing is a Single Voxel
# Spectroscopy data, interchange rows and columns. Due to an error
# in the protocol used.
if is_svs:
image_orientation = image_orientation[3:] + image_orientation[:3]
# Convert the values to float and discard ones smaller than 1e-4
f = lambda value: 0.0 if abs(value) < 1e-4 else value
image_orientation = [f(float(value)) for value in image_orientation]
row = image_orientation[:3]
column = image_orientation[3:6]
normal = ( ((row[1] * column[2]) - (row[2] * column[1])),
((row[2] * column[0]) - (row[0] * column[2])),
((row[0] * column[1]) - (row[1] * column[0])),
)
params["image_normal_sagittal"] = normal[0]
params["image_normal_coronal"] = normal[1]
params["image_normal_transverse"] = normal[2]
params["image_column_sagittal"] = column[0]
params["image_column_coronal"] = column[0]
params["image_column_transverse"] = column[0]
# Second part of the return tuple is orientation; we don't use it.
slice_orientation_pitch, _ = _dicom_orientation_string(normal)
# Slice distance
# http://en.wikipedia.org/wiki/Dot_product
keys = ("image_position_sagittal", "image_position_coronal",
"image_position_transverse")
a = [params[key] for key in keys]
b = normal
bb = math.sqrt(sum([value ** 2 for value in normal]))
slice_distance = ((a[0] * b[0]) + (a[1] * b[1]) + (a[2] * b[2])) / bb
params["slice_orientation_pitch"] = slice_orientation_pitch
params["slice_distance"] = slice_distance
regions = ( ("region_dimension_line", "dPhaseFOV"),
("region_dimension_column", "dReadoutFOV"),
("region_dimension_partition", "dThickness"),
("region_position_sagittal", "sPosition.dSag"),
("region_position_coronal", "sPosition.dCor"),
("region_position_transverse", "sPosition.dTra"),
)
for key, name in regions:
name = "sSpecPara.sVoI." + name
params[key] = float(prot_ser.get(name, 0))
# 'DATA INFORMATION'
params["measure_size_spectral"] = \
long(prot_ser.get('sSpecPara.lVectorSize', 0))
params["slice_thickness"] = _float(ptag_img.get("SliceThickness", 0))
params["current_slice"] = "1"
params["number_echoes"] = "1"
params["number_slices"] = "1"
params["data_size_spectral"] = params["measure_size_spectral"]
# ;------------------------------------------------------
# [IDL] Sequence Specific Changes
if not is_epsi:
# [IDL] Echo time - JPRESS handling added by Dragan
echo_time = 0.0
if is_jpress:
# [IDL] Yingjian saves echo time in a private 'echotime' field
# [PS] The IDL code didn't use a dict to store these values
# but instead did a brute force case-insensitive search over
# an array of strings. In that context, key case didn't matter
# but here it does.
keys = prot_ser.keys()
for key in keys:
if key.upper() == "ECHOTIME":
echo_time = float(prot_ser[key])
if is_svslip2:
# [IDL] BJS found TE value set in ICE to be updated in
# 'echotime' field
# [PS] The IDL code didn't use a dict to store these values
# but instead did a brute force case-insensitive search over
# an array of strings. In that context, key case didn't matter
# but here it does.
keys = ptag_img.keys()
for key in keys:
if key.upper() == "ECHOTIME":
echo_time = float(ptag_img[key])
if not echo_time:
# [IDL] still no echo time - try std place
echo_time = float(prot_ser.get('alTE[0]', 0.0))
echo_time /= 1000
params["echo_time"] = echo_time
params["data_size_line"] = \
int(prot_ser.get('sSpecPara.lFinalMatrixSizePhase', 1))
params["data_size_column"] = \
int(prot_ser.get('sSpecPara.lFinalMatrixSizeRead', 1))
params["data_size_partition"] = \
int(prot_ser.get('sSpecPara.lFinalMatrixSizeSlice', 1))
if is_svs:
# [IDL] For Single Voxel Spectroscopy data (SVS) only
params["image_dimension_line"] = \
params["region_dimension_line"]
params["image_dimension_column"] = \
params["region_dimension_column"]
params["image_dimension_partition"] = \
params["region_dimension_partition"]
# [IDL] For SVS data the following three parameters cannot be
# anything other than 1
params["measure_size_line"] = 1
params["measure_size_column"] = 1
params["measure_size_partition"] = 1
else:
# Not SVS
# ;--------------------------------------------------
# ; [IDL] For CSI or OTHER Spectroscopy data only
# ;--------------------------------------------------
measure_size_line = int(prot_ser.get('sKSpace.lPhaseEncodingLines', 1))
params["measure_size_line"] = str(measure_size_line)
measure_size_column = int(prot_ser.get('sKSpace.lPhaseEncodingLines', 0))
params["measure_size_column"] = str(measure_size_column)
measure_size_partition = int(prot_ser.get('sKSpace.lPartitions', '0'))
kspace_dimension = prot_ser.get('sKSpace.ucDimension', '')
if kspace_dimension.strip() == "0x2":
measure_size_partition = 1
params["data_size_partition"] = 1
data_size_partition = 1
params["measure_size_partition"] = measure_size_partition
if sequence_filename in ("svs_cp_press", "svs_se_ir", "svs_tavg"):
# [IDL] Inversion Type 0-Volume,1-None
s = prot_ser.get("SPREPPULSES.UCINVERSION", "")
if s == "0x1":
params["number_inversions"] = 1
elif s == "0x2":
params["number_inversions"] = 0
# else:
# params["number_inversions"] doesn't get set at all.
# This matches the behavior of the IDL code. Note that
# params["number_inversions"] is also populated
# unconditionally in code many lines above.
if sequence_filename in ("svs_se", "svs_st", "fid", "fid3", "fid_var",
"csi_se", "csi_st", "csi_fid", "csi_fidvar",
"epsi"):
# [IDL] FOR EPSI Measure_size and Data_size parameters are the same
params["region_dimension_line"] = \
params["image_dimension_line"]
params["region_dimension_column"] = \
params["image_dimension_column"]
params["ft_scale_factor"] = "1.0"
params["data_size_line"] = \
int(prot_ser.get('sKSpace.lPhaseEncodingLines', 0))
params["data_size_column"] = \
int(prot_ser.get('sKSpace.lBaseResolution', 0)) * readout_os
params["data_size_partition"] = \
int(prot_ser.get('sKSpace.lPartitions', 0))
params["measure_size_line"] = params["data_size_line"]
measure_size_column = params["data_size_column"]
measure_size_partition = params["data_size_partition"]
index = 0 if ((int(dataset.get("InstanceNumber", 0)) % 2) == 1) else 1
echo_time = float(prot_ser.get('alTE[%d]' % index, 0)) / 1000
repetition_time_1 = float(prot_ser.get('alTR[%d]' % index, 0)) / 1000
params["echo_time"] = str(echo_time)
params["repetition_time_1"] = str(repetition_time_1)
dwelltime = float(ptag_img.get("RealDwellTime", 0.0))
if dwelltime and base_resolution:
sweep_width = 1 / (dwelltime * base_resolution * readout_os)
else:
sweep_width = ""
params["sweep_width"] = sweep_width
# Added by BTA
ip_rot = prot_ser.get("sSliceArray.asSlice[0].dInPlaneRot", None)
pol_swap = prot_ser.get("sWipMemBlock.alFree[40]", None)
if ip_rot:
try:
ip_rot = float(ip_rot)
params["in_plane_rotation"] = ip_rot
except Exception, e:
pass
if pol_swap:
try:
pol_swap = int(pol_swap)
params["polarity_swap"] = pol_swap
except Exception, e:
raise e
return params
def _my_assert(expression):
if ASSERTIONS_ENABLED:
assert(expression)
def _dicom_orientation_string(normal):
"""Given a 3-item list (or other iterable) that represents a normal vector
to the "imaging" plane, this function determines the orientation of the
vector in 3-dimensional space. It returns a tuple of (angle, orientation)
in which angle is e.g. "Tra" or "Tra>Cor -6" or "Tra>Sag 14.1 >Cor 9.3"
and orientation is e.g. "Sag" or "Cor-Tra".
For double angulation, errors in secondary angle occur that may be due to
rounding errors in internal Siemens software, which calculates row and
column vectors.
"""
# docstring paraphrases IDL comments
TOLERANCE = 1.e-4
orientations = ('Sag', 'Cor', 'Tra')
final_angle = ""
final_orientation = ""
# [IDL] evaluate orientation of normal vector:
#
# Find principal direction of normal vector (i.e. axis with its largest
# component)
# Find secondary direction (second largest component)
# Calc. angle btw. projection of normal vector into the plane that
# includes both principal and secondary directions on the one hand
# and the principal direction on the other hand ==> 1st angulation:
# "principal>secondary = angle"
# Calc. angle btw. projection into plane perpendicular to principal
# direction on the one hand and secondary direction on the other
# hand ==> 2nd angulation: "secondary>third dir. = angle"
# get principal, secondary and ternary directions
sorted_normal = sorted(normal)
for i, value in enumerate(normal):
if value == sorted_normal[2]:
# [IDL] index of principal direction
principal = i
if value == sorted_normal[1]:
# [IDL] index of secondary direction
secondary = i
if value == sorted_normal[0]:
# [IDL] index of ternary direction
ternary = i
# [IDL] calc. angle between projection into third plane (spawned by
# principle & secondary directions) and principal direction:
angle_1 = math.atan2(normal[secondary], normal[principal]) * \
constants.RADIANS_TO_DEGREES
# [IDL] calc. angle btw. projection on rotated principle direction and
# secondary direction:
# projection on rotated principle dir.
new_normal_ip = math.sqrt((normal[principal] ** 2) + (normal[secondary] ** 2))
angle_2 = math.atan2(normal[ternary], new_normal_ip) * \
constants.RADIANS_TO_DEGREES
# [IDL] SIEMENS notation requires modifications IF principal dir. indxs SAG !
# [PS] In IDL, indxs is the name of the variable that is "secondary" here.
# Even with that substitution, I don't understand the comment above.
if not principal:
if abs(angle_1) > 0:
sign1 = angle_1 / abs(angle_1)
else:
sign1 = 1.0
angle_1 -= (sign1 * 180.0)
angle_2 *= -1
if (abs(angle_2) < TOLERANCE) or (abs(abs(angle_2) - 180) < TOLERANCE):
if (abs(angle_1) < TOLERANCE) or (abs(abs(angle_1) - 180) < TOLERANCE):
# [IDL] NON-OBLIQUE:
final_angle = orientations[principal]
            final_orientation = orientations[principal]
else:
# [IDL] SINGLE-OBLIQUE:
final_angle = "%s>%s %.3f" % \
(orientations[principal], orientations[secondary],
(-1 * angle_1)
)
final_orientation = orientations[principal] + '-' + orientations[secondary]
else:
# [IDL] DOUBLE-OBLIQUE:
final_angle = "%s>%s %.3f >%s %f" % \
(orientations[principal], orientations[secondary],
(-1 * angle_1), orientations[ternary], (-1 * angle_2))
final_orientation = "%s-%s-%s" % \
(orientations[principal], orientations[secondary],
orientations[ternary])
return final_angle, final_orientation
def _float(value):
"""Attempts to return value as a float. No different from Python's
built-in float(), except that it accepts None and "" (for which it
returns 0.0).
"""
return float(value) if value else 0.0
def _extract_from_quotes(s):
"""Given a string, returns the portion between the first and last
double quote (ASCII 34). If there aren't at least two quote characters,
the original string is returned."""
start = s.find('"')
end = s.rfind('"')
if (start != -1) and (end != -1):
s = s[start + 1 : end]
return s
def _null_truncate(s):
"""Given a string, returns a version truncated at the first '\0' if
there is one. If not, the original string is returned."""
i = s.find(chr(0))
if i != -1:
s = s[:i]
return s
def _scrub(item):
"""Given a string, returns a version truncated at the first '\0' and
stripped of leading/trailing whitespace. If the param is not a string,
it is returned unchanged."""
if isinstance(item, basestring):
return _null_truncate(item).strip()
else:
return item
def _get_chunks(tag, index, format, little_endian=True):
"""Given a CSA tag string, an index into that string, and a format
specifier compatible with Python's struct module, returns a tuple
of (size, chunks) where size is the number of bytes read and
chunks are the data items returned by struct.unpack(). Strings in the
list of chunks have been run through _scrub().
"""
# The first character of the format string indicates endianness.
format = ('<' if little_endian else '>') + format
size = struct.calcsize(format)
chunks = struct.unpack(format, tag[index:index + size])
chunks = [_scrub(item) for item in chunks]
return (size, chunks)
def _parse_protocol_data(protocol_data):
"""Returns a dictionary containing the name/value pairs inside the
"ASCCONV" section of the MrProtocol or MrPhoenixProtocol elements
of a Siemens CSA Header tag.
"""
# Protocol_data is a large string (e.g. 32k) that lists a lot of
# variables in a JSONish format with which I'm not familiar. Following
# that there's another chunk of data delimited by the strings you see
# below.
# That chunk is a list of name=value pairs, INI file style. We
# ignore everything outside of the ASCCONV delimiters. Everything inside
# we parse and return as a dictionary.
start = protocol_data.find("### ASCCONV BEGIN ###")
end = protocol_data.find("### ASCCONV END ###")
_my_assert(start != -1)
_my_assert(end != -1)
start += len("### ASCCONV BEGIN ###")
protocol_data = protocol_data[start:end]
lines = protocol_data.split('\n')
# The two lines of code below turn the 'lines' list into a list of
# (name, value) tuples in which name & value have been stripped and
# all blank lines have been discarded.
f = lambda pair: (pair[0].strip(), pair[1].strip())
lines = [f(line.split('=')) for line in lines if '=' in line]
return dict(lines)
def _get(dataset, tag, default=None):
"""Returns the value of a dataset tag, or the default if the tag isn't
in the dataset.
PyDicom datasets already have a .get() method, but it returns a
dicom.DataElement object. In practice it's awkward to call dataset.get()
and then figure out if the result is the default or a DataElement,
and if it is the latter _get the .value attribute. This function allows
me to avoid all that mess.
It is also a workaround for this bug (which I submitted) which should be
fixed in PyDicom > 0.9.3:
http://code.google.com/p/pydicom/issues/detail?id=72
"""
return default if tag not in dataset else dataset[tag].value
def _parse_csa_header(tag, little_endian = True):
"""The CSA header is a Siemens private tag that should be passed as
a string. Any of the following tags should work: (0x0029, 0x1010),
(0x0029, 0x1210), (0x0029, 0x1110), (0x0029, 0x1020), (0x0029, 0x1220),
(0x0029, 0x1120).
The function returns a dictionary keyed by element name.
"""
# Let's have a bit of fun, shall we? A Siemens CSA header is a mix of
# binary glop, ASCII, binary masquerading as ASCII, and noise masquerading
# as signal. It's also undocumented, so there's no specification to which
# to refer.
# The format is a good one to show to anyone who complains about XML being
# verbose or hard to read. Spend an afternoon with this and XML will
# look terse and read like a Shakespearean sonnet.
# The algorithm below is a translation of the GDCM project's
# CSAHeader::LoadFromDataElement() inside gdcmCSAHeader.cxx. I don't know
# how that code's author figured out what's in a CSA header, but the
# code works.
# I added comments and observations, but they're inferences. I might
# be wrong. YMMV.
# Some observations --
# - If you need to debug this code, a hexdump of the tag data will be
# your best friend.
# - The data in the tag is a list of elements, each of which contains
# zero or more subelements. The subelements can't be further divided
# and are either empty or contain a string.
# - Everything begins on four byte boundaries.
# - This code will break on big endian data. I don't know if this data
# can be big endian, and if that's possible I don't know what flag to
# read to indicate that. However, it's easy to pass an endianness flag
# to _get_chunks() should the need to parse big endian data arise.
# - Delimiters are thrown in here and there; they are 0x4d = 77 which is
# ASCII 'M' and 0xcd = 205 which has no ASCII representation.
# - Strings in the data are C-style NULL terminated.
# I sometimes read delimiters as strings and sometimes as longs.
DELIMITERS = ("M", "\xcd", 0x4d, 0xcd)
# This dictionary of elements is what this function returns
elements = { }
# I march through the tag data byte by byte (actually a minimum of four
# bytes at a time), and current points to my current position in the tag
# data.
current = 0
# The data starts with "SV10" followed by 0x04, 0x03, 0x02, 0x01.
# It's meaningless to me, so after reading it, I discard it.
size, chunks = _get_chunks(tag, current, "4s4s")
current += size
_my_assert(chunks[0] == "SV10")
_my_assert(chunks[1] == "\4\3\2\1")
# get the number of elements in the outer list
size, chunks = _get_chunks(tag, current, "L")
current += size
element_count = chunks[0]
# Eat a delimiter (should be 0x77)
size, chunks = _get_chunks(tag, current, "4s")
current += size
_my_assert(chunks[0] in DELIMITERS)
for i in range(element_count):
# Each element looks like this:
# - (64 bytes) Element name, e.g. ImagedNucleus, NumberOfFrames,
# VariableFlipAngleFlag, MrProtocol, etc. Only the data up to the
# first 0x00 is important. The rest is helpfully populated with
# noise that has enough pattern to make it look like something
# other than the garbage that it is.
# - (4 bytes) VM
# - (4 bytes) VR
# - (4 bytes) syngo_dt
# - (4 bytes) # of subelements in this element (often zero)
# - (4 bytes) a delimiter (0x4d or 0xcd)
size, chunks = _get_chunks(tag, current,
"64s" + "4s" + "4s" + "4s" + "L" + "4s")
current += size
name, vm, vr, syngo_dt, subelement_count, delimiter = chunks
_my_assert(delimiter in DELIMITERS)
# The subelements hold zero or more strings. Those strings are stored
# temporarily in the values list.
values = [ ]
for j in range(subelement_count):
# Each subelement looks like this:
# - (4 x 4 = 16 bytes) Call these four bytes A, B, C and D. For
# some strange reason, C is always a delimiter, while A, B and
# D are always equal to one another. They represent the length
# of the associated data string.
# - (n bytes) String data, the length of which is defined by
# A (and A == B == D).
# - (m bytes) Padding if length is not an even multiple of four.
size, chunks = _get_chunks(tag, current, "4L")
current += size
_my_assert(chunks[0] == chunks[1])
_my_assert(chunks[1] == chunks[3])
_my_assert(chunks[2] in DELIMITERS)
length = chunks[0]
# get a chunk-o-stuff, length indicated by code above.
# Note that length can be 0.
size, chunks = _get_chunks(tag, current, "%ds" % length)
current += size
if chunks[0]:
values.append(chunks[0])
# If we're not at a 4 byte boundary, move.
# Clever modulus code below swiped from GDCM
current += (4 - (length % 4)) % 4
# The value becomes a single string item (possibly "") or a list
# of strings
if len(values) == 0:
values = ""
if len(values) == 1:
values = values[0]
_my_assert(name not in elements)
elements[name] = values
return elements
| 39.048165 | 87 | 0.622173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17,445 | 0.512335 |
2cdefd8c563bbdcc05d1cd341d68e9bcaf7e3525 | 9,764 | py | Python | lib/nbrun.py | etalab/run-nb | 0e0f5f4d4508d09d95cef615c427eb64e93012bc | [
"MIT"
] | null | null | null | lib/nbrun.py | etalab/run-nb | 0e0f5f4d4508d09d95cef615c427eb64e93012bc | [
"MIT"
] | null | null | null | lib/nbrun.py | etalab/run-nb | 0e0f5f4d4508d09d95cef615c427eb64e93012bc | [
"MIT"
] | null | null | null | # Copyright (c) 2015-2017 Antonino Ingargiola
# License: MIT
"""
nbrun - Run a Jupyter/IPython notebook, optionally passing arguments.
USAGE
-----
Copy this file in the folder containing the master notebook used to
execute the other notebooks. Then use `run_notebook()` to execute
notebooks.
"""
import time
from pathlib import Path
from IPython.display import display, FileLink
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert import HTMLExporter
__version__ = '0.2'
def dict_to_code(mapping):
"""Convert input dict `mapping` to a string containing python code.
Each key is the name of a variable and each value is
the variable content. Each variable assignment is separated by
a newline.
Keys must be strings, and cannot start with a number (i.e. must be
valid python identifiers). Values must be objects with a string
representation (the result of repr(obj)) which is valid python code for
re-creating the object.
For examples, numbers, strings or list/tuple/dict
of numbers and strings are allowed.
Returns:
A string containing the python code.
"""
lines = ("{} = {}".format(key, repr(value))
for key, value in mapping.items())
return '\n'.join(lines)
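# A quick, hypothetical illustration of dict_to_code (values are made up):
#     >>> dict_to_code({'n_samples': 100, 'label': 'test'})
#     "n_samples = 100\nlabel = 'test'"
# On Python 3.7+ the assignments follow the dict's insertion order; on
# older interpreters the order of the lines is not guaranteed.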
def run_notebook(notebook_path, nb_kwargs=None, suffix='-out',
out_path_ipynb=None, out_path_html=None,
kernel_name=None, working_dir='./',
timeout=3600, execute_kwargs=None,
save_ipynb=True, save_html=False,
insert_pos=1, hide_input=False, display_links=True,
return_nb=False, add_timestamp=True):
"""Runs a notebook and saves the output in a new notebook.
Executes a notebook, optionally passing "arguments"
similarly to passing arguments to a function.
Notebook arguments are passed in a dictionary (`nb_kwargs`) which is
converted into a string containing python assignments. This string is
inserted in the template notebook as a code cell. The code assigns
variables which can be used to control the execution. When "calling"
a notebook, you need to know which arguments (variables) to pass.
Unlike normal python functions, no check is performed on the input
    arguments. For sanity, we recommend describing the variables that
can be assigned using a markdown cell at the beginning of the template
notebook.
Arguments:
notebook_path (pathlib.Path or string): input notebook filename.
This is the notebook to be executed (i.e. template notebook).
nb_kwargs (dict or None): If not None, this dict is converted to a
string of python assignments using the dict keys as variables
names and the dict values as variables content. This string is
inserted as code-cell in the notebook to be executed.
suffix (string): suffix to append to the file name of the executed
notebook. Argument ignored if `out_notebook_path` is not None.
out_path_ipynb (pathlib.Path, string or None): file name for the
            output ipynb notebook. If None, the output ipynb notebook has
the same name as the input notebook plus a suffix, specified
by the `suffix` argument. If not None, `suffix` is ignored.
If argument `save_ipynb` is False this argument is ignored.
out_path_html (pathlib.Path, string or None): file name for the
            output HTML notebook. If None, the output HTML notebook has
the same name as the input notebook plus a suffix, specified
by the `suffix` argument. If not None, `suffix` is ignored.
If argument `save_html` is False this argument is ignored.
kernel_name (string or None): name of the kernel used to execute the
notebook. Use the default kernel if None.
working_dir (string or Path): the folder the kernel is started into.
timeout (int): max execution time (seconds) for each cell before the
execution is aborted.
execute_kwargs (dict): additional arguments passed to
`ExecutePreprocessor`.
save_ipynb (bool): if True, save the output notebook in ipynb format.
Default True.
save_html (bool): if True, save the output notebook in HTML format.
Default False.
insert_pos (int): position of insertion of the code-cell containing
the input arguments. Default is 1 (i.e. second cell). With this
default, the first cell of the input notebook can define default
argument values (used when the notebook is executed
with no arguments or through the Notebook App).
hide_input (bool): whether to create a notebook with input cells
            hidden (useful to remind the user that the auto-generated
            output is not meant to have its code edited).
display_links (bool): if True, display/print "link" of template and
output notebooks. Links are only rendered in a notebook.
In a text terminal, links are displayed as full file names.
return_nb (bool): if True, returns the notebook object. If False
returns None. Default False.
add_timestamp (bool): if True, add a timestamp cell to the executed
notebook containing time of execution, duration and the name of
the template notebook.
"""
timestamp = ("**Executed:** %s<br>**Duration:** %d seconds.<br>"
"**Autogenerated from:** [%s](%s)\n\n---")
if nb_kwargs is None:
nb_kwargs = {}
else:
header = '# Cell inserted during automated execution.'
code = dict_to_code(nb_kwargs)
code_cell = '\n'.join((header, code))
notebook_path = Path(notebook_path)
if not notebook_path.is_file():
raise FileNotFoundError("Path '%s' not found." % notebook_path)
def check_out_path(notebook_path, out_path, ext, save):
if out_path is None:
out_path = Path(notebook_path.parent,
notebook_path.stem + suffix + ext)
out_path = Path(out_path)
if save and not out_path.parent.exists():
msg = "Folder of the output %s file was not found:\n - %s\n."
                raise FileNotFoundError(msg % (ext, out_path.parent))
return out_path
out_path_ipynb = check_out_path(notebook_path, out_path_ipynb,
ext='.ipynb', save=save_ipynb)
out_path_html = check_out_path(notebook_path, out_path_html,
ext='.html', save=save_html)
if display_links:
display(FileLink(str(notebook_path)))
if execute_kwargs is None:
execute_kwargs = {}
execute_kwargs.update(timeout=timeout)
if kernel_name is not None:
execute_kwargs.update(kernel_name=kernel_name)
ep = ExecutePreprocessor(**execute_kwargs)
nb = nbformat.read(str(notebook_path), as_version=4)
if hide_input:
nb["metadata"].update({"hide_input": True})
if len(nb_kwargs) > 0:
nb['cells'].insert(insert_pos, nbformat.v4.new_code_cell(code_cell))
start_time = time.time()
try:
# Execute the notebook
ep.preprocess(nb, {'metadata': {'path': working_dir}})
except:
# Execution failed, print a message then raise.
msg = ('Error executing the notebook "%s".\n'
'Notebook arguments: %s\n\n'
'See notebook "%s" for the traceback.' %
(notebook_path, str(nb_kwargs), out_path_ipynb))
print(msg)
timestamp += '\n\nError occurred during execution. See below.'
raise
finally:
if add_timestamp:
duration = time.time() - start_time
timestamp = timestamp % (time.ctime(start_time), duration,
notebook_path, out_path_ipynb)
timestamp_cell = nbformat.v4.new_markdown_cell(timestamp)
nb['cells'].insert(0, timestamp_cell)
# Save the executed notebook to disk
if save_ipynb:
nbformat.write(nb, str(out_path_ipynb))
if display_links:
display(FileLink(str(out_path_ipynb)))
if save_html:
html_exporter = HTMLExporter()
body, resources = html_exporter.from_notebook_node(nb)
with open(str(out_path_html), 'w') as f:
f.write(body)
if return_nb:
return nb
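# Hypothetical example (paths and arguments are made up):
#     run_notebook('template.ipynb',
#                  nb_kwargs={'data_file': 'sample.dat', 'threshold': 0.5})
# executes template.ipynb with the two variables injected as a code cell
# and, with the defaults, saves the result as template-out.ipynb next to
# the template.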
if __name__ == '__main__':
import argparse
descr = """\
Execute all notebooks in a folder saving the result in the "out"
subfolder.
"""
parser = argparse.ArgumentParser(description=descr, epilog='\n')
parser.add_argument('folder',
help='Source folder with files to be processed.')
msg = ('Name of kernel executing the notebook.\n'
'Use `jupyter kernelspec list` for a list of kernels.')
parser.add_argument('--kernel', metavar='KERNEL_NAME', default=None,
help=msg)
args = parser.parse_args()
folder = Path(args.folder)
assert folder.is_dir(), 'Folder "%s" not found.' % folder
out_path = Path(folder, 'out/')
if not out_path.is_dir():
out_path.mkdir(parents=True) # py2 compat
print('Executing notebooks in "%s" ... ' % folder)
pathlist = list(folder.glob('*.ipynb'))
for nbpath in pathlist:
if not (nbpath.stem.endswith('-out') or nbpath.stem.startswith('_')):
print()
out_path_ipynb = Path(out_path, nbpath.name)
run_notebook(nbpath, out_path_ipynb=out_path_ipynb,
kernel_name=args.kernel)
| 43.784753 | 77 | 0.645739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,649 | 0.578554 |
2cdf4d07ac3dc5fe02900f164bcf1938199e4ed3 | 254 | py | Python | Beginner/Day6/utilitiesmodule.py | vishipayyallore/LearningPython_2019 | f72d5af61ad96721442b7ebfc33518c2a879eb64 | [
"MIT"
] | null | null | null | Beginner/Day6/utilitiesmodule.py | vishipayyallore/LearningPython_2019 | f72d5af61ad96721442b7ebfc33518c2a879eb64 | [
"MIT"
] | null | null | null | Beginner/Day6/utilitiesmodule.py | vishipayyallore/LearningPython_2019 | f72d5af61ad96721442b7ebfc33518c2a879eb64 | [
"MIT"
] | null | null | null |
def banner(message, length, header='=', footer='*'):
print()
print(header * length)
print((' ' * (length//2 - len(message)//2)), message)
print(footer * length)
def banner_v2(length, footer='-'):
print(footer * length)
print()
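# Illustrative calls (made-up arguments):
#   banner('Welcome', 40)      # '=' rule, roughly centred message, '*' rule
#   banner_v2(40, footer='~')  # a 40-character '~' rule and a blank line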
| 21.166667 | 57 | 0.586614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.047244 |
2ce0692dbb9dbc53c64f62fdadf855b89afbf67f | 3,277 | py | Python | src/nninst/plot/heatmap_alexnet_imagenet_inter_class_similarity_frequency.py | uchuhimo/Ptolemy | 5c8ae188af30ee49d38f27d54c67af2eab9489e7 | [
"Apache-2.0"
] | 15 | 2020-08-24T07:11:20.000Z | 2021-09-13T08:03:42.000Z | src/nninst/plot/heatmap_alexnet_imagenet_inter_class_similarity_frequency.py | uchuhimo/Ptolemy | 5c8ae188af30ee49d38f27d54c67af2eab9489e7 | [
"Apache-2.0"
] | 5 | 2021-02-28T17:30:26.000Z | 2021-06-15T09:33:00.000Z | src/nninst/plot/heatmap_alexnet_imagenet_inter_class_similarity_frequency.py | uchuhimo/Ptolemy | 5c8ae188af30ee49d38f27d54c67af2eab9489e7 | [
"Apache-2.0"
] | 3 | 2020-10-22T09:11:11.000Z | 2021-01-16T14:49:34.000Z | import numpy as np
import pandas as pd
import seaborn as sns
from nninst.backend.tensorflow.model import AlexNet
from nninst.backend.tensorflow.trace.alexnet_imagenet_inter_class_similarity import (
alexnet_imagenet_inter_class_similarity_frequency,
)
from nninst.op import Conv2dOp, DenseOp
np.random.seed(0)
sns.set()
threshold = 0.5
frequency = int(500 * 0.1)
label = "import"
variant = None
base_name = f"alexnet_imagenet_inter_class_similarity_frequency_{frequency}"
cmap = "Greens"
same_class_similarity = []
diff_class_similarity = []
layer_names = []
layers = AlexNet.graph().load().ops_in_layers(Conv2dOp, DenseOp)
for layer_name in [
None,
*layers,
]:
similarity = alexnet_imagenet_inter_class_similarity_frequency(
threshold, frequency, label, variant=variant, layer_name=layer_name
).load()
same_class_similarity.append(
np.mean(similarity[np.eye(similarity.shape[0], dtype=bool)])
)
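    # np.eye picks the diagonal (same-class entries); the strictly lower
    # triangle below (np.tri with k=-1) picks each distinct class pair once.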
diff_class_similarity.append(
np.mean(
similarity[
np.tri(similarity.shape[0], similarity.shape[1], k=-1, dtype=bool)
]
)
)
if layer_name is None:
file_name = base_name
layer_names.append("All")
else:
file_name = base_name + "_" + layer_name[: layer_name.index("/")]
layer_names.append(layer_name[: layer_name.index("/")])
plot_array = np.around(similarity, decimals=2)
ax = sns.heatmap(plot_array, cmap=cmap, vmax=plot_array.max(), annot=True)
ax.set(xlabel="Class", ylabel="Class")
fig = ax.get_figure()
# fig.savefig(f"{file_name}.pdf", bbox_inches="tight")
fig.savefig(f"{file_name}.png", bbox_inches="tight")
# np.savetxt(f"{file_name}.csv", similarity, delimiter=",")
fig.clf()
for layer_name, similarity in zip(
["avg", "first_half", "second_half"],
[
np.mean(
[
alexnet_imagenet_inter_class_similarity_frequency(
threshold, frequency, label, variant=variant, layer_name=layer
).load()
for layer in layers
],
axis=0,
),
# np.mean([alexnet_imagenet_inter_class_similarity_frequency(
# threshold, frequency, label, variant=variant, layer_name=layer
# ).load()
# for layer in layers[:len(layers) // 2]], axis=0),
# np.mean([alexnet_imagenet_inter_class_similarity_frequency(
# threshold, frequency, label, variant=variant, layer_name=layer
# ).load()
# for layer in layers[len(layers) // 2:]], axis=0),
],
):
file_name = base_name + "_" + layer_name
plot_array = np.around(similarity, decimals=2)
ax = sns.heatmap(plot_array, cmap=cmap, vmax=plot_array.max(), annot=True)
ax.set(xlabel="Class", ylabel="Class")
fig = ax.get_figure()
# fig.savefig(f"{file_name}.pdf", bbox_inches="tight")
fig.savefig(f"{file_name}.png", bbox_inches="tight")
# np.savetxt(f"{file_name}.csv", similarity, delimiter=",")
fig.clf()
summary_df = pd.DataFrame(
{
"Same Class": same_class_similarity,
"Diff Class": diff_class_similarity,
"Layer": layer_names,
}
)
summary_df.to_csv(f"{base_name}_summary.csv", index=False)
| 33.10101 | 85 | 0.646933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 886 | 0.270369 |
2ce1e3299abfe56cee528dffc7ce99df7cb83c6a | 3,070 | py | Python | spec/repositories/test_person.py | dooma/Events | 0c9556cae90ae9cdbacdbd0337c06df91cc72c13 | [
"MIT"
] | null | null | null | spec/repositories/test_person.py | dooma/Events | 0c9556cae90ae9cdbacdbd0337c06df91cc72c13 | [
"MIT"
] | null | null | null | spec/repositories/test_person.py | dooma/Events | 0c9556cae90ae9cdbacdbd0337c06df91cc72c13 | [
"MIT"
] | null | null | null | __author__ = 'Călin Sălăgean'
import unittest
from utils.IO import IO
from events.repositories.person import PersonRepository
from events.models.person import Person
class TestPersonRepository(unittest.TestCase):
def test_initialization(self):
io = IO('test.json')
io.set([])
repository = PersonRepository('test.json')
self.assertIsInstance(repository, PersonRepository)
def test_insert(self):
io = IO('test.json')
io.set([])
Person.set_class_id(0)
person = Person('Vasile', 'Pop', 'Str. Calea Floresti, nr. 24')
repo = PersonRepository('test.json')
repo.insert(person)
people = io.get()
person = people[0]
self.assertEqual(len(people), 1)
self.assertEqual(person['id'], 0)
self.assertEqual(person['first_name'], 'Vasile')
self.assertEqual(person['last_name'], 'Pop')
self.assertEqual(person['address'], 'Str. Calea Floresti, nr. 24')
def test_get_all(self):
io = IO('test.json')
io.set([])
Person.set_class_id(0)
person = Person('Vasile', 'Pop', 'Str. Calea Floresti, nr. 24')
repo = PersonRepository('test.json')
repo.insert(person)
people = repo.get_all()
self.assertEqual(len(people), 1)
person = people[0]
self.assertEqual(person.get_id(), 0)
self.assertEqual(person.get_name(), 'Vasile Pop')
self.assertEqual(person.get_address(), 'Str. Calea Floresti, nr. 24')
def test_get(self):
io = IO('test.json')
io.set([])
Person.set_class_id(10)
person = Person('Vasile', 'Pop', 'Str. Calea Floresti, nr. 24')
repo = PersonRepository('test.json')
repo.insert(person)
person = repo.get(10)
self.assertEqual(person.get_id(), 10)
self.assertEqual(person.get_name(), 'Vasile Pop')
self.assertEqual(person.get_address(), 'Str. Calea Floresti, nr. 24')
with self.assertRaisesRegex(ValueError, 'Person not found!'):
person = repo.get(0)
def test_update(self):
io = IO('test.json')
io.set([])
Person.set_class_id(10)
person = Person('Vasile', 'Pop', 'Str. Calea Floresti, nr. 24')
repo = PersonRepository('test.json')
repo.insert(person)
person = repo.get(10)
person.update('Dan', 'Popescu', 'Calea Dorobantilor')
repo.update(person)
updated_person = repo.get(10)
self.assertEqual(person.get_id(), 10)
self.assertEqual(person.get_name(), 'Dan Popescu')
self.assertEqual(person.get_address(), 'Calea Dorobantilor')
def test_delete(self):
io = IO('test.json')
io.set([])
Person.set_class_id(10)
person = Person('Vasile', 'Pop', 'Str. Calea Floresti, nr. 24')
repo = PersonRepository('test.json')
repo.insert(person)
repo.delete(person)
with self.assertRaisesRegex(ValueError, 'Person not found!'):
person = repo.get(10) | 30.098039 | 77 | 0.602932 | 2,902 | 0.944354 | 0 | 0 | 0 | 0 | 0 | 0 | 626 | 0.20371 |
2ce2a13441ae41fb3cffcc76633e1754ee418995 | 777 | py | Python | AGD_ST/search/util_visual/draw_histogram.py | Erfun76/AGD | c20755f7198b299c3ad080a1a1215b4f42100e5f | [
"MIT"
] | 52 | 2020-08-19T07:06:49.000Z | 2022-03-30T07:40:06.000Z | AGD_ST/search/util_visual/draw_histogram.py | Erfun76/AGD | c20755f7198b299c3ad080a1a1215b4f42100e5f | [
"MIT"
] | 12 | 2020-08-17T09:06:12.000Z | 2021-11-20T09:48:08.000Z | AGD_ST/search/util_visual/draw_histogram.py | Erfun76/AGD | c20755f7198b299c3ad080a1a1215b4f42100e5f | [
"MIT"
] | 9 | 2020-08-21T05:28:33.000Z | 2021-07-13T11:34:26.000Z | import numpy as np
from skimage.io import imread, imsave
import os
import sys
import matplotlib.pyplot as plt
def draw_hist(fname, save_folder):
img = imread(fname)
img_flat = np.reshape(np.array(img),[-1])
plt.clf()
plt.hist(img_flat)
plt.title('Color Distribution Histogram')
plt.xlabel('Pixel Value')
plt.ylabel('Frequency')
plt.savefig(os.path.join(save_folder, os.path.basename(fname)))
if __name__ == "__main__":
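    # usage: python draw_histogram.py <image_folder> <output_folder>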
img_folder = sys.argv[1]
save_folder = sys.argv[2]
if not os.path.exists(save_folder):
os.mkdir(save_folder)
fnames = os.listdir(img_folder)
for fname in fnames:
if 'png' in fname:
draw_hist(os.path.join(img_folder, fname), save_folder)
| 23.545455 | 68 | 0.644788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.088803 |
2ce31456540ab00747dedfd50b8822dd96f15a36 | 5,176 | py | Python | instrument_plugins/EGandG_Model5209.py | sourav-majumder/qtlab | 96b2a127b1df7b45622c90229bd5ef8a4083614e | [
"MIT"
] | null | null | null | instrument_plugins/EGandG_Model5209.py | sourav-majumder/qtlab | 96b2a127b1df7b45622c90229bd5ef8a4083614e | [
"MIT"
] | null | null | null | instrument_plugins/EGandG_Model5209.py | sourav-majumder/qtlab | 96b2a127b1df7b45622c90229bd5ef8a4083614e | [
"MIT"
] | null | null | null | # EGandG_Model5209.py class, to perform the communication between the Wrapper and the device
# Martijn Schaafsma <qtlab@mcschaafsma.nl>, 2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from instrument import Instrument
import types
import logging
import numpy as np
from time import sleep
import visa
class EGandG_Model5209(Instrument):
'''
    This is the driver for the EG&G Model 5209 lock-in amplifier.
Usage:
Initialize with
<name> = instruments.create('<name>', 'EGandG_Model5209', address='<GBIP address>, reset=<bool>')
'''
def __init__(self, name, address, reset=False):
logging.info(__name__ + ' : Initializing instrument EG&G Model 5209')
Instrument.__init__(self, name, tags=['physical'])
self._address = address
#>>>>>>>>>>>>>>
assert False, "pyvisa syntax has changed, tweak the line below according to the instructions in qtlab/instrument_plugins/README_PYVISA_API_CHANGES"
#self._visainstrument = visa.instrument(self._address)
#<<<<<<<<<<<<<<
#self.init_default()
# Sensitivity
self._sen = 1.0
# Add functions
self.add_function('init_default')
self.add_function ('get_all')
self.add_function ('auto_measure')
self.add_function ('auto_phase')
# Add parameters
self.add_parameter('value',
flags=Instrument.FLAG_GET, units='V', type=types.FloatType,tags=['measure'])
self.add_parameter('frequency',
flags=Instrument.FLAG_GET, units='mHz', type=types.FloatType)
self.add_parameter('sensitivity',
flags=Instrument.FLAG_GETSET, units='', minval=1, maxval=15, type=types.IntType)
self.add_parameter('timeconstant',
flags=Instrument.FLAG_GETSET, units='', minval=1, maxval=15, type=types.IntType)
self.add_parameter('sensitivity_v',
flags=Instrument.FLAG_GETSET, units='V', minval=0.0, maxval=15.0, type=types.FloatType)
self.add_parameter('timeconstant_t',
flags=Instrument.FLAG_GETSET, units='s', minval=0.0, maxval=15.0, type=types.FloatType)
self.add_parameter('filter',
flags=Instrument.FLAG_GETSET, units='', minval=0, maxval=3, type=types.IntType)
if reset:
self.init_default()
#self.get_all()
self.get_sensitivity_v()
def _write(self, letter):
self._visainstrument.write(letter)
sleep(0.1)
def _ask(self, question):
return self._visainstrument.ask(question)
def get_all(self):
self.get_value()
self.get_frequency()
self.get_sensitivity()
self.get_timeconstant()
self.get_sensitivity_v()
self.get_timeconstant_t()
def init_default(self):
# self._write("ASM")
self._write("SEN 7")
self._write("XTC 3")
self._write("FLT 3")
def auto_measure(self):
self._write("ASM")
def auto_phase(self):
self._write("AQN")
def do_get_frequency(self):
stringval = self._ask("FRQ?")
return float(stringval)
def do_get_value(self):
stringval = self._ask("OUT?")
sd = stringval.split()
if len(sd)==2:
s=sd[0]
v = float(sd[1])
if (s=='-'):
v = -v
else:
v = float(sd[0])
return v*self._sen/10000.0
def do_get_sensitivity(self):
stringval = self._ask("SEN?")
self.get_sensitivity_v()
return int(stringval)
def do_set_sensitivity(self,val):
self._write("SEN %d"%val)
self.get_sensitivity()
def do_get_filter(self):
stringval = self._ask("FLT?")
        print(stringval)
return int(stringval)
def do_set_filter(self,val):
self._write("FLT %d"%val)
def do_get_timeconstant(self):
stringval = self._ask("XTC?")
return int(stringval)
def do_set_timeconstant(self,val):
self._write("XTC %d"%val)
def do_get_sensitivity_v(self):
stringval = self._ask("SEN?")
n = int(stringval)
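        # The sensitivity steps follow a 1-3-10 sequence: even n gives
        # 10**(n/2 - 7) volts and odd n multiplies that by 3 (the log10(3)
        # term), e.g. n = 7 -> 3e-4 V, n = 8 -> 1e-3 V.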
self._sen = pow(10,(int(n/2)-7+np.log10(3)*np.mod(n,2)))
return self._sen
def do_set_sensitivity_v(self,val):
n = np.log10(val)*2.0+13.99
if (np.mod(n,2) > 0.9525) & (np.mod(n,2) < 1.1):
n = n+0.1
self._write("SEN %d"%n)
self.get_sensitivity_v()
def do_get_timeconstant_t(self):
stringval = self._ask("XTC?")
n = int(stringval)
        sen = pow(10, (int(n/2) - 3 + np.log10(3) * np.mod(n, 2)))
return sen
def do_set_timeconstant_t(self,val):
n = np.log10(val)*2.0+5.99
        if (np.mod(n,2) > 0.9525) & (np.mod(n,2) < 1.1):
n = n+0.1
self._write("XTC %d"%n)
| 30.269006 | 153 | 0.651468 | 4,211 | 0.813563 | 0 | 0 | 0 | 0 | 0 | 0 | 1,661 | 0.320904 |
2ce52789e9c62be6a5f2d0514309edbb8a1eff3e | 6,747 | py | Python | scripts/addons/keentools_facebuilder/utils/materials.py | Tilapiatsu/blender-custom_conf | 05592fedf74e4b7075a6228b8448a5cda10f7753 | [
"MIT"
] | 2 | 2020-04-16T22:12:40.000Z | 2022-01-22T17:18:45.000Z | scripts/addons/keentools_facebuilder/utils/materials.py | Tilapiatsu/blender-custom_conf | 05592fedf74e4b7075a6228b8448a5cda10f7753 | [
"MIT"
] | null | null | null | scripts/addons/keentools_facebuilder/utils/materials.py | Tilapiatsu/blender-custom_conf | 05592fedf74e4b7075a6228b8448a5cda10f7753 | [
"MIT"
] | 2 | 2019-05-16T04:01:09.000Z | 2020-08-25T11:42:26.000Z | # ##### BEGIN GPL LICENSE BLOCK #####
# KeenTools for blender is a blender addon for using KeenTools in Blender.
# Copyright (C) 2019 KeenTools
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# ##### END GPL LICENSE BLOCK #####
import logging
import bpy
import numpy as np
from .. config import Config, get_main_settings
from .. fbloader import FBLoader
import keentools_facebuilder.blender_independent_packages.pykeentools_loader as pkt
from ..utils.images import find_bpy_image_by_name
def switch_to_mode(mode='MATERIAL'):
areas = bpy.context.workspace.screens[0].areas
for area in areas:
for space in area.spaces:
if space.type == 'VIEW_3D':
space.shading.type = mode
def toggle_mode(modes=('SOLID', 'MATERIAL')):
areas = bpy.context.workspace.screens[0].areas
for area in areas:
for space in area.spaces:
if space.type == 'VIEW_3D':
cur_mode = space.shading.type
ind = 0
if cur_mode in modes:
ind = modes.index(cur_mode)
ind += 1
if ind >= len(modes):
ind = 0
space.shading.type = modes[ind]
def assign_material_to_object(obj, mat):
if obj.data.materials:
obj.data.materials[0] = mat
else:
obj.data.materials.append(mat)
def get_mat_by_name(mat_name):
if bpy.data.materials.find(mat_name) >= 0:
return bpy.data.materials[mat_name]
new_mat = bpy.data.materials.new(mat_name)
new_mat.use_nodes = True
return new_mat
def get_shader_node(mat, find_type, create_name):
for node in mat.node_tree.nodes:
if node.type == find_type:
return node
return mat.node_tree.nodes.new(create_name)
def remove_mat_by_name(name):
mat_num = bpy.data.materials.find(name)
if mat_num >= 0:
bpy.data.materials.remove(bpy.data.materials[mat_num])
def show_texture_in_mat(tex_name, mat_name):
tex = find_bpy_image_by_name(tex_name)
mat = get_mat_by_name(mat_name)
principled_node = get_shader_node(
mat, 'BSDF_PRINCIPLED', 'ShaderNodeBsdfPrincipled')
image_node = get_shader_node(
mat, 'TEX_IMAGE', 'ShaderNodeTexImage')
image_node.image = tex
image_node.location = Config.image_node_layout_coord
principled_node.inputs['Specular'].default_value = 0.0
mat.node_tree.links.new(
image_node.outputs['Color'],
principled_node.inputs['Base Color'])
return mat
def _remove_bpy_texture_if_exists(tex_name):
logger = logging.getLogger(__name__)
tex_num = bpy.data.images.find(tex_name)
if tex_num >= 0:
logger.debug("TEXTURE WITH THAT NAME ALREADY EXISTS. REMOVING")
existing_tex = bpy.data.images[tex_num]
bpy.data.images.remove(existing_tex)
def _create_bpy_texture_from_img(img, tex_name):
logger = logging.getLogger(__name__)
assert(len(img.shape) == 3 and img.shape[2] == 4)
_remove_bpy_texture_if_exists(tex_name)
tex = bpy.data.images.new(
tex_name, width=img.shape[1], height=img.shape[0],
alpha=True, float_buffer=False)
tex.colorspace_settings.name = 'sRGB'
assert(tex.name == tex_name)
tex.pixels[:] = img.ravel()
tex.pack()
logger.debug("TEXTURE BAKED SUCCESSFULLY")
def _cam_image_data_exists(cam):
if not cam.cam_image:
return False
w, h = cam.cam_image.size[:2]
return w > 0 and h > 0
def _get_fb_for_bake_tex(headnum, head):
FBLoader.load_model(headnum)
fb = FBLoader.get_builder()
for i, m in enumerate(head.get_masks()):
fb.set_mask(i, m)
FBLoader.select_uv_set(fb, head.tex_uv_shape)
return fb
def _sRGB_to_linear(img):
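    # Standard sRGB -> linear transfer written with exact fractions:
    # 25/323 == 1/12.92 on the linear segment, and
    # ((200*c + 11)/211) ** (12/5) == ((c + 0.055)/1.055) ** 2.4 above it.
    # img_rgb is a view of img, so the conversion happens in place.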
img_rgb = img[:, :, :3]
img_rgb[img_rgb < 0.04045] = 25 * img_rgb[img_rgb < 0.04045] / 323
img_rgb[img_rgb >= 0.04045] = ((200 * img_rgb[img_rgb >= 0.04045] + 11) / 211) ** (12 / 5)
return img
def _create_frame_data_loader(settings, head, camnums, fb):
def frame_data_loader(kf_idx):
cam = head.cameras[camnums[kf_idx]]
w, h = cam.cam_image.size[:2]
img = np.rot90(
np.asarray(cam.cam_image.pixels[:]).reshape((h, w, 4)),
cam.orientation)
frame_data = pkt.module().texture_builder.FrameData()
frame_data.geo = fb.applied_args_model_at(cam.get_keyframe())
frame_data.image = img
frame_data.model = cam.get_model_mat()
frame_data.view = np.eye(4)
frame_data.projection = cam.get_projection_matrix()
return frame_data
return frame_data_loader
def bake_tex(headnum, tex_name):
logger = logging.getLogger(__name__)
settings = get_main_settings()
head = settings.get_head(headnum)
if not head.has_cameras():
logger.debug("NO CAMERAS ON HEAD")
return False
camnums = [cam_idx for cam_idx, cam in enumerate(head.cameras)
if cam.use_in_tex_baking and \
_cam_image_data_exists(cam) and \
cam.has_pins()]
frames_count = len(camnums)
if frames_count == 0:
logger.debug("NO FRAMES FOR TEXTURE BUILDING")
return False
fb = _get_fb_for_bake_tex(headnum, head)
frame_data_loader = _create_frame_data_loader(
settings, head, camnums, fb)
bpy.context.window_manager.progress_begin(0, 1)
class ProgressCallBack(pkt.module().ProgressCallback):
def set_progress_and_check_abort(self, progress):
bpy.context.window_manager.progress_update(progress)
return False
progress_callBack = ProgressCallBack()
built_texture = pkt.module().texture_builder.build_texture(
frames_count, frame_data_loader, progress_callBack,
settings.tex_height, settings.tex_width, settings.tex_face_angles_affection,
settings.tex_uv_expand_percents, settings.tex_back_face_culling,
settings.tex_equalize_brightness, settings.tex_equalize_colour, settings.tex_fill_gaps)
bpy.context.window_manager.progress_end()
_create_bpy_texture_from_img(built_texture, tex_name)
return True
| 32.4375 | 95 | 0.67719 | 202 | 0.029939 | 0 | 0 | 0 | 0 | 0 | 0 | 1,081 | 0.160219 |
2ce777497859d2197b79ee44dcd351d14e88fcd2 | 172 | py | Python | scripts/item/consume_2434951.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | scripts/item/consume_2434951.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | scripts/item/consume_2434951.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | # Soft-serve Damage Skin
success = sm.addDamageSkin(2434951)
if success:
sm.chat("The Soft-serve Damage Skin has been added to your account's damage skin collection.")
| 34.4 | 98 | 0.761628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.633721 |
2ce817a6e849abb570f9ff5f54594335a171ed3d | 2,918 | py | Python | ext/testlib/suite.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
] | 135 | 2016-10-21T03:31:49.000Z | 2022-03-25T01:22:20.000Z | ext/testlib/suite.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
] | 35 | 2017-03-10T17:57:46.000Z | 2022-02-18T17:34:16.000Z | ext/testlib/suite.py | mandaltj/gem5_chips | b9c0c602241ffda7851c1afb32fa01f295bb98fd | [
"BSD-3-Clause"
] | 48 | 2016-12-08T12:03:13.000Z | 2022-02-16T09:16:13.000Z | # Copyright (c) 2017 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Sean Wilson
import helper
import runner as runner_mod
class TestSuite(object):
'''
An object grouping a collection of tests. It provides tags which enable
filtering during list and run selection. All tests held in the suite must
have a unique name.
..note::
The :func:`__new__` method enables collection of test cases, it must
be called in order for test cases to be collected.
..note::
To reduce test definition boilerplate, the :func:`init` method is
forwarded all `*args` and `**kwargs`. This means derived classes can
define init without boilerplate super().__init__(*args, **kwargs).
'''
runner = runner_mod.SuiteRunner
collector = helper.InstanceCollector()
fixtures = []
tests = []
tags = set()
def __new__(klass, *args, **kwargs):
obj = super(TestSuite, klass).__new__(klass, *args, **kwargs)
TestSuite.collector.collect(obj)
return obj
def __init__(self, name=None, fixtures=tuple(), tests=tuple(),
tags=tuple(), **kwargs):
self.fixtures = self.fixtures + list(fixtures)
self.tags = self.tags | set(tags)
self.tests = self.tests + list(tests)
if name is None:
name = self.__class__.__name__
self.name = name
def __iter__(self):
return iter(self.tests) | 42.289855 | 77 | 0.718986 | 1,312 | 0.449623 | 0 | 0 | 0 | 0 | 0 | 0 | 2,113 | 0.724126 |
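# Minimal illustration (hypothetical): subclassing is the usual way to
# declare a suite, since class-level attributes are merged in __init__:
#     class QuickSuite(TestSuite):
#         tags = {'quick'}
#         tests = [test_a]  # test_a is a hypothetical test object
#     QuickSuite()
# Merely instantiating the class registers the suite with
# TestSuite.collector via __new__.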
2ce9ea1882a265e53124e26b179ea50756f2193c | 9,163 | py | Python | src/downloaders/video.py | s0hvaperuna/playlist-checker | ce9ee4e603070c9bd892a9bec64e792d647618d2 | [
"MIT"
] | null | null | null | src/downloaders/video.py | s0hvaperuna/playlist-checker | ce9ee4e603070c9bd892a9bec64e792d647618d2 | [
"MIT"
] | null | null | null | src/downloaders/video.py | s0hvaperuna/playlist-checker | ce9ee4e603070c9bd892a9bec64e792d647618d2 | [
"MIT"
] | null | null | null | import io
import logging
import os
import re
import subprocess
import time
from dataclasses import dataclass
from random import uniform
import yt_dlp
from yt_dlp.utils import replace_extension, Popen, PostProcessingError
from src.config import MinMax
from src.config import get_yt_dlp_options
from src.db import models
logger = logging.getLogger('debug')
override_opts = get_yt_dlp_options()
SLEEP = MinMax(min=3, max=6)
@dataclass
class DownloadInfo:
filename: str
downloaded_format: str
success: bool
thumbnail_path: str = None
info_path: str = None
subtitle_paths: list[str] = None
blocked: bool = False
@classmethod
def failed(cls, blocked=False):
return cls('', '', False, blocked=blocked)
class Srv3SubtitlesConvertorAss(yt_dlp.FFmpegSubtitlesConvertorPP):
def __init__(self, downloader=None, keep_originals=True, converter_path=None):
super().__init__(downloader=downloader, format='ass')
self.keep_originals = keep_originals
self.converter_path = converter_path
self._ext = 'srv3'
def run(self, info):
if not self.converter_path:
files, info = super().run(info)
if self.keep_originals:
return [], info
return files, info
subs = info.get('requested_subtitles')
new_ext = self.format
if subs is None:
self.to_screen('There aren\'t any subtitles to convert')
return [], info
self.to_screen('Converting subtitles using YTSubConverter')
sub_filenames = []
converted = 0
subs_count = len(subs.keys())
for lang, sub in subs.items():
if not os.path.exists(sub.get('filepath', '')):
self.report_warning(f'Skipping embedding {lang} subtitle because the file is missing')
continue
ext = sub['ext']
if ext == new_ext:
self.to_screen('Subtitle file for %s is already in the requested format' % new_ext)
continue
# This postprocessor only supports one kind of subtitle
elif ext != self._ext:
continue
old_file = sub['filepath']
if not self.keep_originals:
sub_filenames.append(old_file)
new_file = replace_extension(old_file, new_ext)
cmd = [self.converter_path, old_file, new_file, '--visual']
self.write_debug('YTSubConverter command line: %s' % yt_dlp.utils.shell_quote(cmd))
p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
stdout, stderr = p.communicate_or_kill()
if p.returncode not in (0,):
stderr = stderr.decode('utf-8', 'replace').strip()
self.write_debug(stderr)
raise PostProcessingError(stderr.split('\n')[-1])
converted += 1
with io.open(new_file, 'rt', encoding='utf-8') as f:
subs[lang] = {
'ext': new_ext,
'data': f.read(),
'filepath': new_file,
}
info['__files_to_move'][new_file] = replace_extension(
info['__files_to_move'][sub['filepath']], new_ext)
if converted != subs_count:
files, info = super().run(info)
if not self.keep_originals:
sub_filenames.extend(files)
return sub_filenames, info
class SaveFilenamesPP(yt_dlp.postprocessor.PostProcessor):
"""Saves filenames (thumbnail and subtitles) to a DownloadInfo object before they are removed from the info dict."""
def __init__(self, download_info: DownloadInfo, downloader=None):
super().__init__(downloader)
self.download_info = download_info
@staticmethod
def get_thumbnail_path(info):
if not info.get('thumbnails'):
return None
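        # Walk the thumbnails from the end; -i is the negative index of the
        # last entry that actually has a 'filepath' written to disk.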
idx = next((
-i for i, t in enumerate(info['thumbnails'][::-1], 1) if t.get('filepath')
), None)
if idx is None:
return None
thumbnail_filename = info['thumbnails'][idx]['filepath']
if not os.path.exists(yt_dlp.utils.encodeFilename(thumbnail_filename)):
return None
return thumbnail_filename
@staticmethod
def get_filepaths(data: list[dict]) -> list[str]:
return [t['filepath'] for t in data if 'filepath' in t]
def get_subtitle_paths(self, info):
filepaths = set()
if subtitles := info.get('subtitles'):
for subs in subtitles.values():
filepaths.update(self.get_filepaths(subs))
if requested_subs := info.get('requested_subtitles'):
filepaths.update(
self.get_filepaths(requested_subs.values())
)
if not filepaths:
return None
return list(filepaths)
def run(self, info):
self.download_info.thumbnail_path = self.get_thumbnail_path(info)
self.download_info.subtitle_paths = self.get_subtitle_paths(info)
return [], info
BASE_OPTS = {
# Max title length 200 bytes
'outtmpl': '%(title).200B [%(id)s].%(ext)s',
'format': 'bv*+ba/b',
'writeinfojson': True,
'writesubtitles': True,
'subtitlesformat': 'ass/srv3/ttml/best',
'writethumbnail': True,
'subtitleslangs': ['all'],
'postprocessors': [
{
'key': 'EmbedThumbnail',
# already_have_thumbnail = True prevents the file from being deleted after embedding
'already_have_thumbnail': True
},
{
# Embed metadata in video using ffmpeg.
# ℹ️ See yt_dlp.postprocessor.FFmpegMetadataPP for the arguments it accepts
'key': 'FFmpegMetadata',
'add_chapters': True,
'add_metadata': True,
'add_infojson': False
},
{
'key': 'FFmpegEmbedSubtitle',
# already_have_subtitle = True prevents the file from being deleted after embedding
'already_have_subtitle': True
},
{
'key': 'MetadataParser',
# Remove automatic captions from info json
'actions': [yt_dlp.MetadataFromFieldPP.to_action(':(?P<automatic_captions>)')],
'when': 'pre_process'
},
],
'merge_output_format': 'mp4',
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'noprogress': True,
'quiet': True,
'no_warnings': True,
'overwrites': False,
'logger': logger,
'no_color': True,
'fragment_retries': 10,
'continuedl': False,
'retries': 10,
**override_opts
}
def download_video(video, row: models.Video, opts, sleep: MinMax = SLEEP) -> DownloadInfo:
"""
Args:
video (src.video.BaseVideo): Video object of the row.
row: Database row with the columns download_filename, site
opts (dict): format options
sleep: How long to sleep after download
Returns:
"""
path = os.path.join('data', 'videos', str(row.site))
os.makedirs(path, exist_ok=True)
if row.force_redownload:
opts['overwrites'] = True
# Override default format
if row.download_format:
opts['format'] = row.download_format
logger.info(f'Downloading {video.video_id}')
# The default template should not cause any filename collision problems between sites
# as the chance of the title and id being the same on multiple sites is low (unless the video is exactly the same).
outtmpl = BASE_OPTS.get('outtmpl', yt_dlp.utils.DEFAULT_OUTTMPL['default'])
opts['outtmpl'] = os.path.join(path, outtmpl)
try:
with yt_dlp.YoutubeDL({**BASE_OPTS, **opts}) as ytdl:
dl_info = DownloadInfo.failed()
ytdl.add_post_processor(Srv3SubtitlesConvertorAss(converter_path=os.getenv('YT_SUBS_CONVERTER', None)), when='before_dl')
ytdl.add_post_processor(SaveFilenamesPP(dl_info), when='after_move')
info = ytdl.sanitize_info(ytdl.extract_info(video.link))
new_file = ytdl.prepare_filename(info)
downloaded_format = info.get('format', opts.get('format', BASE_OPTS.get('format', 'default')))
dl_info.filename = new_file
dl_info.success = True
dl_info.downloaded_format = downloaded_format
dl_info.info_path = ytdl.prepare_filename(info, 'infojson')
except yt_dlp.DownloadError as e:
time.sleep(uniform(sleep.min, sleep.max))
blocked = re.search(r'blocked in your|copyright grounds|video unavailable', e.msg, re.I) is not None
if blocked:
logger.warning(f'Video was blocked in your country. {e.msg}')
else:
logger.exception('Failed to dl vid')
return DownloadInfo.failed(blocked=blocked)
except:
logger.exception('Failed to dl vid')
time.sleep(uniform(sleep.min, sleep.max))
return DownloadInfo.failed()
time.sleep(uniform(sleep.min, sleep.max))
return dl_info
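# Hypothetical call-site sketch (`video` and `row` come from the rest of
# this project and are not constructed here):
#     info = download_video(video, row, opts={})
#     if info.success:
#         print(info.filename, info.downloaded_format)
#     elif info.blocked:
#         print('blocked or removed at the source')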
| 33.199275 | 133 | 0.615301 | 4,708 | 0.513581 | 0 | 0 | 939 | 0.102433 | 0 | 0 | 2,434 | 0.265518 |
2cebf204a510dbac577ed44449d7a53a945fbd9d | 7,984 | py | Python | nol/KNNFeatures.py | tlarock/nol | 39c9ec8bc8e05a91c623511302978d2de479c0ff | [
"MIT"
] | null | null | null | nol/KNNFeatures.py | tlarock/nol | 39c9ec8bc8e05a91c623511302978d2de479c0ff | [
"MIT"
] | null | null | null | nol/KNNFeatures.py | tlarock/nol | 39c9ec8bc8e05a91c623511302978d2de479c0ff | [
"MIT"
] | 1 | 2019-09-19T18:17:01.000Z | 2019-09-19T18:17:01.000Z | import numpy as np
def set_egonets(self, nodes = None):
"""
Updates the self.egonets data structure, which is a
dictionary indexed by node pointing to the induced subgraph
on the node and its neighbors. Also updates the egonet_edgecounts
for each node, used in a calculation later.
"""
if nodes is None:
nodes = self.sample_graph_adjlist.keys()
## set the egonet for each node
for node in nodes:
#if node not in self.egonets.keys():
egonet = dict()
## start with the neighbors of the node
egonet[node] = self.sample_graph_adjlist[node]
leaving_edgecount = 0
egonet_nodes = self.sample_adjlist_sets[node]
within_edgecount = len(egonet_nodes)
#else:
# egonet = self.egonets[node]
# leaving_edgecount = self.egonet_edgecounts[node]['leaving']
# within_edgecount = self.egonet_edgecounts[node]['within']
# egonet_nodes = egonet.keys() - self.sample_adjlist_sets[node]
#assert node not in egonet_nodes, 'node is in egonet_nodes!'
#print('node: ' + str(node) + ' neighbors: ' + str(egonet_nodes))
## for every neighbor, add links to nodes that are in the egonet
for neighbor in egonet_nodes:
if neighbor in self.sample_graph_adjlist.keys():
neighbors_of_neighbor = self.sample_adjlist_sets[neighbor]
egonet_neighbors = neighbors_of_neighbor.intersection(egonet_nodes)
egonet[neighbor] = {key:dict() for key in egonet_neighbors}
## update # of edges within the egonet
within_edgecount += len(egonet_neighbors)
## update # of edges leaving the egonet
if (len(self.sample_adjlist_sets[neighbor]) - len(egonet_neighbors)) > 0:
leaving_edgecount += len(self.sample_adjlist_sets[neighbor]) - len(egonet_neighbors) - 1
#print('neighbor: ' + str(neighbor) + ' current within edgecount: ' + str(within_edgecount) + ' current leaving edgecount: ' + str(leaving_edgecount) + ' egonet: ' + str(egonet[neighbor]))
else:
egonet[neighbor] = {node:dict()}
## set the egonet
self.egonets[node] = dict(egonet)
## set edgecount properties
self.egonet_edgecounts[node] = {'within':within_edgecount, 'leaving':leaving_edgecount}
#print('edgecounts: ' + str(self.egonet_edgecounts[node]))
def calculate_features(self, order='linear'):
"""
Calculates features from scratch. Use update_features for updates after a probe!
Using recursive features following ReFeX:
nodal: degree
    neighborhood: number of probed neighbors (the egonet edge counts are
        computed in set_egonets but are not used as features here)
    recursive: mean and median of the neighbors' degrees
"""
## compute neighborhood features
neighborhood_features = compute_neighborhood_features(self)
## compute recursive features
recursive_features = compute_recursive_features(self, neighborhood_features)
num_recursive_feats = len(recursive_features[0])
features = np.zeros( (len(self.node_to_row), num_recursive_feats) )
for node, row in self.node_to_row.items():
features[row] = recursive_features[row]
## store the non normalized features
self.F_no_normalization = features.copy()
## normalize by the max
max_feats = np.max(self.F_no_normalization, axis = 0)
min_feats = np.min(self.F_no_normalization, axis = 0)
normalization = max_feats - min_feats
normalization[normalization == 0] = 1
features = (self.F_no_normalization - min_feats) / normalization
self.NumF = features.shape[1]
return features
def update_features(self, node, order='linear'):
"""
Updates the feature matrix based on the node being probed.
"""
## get the nodes to update
tmp_nodes = self.sample_adjlist_sets[node].copy()
tmp_nodes.add(node)
nodes_to_update = set(tmp_nodes)
for u in tmp_nodes:
nodes_to_update.update(self.sample_adjlist_sets[u])
#nodes_to_update = list(self.sample_graph_adjlist.keys()) + [node]
## compute the features for these nodes
neighborhood_features = compute_neighborhood_features(self, nodes_to_update)
recursive_features = compute_recursive_features(self, neighborhood_features, nodes_to_update)
num_recursive_feats = self.F_no_normalization.shape[1]
## get the number of new nodes by comparing the old feature table
old_length = self.F_no_normalization.shape[0]
extension_length = len(self.node_to_row.keys()) - old_length
## Concatentate 0s to the feature matrix
self.F_no_normalization = np.concatenate( (self.F_no_normalization, np.zeros( (extension_length, num_recursive_feats) )))
## update the feature tables
for node in nodes_to_update:
row = self.node_to_row[node]
self.F_no_normalization[row] = recursive_features[row]
## normalize by the max
max_feats = np.max(self.F_no_normalization, axis = 0)
min_feats = np.min(self.F_no_normalization, axis = 0)
normalization = max_feats - min_feats
normalization[normalization == 0] = 1
features = (self.F_no_normalization - min_feats) / normalization
assert features.shape == self.F_no_normalization.shape, 'features.shape != no norm.shape!'
self.F = features
self.NumF = features.shape[1]
return features
def compute_neighborhood_features(self, nodes = None):
"""
    Returns an array, indexed by node row, of local features:
    degree and number of probed neighbors.
"""
if nodes is None:
nodes = self.sample_graph_adjlist.keys()
    ## neighborhood_features is a 2-column array: [degree, probed neighbors]
## if there are features already
if self.F_no_normalization is not None:
## initialize the features from the unnormalized features
if len(self.node_to_row) != self.F_no_normalization.shape[0]:
extension_length = len(self.node_to_row) - self.F_no_normalization.shape[0]
neighborhood_features = np.vstack( (self.F_no_normalization, np.zeros( (extension_length, self.F_no_normalization.shape[1]))))
neighborhood_features = neighborhood_features[:,0:2]
else:
neighborhood_features = self.F_no_normalization[:,0:2]
else:
neighborhood_features = np.zeros((len(self.node_to_row), 2))
for node in nodes:
egonet = self.egonets[node]
row = self.node_to_row[node]
## egonet degree (same as regular degree)
degree = len(egonet[node])
self.D[row] = degree
number_probed_neighbors = len(self.probed_neighbors[node])
## set features
neighborhood_features[row] = np.array([degree, number_probed_neighbors])
return neighborhood_features
def compute_recursive_features(self, curr_features, nodes=None):
"""
    Compute recursive features (mean and median of the neighbors' degrees).
    Returns an array of the current features with the recursive features appended.
"""
if nodes is None:
nodes = self.sample_graph_adjlist.keys()
new_features = np.hstack((curr_features, np.zeros((curr_features.shape[0], curr_features.shape[1]))))
## for each node
for node in nodes:
## get the egonet node feature matrix
egonet = self.egonets[node]
row = self.node_to_row[node]
neighbor_indices = [self.node_to_row[u] for u in egonet.keys() if u != node]
if len(neighbor_indices) > 0:
            ## compute the median and mean of the neighbors' degree column
median = np.array(np.median(curr_features[neighbor_indices,0], axis = 0))
mean = np.array(np.mean(curr_features[neighbor_indices,0], axis = 0))
else:
## if there are no neighbors, all features are 0
mean = 0
median = 0
## add as features
recursive_features = np.array([mean, median])
new_features[row] = np.concatenate( (curr_features[row], recursive_features) )
return new_features
| 43.628415 | 204 | 0.675726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,571 | 0.322019 |
2cecc4379034e1c8d91bd34f47fbd53d6988aac0 | 857 | py | Python | crossvalidation_pipeline.py | ktian08/6784-drugs | 7c3ae9f65ce60b031008b0026bb9b954575315fa | [
"MIT"
] | 1 | 2020-06-13T00:40:21.000Z | 2020-06-13T00:40:21.000Z | crossvalidation_pipeline.py | ktian08/6784-drugs | 7c3ae9f65ce60b031008b0026bb9b954575315fa | [
"MIT"
] | null | null | null | crossvalidation_pipeline.py | ktian08/6784-drugs | 7c3ae9f65ce60b031008b0026bb9b954575315fa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Andrew D. Rouillard
Computational Biologist
Target Sciences
GSK
andrew.d.rouillard@gsk.com
"""
import sys
import get_generalizable_features
import get_merged_features
import get_useful_features
def main(validation_rep=0, validation_fold=0):
print('VALIDATION_REP: {0!s}, VALIDATION_FOLD:{1!s}'.format(validation_rep, validation_fold), flush=True)
print('GETTING GENERALIZABLE FEATURES...', flush=True)
get_generalizable_features.main(validation_rep, validation_fold)
print('GETTING MERGED FEATURES...', flush=True)
get_merged_features.main(validation_rep, validation_fold)
print('GETTING USEFUL FEATURES...', flush=True)
get_useful_features.main(validation_rep, validation_fold)
if __name__ == '__main__':
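    # e.g. `python crossvalidation_pipeline.py 0 3` runs rep 0, fold 3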
main(validation_rep=int(sys.argv[1]), validation_fold=int(sys.argv[2]))
| 28.566667 | 109 | 0.753792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.312719 |
2ceda82e2a43820df794cf8b286e7af486b5effb | 4,818 | py | Python | locations/spiders/kona_grill.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
] | null | null | null | locations/spiders/kona_grill.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
] | null | null | null | locations/spiders/kona_grill.py | mfjackson/alltheplaces | 37c90b4041c80a574e6e4c2f886883e97df4b636 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
STATES = [
"AL",
"AK",
"AZ",
"AR",
"CA",
"CO",
"CT",
"DC",
"DE",
"FL",
"GA",
"HI",
"ID",
"IL",
"IN",
"IA",
"KS",
"KY",
"LA",
"ME",
"MD",
"MA",
"MI",
"MN",
"MS",
"MO",
"MT",
"NE",
"NV",
"NH",
"NJ",
"NM",
"NY",
"NC",
"ND",
"OH",
"OK",
"OR",
"PA",
"RI",
"SC",
"SD",
"TN",
"TX",
"UT",
"VT",
"VA",
"WA",
"WV",
"WI",
"WY",
]
WEEKDAYS = ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]
class KonaGrillSpider(scrapy.Spider):
download_delay = 0.2
name = "konagrill"
item_attributes = {"brand": "Kona Grill", "brand_wikidata": "Q6428706"}
allowed_domains = ["konagrill.com"]
def start_requests(self):
url_by_state = "https://www.konagrill.com/ajax/getlocationsbystate"
headers = {"content-type": "application/x-www-form-urlencoded"}
# Get store id per state
for state in STATES:
yield scrapy.http.Request(
url_by_state,
method="POST",
body="state={}".format(state),
callback=self.parse,
headers=headers,
)
def parse(self, response):
store_data = json.loads(response.text)
url_location_details = "https://www.konagrill.com/ajax/getlocationdetails"
headers = {"content-type": "application/x-www-form-urlencoded"}
store_ids = []
if not store_data.get("data"):
return
store_ids += [s.get("id") for _, s in store_data.get("data").items()]
# Get store details
for i in store_ids:
yield scrapy.http.Request(
url_location_details,
method="POST",
body="id={}".format(i),
callback=self.parse_store,
headers=headers,
)
def parse_store(self, response):
response_data = json.loads(response.text)
if not response_data.get("data"):
return
store = response_data.get("data")
dh = store.get("dininghours")
# Data is inconsistent some keys were found with a trailing space
opening_hours = self.parse_hours(
dh.get("dining hours") or dh.get("dining hours ")
)
properties = {
"addr_full": store.get("address"),
"city": store.get("city"),
"extras": {
"email": store.get("email"),
},
"lat": store.get("latitude"),
"lon": store.get("longitude"),
"name": store.get("title"),
"opening_hours": opening_hours,
"phone": store.get("phone_number"),
"postcode": store.get("zip"),
"ref": store.get("id"),
"state": store.get("state"),
"website": store.get("order_online_url"),
}
yield GeojsonPointItem(**properties)
def parse_hours(self, hours):
oh = OpeningHours()
for t in hours:
# Some day entries contain invalid week data, e.g. "Brunch"
# "Brunch" is a special dining hour that is contained in regular hours, ignore it
if "Brunch" in t.get("days"):
continue
days = self.parse_days(t.get("days"))
open_time, close_time = t.get("hours").split("-")
ot = open_time.strip()
ct = close_time.strip()
for day in days:
oh.add_range(day=day, open_time=ot, close_time=ct, time_format="%I%p")
return oh.as_opening_hours()
def parse_days(self, days):
"""Parse day ranges and returns a list of days it represent
The following formats are considered:
- Single day, e.g. "Mon", "Monday"
- Range, e.g. "Mon-Fri", "Tue-Sund", "Sat-Sunday"
- Two days, e.g. "Sat & Sun", "Friday & Su"
Returns a list with the weekdays
"""
parsed_days = []
# Range
# Produce a list of weekdays between two days e.g. su-sa, mo-th, etc.
if "-" in days:
d = days.split("-")
r = [i.strip()[:2] for i in d]
s = WEEKDAYS.index(r[0].title())
e = WEEKDAYS.index(r[1].title())
if s <= e:
return WEEKDAYS[s : e + 1]
else:
return WEEKDAYS[s:] + WEEKDAYS[: e + 1]
# Two days
if "&" in days:
d = days.split("&")
return [i.strip()[:2].title() for i in d]
# Single days
else:
return [days.strip()[:2].title()]
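    # Illustrative results (hypothetical inputs):
    #   parse_days('Mon-Fri')   -> ['Mo', 'Tu', 'We', 'Th', 'Fr']
    #   parse_days('Sun-Tue')   -> ['Su', 'Mo', 'Tu']
    #   parse_days('Sat & Sun') -> ['Sa', 'Su']
    #   parse_days('Wednesday') -> ['We']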
| 26.472527 | 93 | 0.494396 | 4,099 | 0.850768 | 2,202 | 0.457036 | 0 | 0 | 0 | 0 | 1,538 | 0.31922 |
2cee532fa1ad8c3bab0846e524ce8d97c1e63a13 | 93 | py | Python | test/login.py | hongren798911/haha | 8b198b6e4ae3d992f2d1d7217b7532da3d557112 | [
"MIT"
] | null | null | null | test/login.py | hongren798911/haha | 8b198b6e4ae3d992f2d1d7217b7532da3d557112 | [
"MIT"
] | null | null | null | test/login.py | hongren798911/haha | 8b198b6e4ae3d992f2d1d7217b7532da3d557112 | [
"MIT"
] | null | null | null | num1 = 100
num2 = 200
num3 = 300
num4 = 400
num5 = 500
mum6 = 600
num7 = 700
num8 = 800
| 6.642857 | 10 | 0.602151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2cee64c765350c049d3f0289910d5b8f629efbd1 | 16,464 | py | Python | darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/health_check.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/health_check.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/health_check.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | 1 | 2020-06-25T03:12:58.000Z | 2020-06-25T03:12:58.000Z | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class HealthCheck(object):
"""
Health checks monitor the status of your origin servers and only route traffic to the origins that pass the health check. If the health check fails, origin is automatically removed from the load balancing.
There is roughly one health check per EDGE POP per period. Any checks that pass will be reported as \"healthy\".
"""
#: A constant which can be used with the method property of a HealthCheck.
#: This constant has a value of "GET"
METHOD_GET = "GET"
#: A constant which can be used with the method property of a HealthCheck.
#: This constant has a value of "HEAD"
METHOD_HEAD = "HEAD"
#: A constant which can be used with the method property of a HealthCheck.
#: This constant has a value of "POST"
METHOD_POST = "POST"
#: A constant which can be used with the expected_response_code_group property of a HealthCheck.
#: This constant has a value of "2XX"
EXPECTED_RESPONSE_CODE_GROUP_2_XX = "2XX"
#: A constant which can be used with the expected_response_code_group property of a HealthCheck.
#: This constant has a value of "3XX"
EXPECTED_RESPONSE_CODE_GROUP_3_XX = "3XX"
#: A constant which can be used with the expected_response_code_group property of a HealthCheck.
#: This constant has a value of "4XX"
EXPECTED_RESPONSE_CODE_GROUP_4_XX = "4XX"
#: A constant which can be used with the expected_response_code_group property of a HealthCheck.
#: This constant has a value of "5XX"
EXPECTED_RESPONSE_CODE_GROUP_5_XX = "5XX"
def __init__(self, **kwargs):
"""
Initializes a new HealthCheck object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param is_enabled:
The value to assign to the is_enabled property of this HealthCheck.
:type is_enabled: bool
:param method:
The value to assign to the method property of this HealthCheck.
Allowed values for this property are: "GET", "HEAD", "POST", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type method: str
:param path:
The value to assign to the path property of this HealthCheck.
:type path: str
:param headers:
The value to assign to the headers property of this HealthCheck.
:type headers: dict(str, str)
:param expected_response_code_group:
The value to assign to the expected_response_code_group property of this HealthCheck.
Allowed values for items in this list are: "2XX", "3XX", "4XX", "5XX", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type expected_response_code_group: list[str]
:param is_response_text_check_enabled:
The value to assign to the is_response_text_check_enabled property of this HealthCheck.
:type is_response_text_check_enabled: bool
:param expected_response_text:
The value to assign to the expected_response_text property of this HealthCheck.
:type expected_response_text: str
:param interval_in_seconds:
The value to assign to the interval_in_seconds property of this HealthCheck.
:type interval_in_seconds: int
:param timeout_in_seconds:
The value to assign to the timeout_in_seconds property of this HealthCheck.
:type timeout_in_seconds: int
:param healthy_threshold:
The value to assign to the healthy_threshold property of this HealthCheck.
:type healthy_threshold: int
:param unhealthy_threshold:
The value to assign to the unhealthy_threshold property of this HealthCheck.
:type unhealthy_threshold: int
"""
self.swagger_types = {
'is_enabled': 'bool',
'method': 'str',
'path': 'str',
'headers': 'dict(str, str)',
'expected_response_code_group': 'list[str]',
'is_response_text_check_enabled': 'bool',
'expected_response_text': 'str',
'interval_in_seconds': 'int',
'timeout_in_seconds': 'int',
'healthy_threshold': 'int',
'unhealthy_threshold': 'int'
}
self.attribute_map = {
'is_enabled': 'isEnabled',
'method': 'method',
'path': 'path',
'headers': 'headers',
'expected_response_code_group': 'expectedResponseCodeGroup',
'is_response_text_check_enabled': 'isResponseTextCheckEnabled',
'expected_response_text': 'expectedResponseText',
'interval_in_seconds': 'intervalInSeconds',
'timeout_in_seconds': 'timeoutInSeconds',
'healthy_threshold': 'healthyThreshold',
'unhealthy_threshold': 'unhealthyThreshold'
}
self._is_enabled = None
self._method = None
self._path = None
self._headers = None
self._expected_response_code_group = None
self._is_response_text_check_enabled = None
self._expected_response_text = None
self._interval_in_seconds = None
self._timeout_in_seconds = None
self._healthy_threshold = None
self._unhealthy_threshold = None
@property
def is_enabled(self):
"""
Gets the is_enabled of this HealthCheck.
Enables or disables the health checks.
:return: The is_enabled of this HealthCheck.
:rtype: bool
"""
return self._is_enabled
@is_enabled.setter
def is_enabled(self, is_enabled):
"""
Sets the is_enabled of this HealthCheck.
Enables or disables the health checks.
:param is_enabled: The is_enabled of this HealthCheck.
:type: bool
"""
self._is_enabled = is_enabled
@property
def method(self):
"""
Gets the method of this HealthCheck.
An HTTP verb (i.e. HEAD, GET, or POST) to use when performing the health check.
Allowed values for this property are: "GET", "HEAD", "POST", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The method of this HealthCheck.
:rtype: str
"""
return self._method
@method.setter
def method(self, method):
"""
Sets the method of this HealthCheck.
An HTTP verb (i.e. HEAD, GET, or POST) to use when performing the health check.
:param method: The method of this HealthCheck.
:type: str
"""
allowed_values = ["GET", "HEAD", "POST"]
if not value_allowed_none_or_none_sentinel(method, allowed_values):
method = 'UNKNOWN_ENUM_VALUE'
self._method = method
@property
def path(self):
"""
Gets the path of this HealthCheck.
Path to visit on your origins when performing the health check.
:return: The path of this HealthCheck.
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""
Sets the path of this HealthCheck.
Path to visit on your origins when performing the health check.
:param path: The path of this HealthCheck.
:type: str
"""
self._path = path
@property
def headers(self):
"""
Gets the headers of this HealthCheck.
HTTP header fields to include in health check requests, expressed as `\"name\": \"value\"` properties. Because HTTP header field names are case-insensitive, any use of names that are case-insensitive equal to other names will be rejected. If Host is not specified, requests will include a Host header field with value matching the policy's protected domain. If User-Agent is not specified, requests will include a User-Agent header field with value \"waf health checks\".
**Note:** The only currently-supported header fields are Host and User-Agent.
:return: The headers of this HealthCheck.
:rtype: dict(str, str)
"""
return self._headers
@headers.setter
def headers(self, headers):
"""
Sets the headers of this HealthCheck.
HTTP header fields to include in health check requests, expressed as `\"name\": \"value\"` properties. Because HTTP header field names are case-insensitive, any use of names that are case-insensitive equal to other names will be rejected. If Host is not specified, requests will include a Host header field with value matching the policy's protected domain. If User-Agent is not specified, requests will include a User-Agent header field with value \"waf health checks\".
**Note:** The only currently-supported header fields are Host and User-Agent.
:param headers: The headers of this HealthCheck.
:type: dict(str, str)
"""
self._headers = headers
@property
def expected_response_code_group(self):
"""
Gets the expected_response_code_group of this HealthCheck.
The HTTP response codes that signify a healthy state.
- **2XX:** Success response code group.
- **3XX:** Redirection response code group.
- **4XX:** Client errors response code group.
- **5XX:** Server errors response code group.
Allowed values for items in this list are: "2XX", "3XX", "4XX", "5XX", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The expected_response_code_group of this HealthCheck.
:rtype: list[str]
"""
return self._expected_response_code_group
@expected_response_code_group.setter
def expected_response_code_group(self, expected_response_code_group):
"""
Sets the expected_response_code_group of this HealthCheck.
The HTTP response codes that signify a healthy state.
- **2XX:** Success response code group.
- **3XX:** Redirection response code group.
- **4XX:** Client errors response code group.
- **5XX:** Server errors response code group.
:param expected_response_code_group: The expected_response_code_group of this HealthCheck.
:type: list[str]
"""
allowed_values = ["2XX", "3XX", "4XX", "5XX"]
if expected_response_code_group:
expected_response_code_group[:] = ['UNKNOWN_ENUM_VALUE' if not value_allowed_none_or_none_sentinel(x, allowed_values) else x for x in expected_response_code_group]
self._expected_response_code_group = expected_response_code_group
@property
def is_response_text_check_enabled(self):
"""
Gets the is_response_text_check_enabled of this HealthCheck.
Enables or disables additional check for predefined text in addition to response code.
:return: The is_response_text_check_enabled of this HealthCheck.
:rtype: bool
"""
return self._is_response_text_check_enabled
@is_response_text_check_enabled.setter
def is_response_text_check_enabled(self, is_response_text_check_enabled):
"""
Sets the is_response_text_check_enabled of this HealthCheck.
Enables or disables additional check for predefined text in addition to response code.
:param is_response_text_check_enabled: The is_response_text_check_enabled of this HealthCheck.
:type: bool
"""
self._is_response_text_check_enabled = is_response_text_check_enabled
@property
def expected_response_text(self):
"""
Gets the expected_response_text of this HealthCheck.
Health check will search for the given text in a case-sensitive manner within the response body and will fail if the text is not found.
:return: The expected_response_text of this HealthCheck.
:rtype: str
"""
return self._expected_response_text
@expected_response_text.setter
def expected_response_text(self, expected_response_text):
"""
Sets the expected_response_text of this HealthCheck.
Health check will search for the given text in a case-sensitive manner within the response body and will fail if the text is not found.
:param expected_response_text: The expected_response_text of this HealthCheck.
:type: str
"""
self._expected_response_text = expected_response_text
@property
def interval_in_seconds(self):
"""
Gets the interval_in_seconds of this HealthCheck.
Time between health checks of an individual origin server, in seconds.
:return: The interval_in_seconds of this HealthCheck.
:rtype: int
"""
return self._interval_in_seconds
@interval_in_seconds.setter
def interval_in_seconds(self, interval_in_seconds):
"""
Sets the interval_in_seconds of this HealthCheck.
Time between health checks of an individual origin server, in seconds.
:param interval_in_seconds: The interval_in_seconds of this HealthCheck.
:type: int
"""
self._interval_in_seconds = interval_in_seconds
@property
def timeout_in_seconds(self):
"""
Gets the timeout_in_seconds of this HealthCheck.
Response timeout represents wait time until request is considered failed, in seconds.
:return: The timeout_in_seconds of this HealthCheck.
:rtype: int
"""
return self._timeout_in_seconds
@timeout_in_seconds.setter
def timeout_in_seconds(self, timeout_in_seconds):
"""
Sets the timeout_in_seconds of this HealthCheck.
Response timeout represents wait time until request is considered failed, in seconds.
:param timeout_in_seconds: The timeout_in_seconds of this HealthCheck.
:type: int
"""
self._timeout_in_seconds = timeout_in_seconds
@property
def healthy_threshold(self):
"""
Gets the healthy_threshold of this HealthCheck.
Number of successful health checks after which the server is marked up.
:return: The healthy_threshold of this HealthCheck.
:rtype: int
"""
return self._healthy_threshold
@healthy_threshold.setter
def healthy_threshold(self, healthy_threshold):
"""
Sets the healthy_threshold of this HealthCheck.
Number of successful health checks after which the server is marked up.
:param healthy_threshold: The healthy_threshold of this HealthCheck.
:type: int
"""
self._healthy_threshold = healthy_threshold
@property
def unhealthy_threshold(self):
"""
Gets the unhealthy_threshold of this HealthCheck.
Number of failed health checks after which the server is marked down.
:return: The unhealthy_threshold of this HealthCheck.
:rtype: int
"""
return self._unhealthy_threshold
@unhealthy_threshold.setter
def unhealthy_threshold(self, unhealthy_threshold):
"""
Sets the unhealthy_threshold of this HealthCheck.
Number of failed health checks after which the server is marked down.
:param unhealthy_threshold: The unhealthy_threshold of this HealthCheck.
:type: int
"""
self._unhealthy_threshold = unhealthy_threshold
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
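# Illustrative usage (not part of the original file; the keyword arguments are
# wired up by the @init_model_state_from_kwargs decorator shown above):
#   hc = HealthCheck(method="GET", path="/health",
#                    expected_response_code_group=["2XX"])
#   hc.method           # -> "GET"
#   hc.method = "PUT"   # not in allowed_values -> coerced to 'UNKNOWN_ENUM_VALUE'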
| 37.589041 | 479 | 0.672133 | 15,924 | 0.967201 | 0 | 0 | 15,954 | 0.969023 | 0 | 0 | 11,837 | 0.718963 |
2cee8c3838a909612dd4cc9396674d4f705e3fa3 | 409 | py | Python | misc/appveyor_filter.py | ppwwyyxx/taichi | ef0c3367bb06ad78b3457b8f93b5370f14b1d9c4 | [
"MIT"
] | 2 | 2020-10-22T14:57:47.000Z | 2020-10-24T07:30:47.000Z | misc/appveyor_filter.py | zf38473013/taichi | ad4d7ae04f4e559e84f6dee4a64ad57c3cf0c7fb | [
"MIT"
] | 3 | 2020-08-24T09:07:15.000Z | 2020-08-24T09:18:29.000Z | misc/appveyor_filter.py | zf38473013/taichi | ad4d7ae04f4e559e84f6dee4a64ad57c3cf0c7fb | [
"MIT"
] | 1 | 2020-09-29T17:56:48.000Z | 2020-09-29T17:56:48.000Z | import sys
import os
msg = os.environ["APPVEYOR_REPO_COMMIT_MESSAGE"]
if msg.startswith('[release]') or sys.version_info[1] == 6:
    exit(0)  # Build for this configuration (starts with '[release]', or python version is 3.6)
else:
print(
        f'[appveyor_filter] Not building for [{msg}] with Python {sys.version[:5]}'
)
exit(1) # Do not build this configuration. See appveyor.yml
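# Illustrative behavior (commit messages hypothetical):
#   APPVEYOR_REPO_COMMIT_MESSAGE='[release] v1.2'        -> exit 0 (build)
#   APPVEYOR_REPO_COMMIT_MESSAGE='fix typo', Python 3.6  -> exit 0 (build)
#   APPVEYOR_REPO_COMMIT_MESSAGE='fix typo', Python 3.7  -> exit 1 (skip)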
| 29.214286 | 89 | 0.657702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 245 | 0.599022 |
2cf1316f9ee5f955fd15c0e34a1d960d2f6a156f | 303 | py | Python | mysite/SocialApp/migrations/0003_delete_remotefollow.py | asmao7/Cmput404W2021 | 82c1f42492c93048d5f144e2bbb416764d78013b | [
"MIT"
] | 3 | 2021-01-20T18:23:14.000Z | 2021-02-22T19:38:46.000Z | mysite/SocialApp/migrations/0003_delete_remotefollow.py | asmao7/Cmput404W2021 | 82c1f42492c93048d5f144e2bbb416764d78013b | [
"MIT"
] | 24 | 2021-02-18T19:28:46.000Z | 2021-04-14T17:12:21.000Z | mysite/SocialApp/migrations/0003_delete_remotefollow.py | asmao7/Cmput404W2021 | 82c1f42492c93048d5f144e2bbb416764d78013b | [
"MIT"
] | 1 | 2021-05-13T04:43:00.000Z | 2021-05-13T04:43:00.000Z | # Generated by Django 3.1.6 on 2021-04-12 08:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('SocialApp', '0002_auto_20210411_2237'),
]
operations = [
migrations.DeleteModel(
name='RemoteFollow',
),
]
| 17.823529 | 49 | 0.613861 | 218 | 0.719472 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.320132 |
2cf319393483234ff687e05e713a56ff2a047833 | 6,688 | py | Python | api/tests/opentrons/protocol_runner/test_thread_async_queue.py | anuwrag/opentrons | 28c8d76a19e367c6bd38f5290faaa32abf378715 | [
"Apache-2.0"
] | 2 | 2015-11-10T17:49:51.000Z | 2016-01-15T04:43:37.000Z | api/tests/opentrons/protocol_runner/test_thread_async_queue.py | anuwrag/opentrons | 28c8d76a19e367c6bd38f5290faaa32abf378715 | [
"Apache-2.0"
] | null | null | null | api/tests/opentrons/protocol_runner/test_thread_async_queue.py | anuwrag/opentrons | 28c8d76a19e367c6bd38f5290faaa32abf378715 | [
"Apache-2.0"
] | null | null | null | """Tests for thread_async_queue."""
from __future__ import annotations
import asyncio
from concurrent.futures import ThreadPoolExecutor
from itertools import chain
from typing import List, NamedTuple
import pytest
from opentrons.protocol_runner.thread_async_queue import (
ThreadAsyncQueue,
QueueClosed,
)
def test_basic_single_threaded_behavior() -> None:
"""Test basic queue behavior in a single thread."""
subject = ThreadAsyncQueue[int]()
with subject:
subject.put(1)
subject.put(2)
subject.put(3)
# Putting isn't allowed after closing.
with pytest.raises(QueueClosed):
subject.put(4)
with pytest.raises(QueueClosed):
subject.put(5)
# Closing isn't allowed after closing.
with pytest.raises(QueueClosed):
subject.done_putting()
# Values are retrieved in order.
assert [subject.get(), subject.get(), subject.get()] == [1, 2, 3]
# After retrieving all values, further retrievals raise.
with pytest.raises(QueueClosed):
subject.get()
with pytest.raises(QueueClosed):
# If closing were naively implemented as a sentinel value being inserted
# into the queue, it might be that the first get() after the close
# correctly raises but the second get() doesn't.
subject.get()
def test_multi_thread_producer_consumer() -> None:
"""Stochastically smoke-test thread safety.
Use the queue to pass values between threads
in a multi-producer, multi-consumer setup.
Verify that all the values make it through in the correct order.
"""
num_producers = 3
num_consumers = 3
producer_ids = list(range(num_producers))
# The values that each producer will put into the queue.
# Anecdotally, threads interleave meaningfully with at least 10000 values.
values_per_producer = list(range(30000))
all_expected_values = [
_ProducedValue(producer_id=p, value=v)
for p in producer_ids
for v in values_per_producer
]
subject = ThreadAsyncQueue[_ProducedValue]()
# Run producers concurrently with consumers.
with ThreadPoolExecutor(max_workers=num_producers + num_consumers) as executor:
# `with subject` needs to be inside `with ThreadPoolExecutor`
# to avoid deadlocks in case something in here raises.
# Consumers need to see the queue closed eventually to terminate,
# and `with ThreadPoolExecutor` will wait until all threads are terminated
# before exiting.
with subject:
producers = [
executor.submit(
_produce,
queue=subject,
values=values_per_producer,
producer_id=producer_id,
)
for producer_id in producer_ids
]
consumers = [
executor.submit(_consume, queue=subject) for i in range(num_consumers)
]
# Ensure all producers are done before we exit the `with subject` block
# and close off the queue to further submissions.
for c in producers:
c.result()
consumer_results = [consumer.result() for consumer in consumers]
all_values = list(chain(*consumer_results))
# Assert that the total set of consumed values is as expected:
# No duplicates, no extras, and nothing missing.
assert sorted(all_values) == sorted(all_expected_values)
    def assert_consumer_result_correctly_ordered(
        consumer_result: List[_ProducedValue],
    ) -> None:
        # Assert that the consumer got values in the order the producer provided them.
        # Allow values from different producers to be interleaved,
        # and tolerate skipped values (assume they were given to a different consumer).
        # [[All consumed from producer 0], [All consumed from producer 1], etc.]
        consumed_values_per_producer = [
            [pv for pv in consumer_result if pv.producer_id == producer_id]
            for producer_id in producer_ids
        ]
        for values_from_single_producer in consumed_values_per_producer:
            assert values_from_single_producer == sorted(values_from_single_producer)
    for consumer_result in consumer_results:
        assert_consumer_result_correctly_ordered(consumer_result)
async def test_async() -> None:
"""Smoke-test async support.
Use the queue to pass values
from a single async producer to a single async consumer,
running concurrently in the same event loop.
This verifies two things:
1. That async retrieval returns basically the expected values.
2. That async retrieval keeps the event loop free while waiting.
If it didn't, this test would reveal the problem by deadlocking.
We trust that more complicated multi-producer/multi-consumer interactions
are covered by the non-async tests.
"""
expected_values = list(range(1000))
subject = ThreadAsyncQueue[_ProducedValue]()
consumer = asyncio.create_task(_consume_async(queue=subject))
try:
with subject:
await _produce_async(queue=subject, values=expected_values, producer_id=0)
finally:
consumed = await consumer
assert consumed == [_ProducedValue(producer_id=0, value=v) for v in expected_values]
class _ProducedValue(NamedTuple):
producer_id: int
value: int
def _produce(
queue: ThreadAsyncQueue[_ProducedValue],
values: List[int],
producer_id: int,
) -> None:
"""Put values in the queue, tagged with an ID representing this producer."""
for v in values:
queue.put(_ProducedValue(producer_id=producer_id, value=v))
def _consume(queue: ThreadAsyncQueue[_ProducedValue]) -> List[_ProducedValue]:
"""Consume values from the queue indiscriminately until it's closed.
Return everything consumed, in the order that this function consumed it.
"""
result = []
for value in queue.get_until_closed():
result.append(value)
return result
async def _produce_async(
queue: ThreadAsyncQueue[_ProducedValue],
values: List[int],
producer_id: int,
) -> None:
"""Like `_produce()`, except yield to the event loop after each insertion."""
for value in values:
queue.put(_ProducedValue(producer_id=producer_id, value=value))
await asyncio.sleep(0)
async def _consume_async(
queue: ThreadAsyncQueue[_ProducedValue],
) -> List[_ProducedValue]:
"""Like _consume()`, except yield to the event loop while waiting."""
result = []
async for value in queue.get_async_until_closed():
result.append(value)
return result
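# API surface of ThreadAsyncQueue exercised above (inferred from these tests,
# not from the implementation itself): put() appends; using the queue as a
# context manager / calling done_putting() closes it to further puts; get() and
# get_until_closed() retrieve values from threads; get_async_until_closed()
# retrieves without blocking the event loop; QueueClosed is raised once the
# queue is closed and drained.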
| 33.273632 | 88 | 0.683014 | 69 | 0.010317 | 0 | 0 | 0 | 0 | 1,612 | 0.241029 | 2,530 | 0.378289 |
2cf440be4e7f718674833ee056299fd05b2abad8 | 3,062 | py | Python | courses/data_analysis/deepdive/composer-exercises/subdag_example_solution.py | pranaynanda/training-data-analyst | f10ab778589129239fd5b277cfdefb41638eded5 | [
"Apache-2.0"
] | null | null | null | courses/data_analysis/deepdive/composer-exercises/subdag_example_solution.py | pranaynanda/training-data-analyst | f10ab778589129239fd5b277cfdefb41638eded5 | [
"Apache-2.0"
] | null | null | null | courses/data_analysis/deepdive/composer-exercises/subdag_example_solution.py | pranaynanda/training-data-analyst | f10ab778589129239fd5b277cfdefb41638eded5 | [
"Apache-2.0"
] | null | null | null | """Solution for subdag_example.py.
Uses a factory function to return a DAG that can be used as the subdag argument
to SubDagOperator. Notice that:
1) the SubDAG's dag_id is formatted as parent_dag_id.subdag_task_id
2) the start_date and schedule_interval of the SubDAG are copied from the parent
DAG.
"""
from airflow import DAG
from airflow.contrib.operators.gcs_download_operator import GoogleCloudStorageDownloadOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
from datetime import datetime, timedelta
YESTERDAY = datetime.combine(datetime.today() - timedelta(days=1),
datetime.min.time())
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': YESTERDAY,
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
}
def shakespeare_subdag(parent_dag, subdag_task_id, play_name):
with DAG('{}.{}'.format(parent_dag.dag_id, subdag_task_id),
schedule_interval=parent_dag.schedule_interval,
start_date=parent_dag.start_date,
default_args=parent_dag.default_args) as subdag:
download = GoogleCloudStorageDownloadOperator(
task_id='download',
bucket='smenyc2018-subdag-data',
object='{}.enc'.format(play_name),
filename='/home/airflow/gcs/data/{}.enc'.format(play_name))
decrypt = BashOperator(
task_id='decrypt',
bash_command=
'openssl enc -in /home/airflow/gcs/data/{play_name}.enc '
'-out /home/airflow/gcs/data/{play_name}.txt -d -aes-128-cbc -k "hello-nyc"'
.format(play_name=play_name))
wordcount = BashOperator(
task_id='wordcount',
bash_command=
'wc -w /home/airflow/gcs/data/{play_name}.txt | tee /home/airflow/gcs/data/{play_name}_wordcount.txt'
.format(play_name=play_name))
download >> decrypt >> wordcount
return subdag
with DAG('subdag_example_solution', default_args=default_args,
catchup=False) as dag:
start = DummyOperator(task_id='start')
start >> SubDagOperator(task_id='process_romeo',
subdag=shakespeare_subdag(dag, 'process_romeo',
'romeo'))
start >> SubDagOperator(task_id='process_othello',
subdag=shakespeare_subdag(dag, 'process_othello',
'othello'))
start >> SubDagOperator(task_id='process_hamlet',
subdag=shakespeare_subdag(dag, 'process_hamlet',
'hamlet'))
start >> SubDagOperator(task_id='process_macbeth',
subdag=shakespeare_subdag(dag, 'process_macbeth',
'macbeth'))
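# Resulting graph: four parallel SubDAGs, one per play, each running
# download >> decrypt >> wordcount internally:
#   start >> [process_romeo, process_othello, process_hamlet, process_macbeth]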
| 43.126761 | 113 | 0.625408 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 938 | 0.306336 |
2cf4f7aa04d5ae102d82d9c9bd6e398a5f525f06 | 5,230 | py | Python | src/export_blueprints.py | nutanixdev/export_blueprints | 5100dc3342c4b7d01b7fd4276fd69fc2ff150c5a | [
"MIT"
] | null | null | null | src/export_blueprints.py | nutanixdev/export_blueprints | 5100dc3342c4b7d01b7fd4276fd69fc2ff150c5a | [
"MIT"
] | null | null | null | src/export_blueprints.py | nutanixdev/export_blueprints | 5100dc3342c4b7d01b7fd4276fd69fc2ff150c5a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.8
"""
export_blueprints.py
Connect to a Nutanix Prism Central instance, grab all Calm blueprints and export them to JSON files.
You would need to *heavily* modify this script for use in a production environment so that it contains appropriate error-checking and exception handling.
"""
__author__ = "Chris Rasmussen @ Nutanix"
__version__ = "1.1"
__maintainer__ = "Chris Rasmussen @ Nutanix"
__email__ = "crasmussen@nutanix.com"
__status__ = "Development/Demo"
# default modules
import json
import getpass
import argparse
from time import localtime, strftime
import urllib3
# custom modules
import apiclient
def set_options():
global ENTITY_RESPONSE_LENGTH
"""
set ENTITY_RESPONSE_LENGTH to the maximum number of blueprints you want
to export
this is only required since the v3 list APIs will only return 20
entities by default
"""
ENTITY_RESPONSE_LENGTH = 50
def get_options():
global cluster_ip
global username
global password
# process the command-line arguments
parser = argparse.ArgumentParser(
description="Export all Calm blueprints to JSON files"
)
parser.add_argument("pc_ip", help="Prism Central IP address")
parser.add_argument("-u", "--username", help="Prism Central username")
parser.add_argument("-p", "--password", help="Prism Central password")
args = parser.parse_args()
# validate the arguments to make sure all required info has been supplied
if args.username:
username = args.username
else:
username = input("Please enter your Prism Central username: ")
if args.password:
password = args.password
else:
password = getpass.getpass()
cluster_ip = args.pc_ip
def main():
# set the global options
set_options()
# get the cluster connection info
get_options()
"""
disable insecure connection warnings
please be advised and aware of the implications
in a production environment!
"""
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# make sure all required info has been provided
if not cluster_ip:
raise Exception("Cluster IP is required.")
elif not username:
raise Exception("Username is required.")
elif not password:
raise Exception("Password is required.")
else:
"""
do a preliminary check to see if this is AOS or CE
        not used in this script, but could be useful for
        later modifications
"""
client = apiclient.ApiClient(
"post",
cluster_ip,
"clusters/list",
'{ "kind": "cluster" }',
username,
password,
)
results = client.get_info()
is_ce = False
for cluster in results["entities"]:
if (
"-ce-"
in cluster["status"]["resources"]["config"]["build"]["full_version"]
):
is_ce = True
endpoints = {}
endpoints["blueprints"] = ["blueprint", (f'"length":{ENTITY_RESPONSE_LENGTH}')]
# get all blueprints
for endpoint in endpoints:
if endpoints[endpoint][1] != "":
client = apiclient.ApiClient(
"post",
cluster_ip,
(f"{endpoints[endpoint][0]}s/list"),
(
f'{{ "kind": "{endpoints[endpoint][0]}", {endpoints[endpoint][1]} }}'
),
username,
password,
)
else:
client = apiclient.ApiClient(
"post",
cluster_ip,
(f"{endpoints[endpoint][0]}s/list"),
(f'{{ "kind": "{endpoints[endpoint][0]}" }}'),
username,
password,
)
results = client.get_info()
# make sure the user knows what's happening ... ;-)
print(f"\n{len(results['entities'])} blueprints collected from {cluster_ip}\n")
        '''
        go through all the blueprints and export them to appropriately named files
        the filename will match the blueprint name and should work fine if the
        blueprint name contains spaces (tested on Ubuntu Linux)
        '''
for blueprint in results["entities"]:
day = strftime("%d-%b-%Y", localtime())
time = strftime("%H%M%S", localtime())
blueprint_filename = f"{day}_{time}_{blueprint['status']['name']}.json"
client = apiclient.ApiClient(
"get",
cluster_ip,
f"blueprints/{blueprint['status']['uuid']}/export_file",
'{ "kind": "cluster" }',
username,
password,
)
exported_json = client.get_info()
with open(f"./{blueprint_filename}", "w") as f:
json.dump(exported_json, f)
print(
f"Successfully exported blueprint '{blueprint['status']['name']}'"
)
print("\nFinished!\n")
if __name__ == "__main__":
main()
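# Example invocation (host and credentials are hypothetical):
#   python3.8 export_blueprints.py 10.0.0.10 -u admin
# Each blueprint is written to the working directory as
# <DD-Mon-YYYY>_<HHMMSS>_<blueprint name>.json per the naming code above.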
| 30.231214 | 157 | 0.573231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,439 | 0.466348 |
2cf50032fd22989de13d200d7094ccf88a77e1bb | 3,945 | py | Python | public_ssl_drown_scanner/pyx509/pkcs7/asn1_models/X509_certificate.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | public_ssl_drown_scanner/pyx509/pkcs7/asn1_models/X509_certificate.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | public_ssl_drown_scanner/pyx509/pkcs7/asn1_models/X509_certificate.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z |
#* pyx509 - Python library for parsing X.509
#* Copyright (C) 2009-2010 CZ.NIC, z.s.p.o. (http://www.nic.cz)
#*
#* This library is free software; you can redistribute it and/or
#* modify it under the terms of the GNU Library General Public
#* License as published by the Free Software Foundation; either
#* version 2 of the License, or (at your option) any later version.
#*
#* This library is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#* Library General Public License for more details.
#*
#* You should have received a copy of the GNU Library General Public
#* License along with this library; if not, write to the Free
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#*
'''
Created on Dec 3, 2009
'''
# standard library imports
import string
# dslib imports
from pyasn1.type import tag, namedtype, namedval, univ, useful
from pyasn1 import error
# local imports
from tools import *
from oid import oid_map as oid_map
from general_types import *
class Extension(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('extnID', univ.ObjectIdentifier()),
namedtype.DefaultedNamedType('critical', univ.Boolean('False')),
namedtype.NamedType('extnValue', univ.OctetString())
#namedtype.NamedType('extnValue', ExtensionValue())
)
class Extensions(univ.SequenceOf):
componentType = Extension()
sizeSpec = univ.SequenceOf.sizeSpec
class SubjectPublicKeyInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', AlgorithmIdentifier()),
namedtype.NamedType('subjectPublicKey', ConvertibleBitString())
)
class UniqueIdentifier(ConvertibleBitString): pass
class Time(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('utcTime', useful.UTCTime()),
namedtype.NamedType('generalTime', useful.GeneralizedTime())
)
def __str__(self):
return str(self.getComponent())
class Validity(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('notBefore', Time()),
namedtype.NamedType('notAfter', Time())
)
class CertificateSerialNumber(univ.Integer): pass
class Version(univ.Integer):
namedValues = namedval.NamedValues(
('v1', 0), ('v2', 1), ('v3', 2)
)
class TBSCertificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1', tagSet=Version.tagSet.tagExplicitly(tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))),
namedtype.NamedType('serialNumber', CertificateSerialNumber()),
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('validity', Validity()),
namedtype.NamedType('subject', Name()),
namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('extensions', Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class Certificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertificate', TBSCertificate()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signatureValue', ConvertibleBitString())
)
class Certificates(univ.SetOf):
componentType = Certificate()
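# Illustrative decoding of DER bytes with this model (not part of the original
# module); pyasn1's DER decoder accepts an asn1Spec instance:
#   from pyasn1.codec.der import decoder
#   cert, _ = decoder.decode(der_bytes, asn1Spec=Certificate())
#   tbs = cert.getComponentByName('tbsCertificate')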
| 37.932692 | 159 | 0.710773 | 2,756 | 0.698606 | 0 | 0 | 0 | 0 | 0 | 0 | 1,313 | 0.332826 |
2cf62110e3ab8800a99fcb288cf2cfd2fa6ffec9 | 141 | py | Python | Codewars/you're a square/you're a square.py | adoreblvnk/code_solutions | 03e4261241dd33a4232dabe0e9450d344f7ccc6d | [
"MIT"
] | null | null | null | Codewars/you're a square/you're a square.py | adoreblvnk/code_solutions | 03e4261241dd33a4232dabe0e9450d344f7ccc6d | [
"MIT"
] | null | null | null | Codewars/you're a square/you're a square.py | adoreblvnk/code_solutions | 03e4261241dd33a4232dabe0e9450d344f7ccc6d | [
"MIT"
] | null | null | null | from math import isqrt
is_square = lambda n: isqrt(n) ** 2 == n if n >= 0 else False
def is_square_soln(n):
    # Equivalent non-lambda solution.
    return n >= 0 and isqrt(n) ** 2 == n
print(is_square(-1)) | 15.666667 | 61 | 0.659574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
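# Expected results: is_square(-1) -> False, is_square(0) -> True,
# is_square(4) -> True, is_square(25) -> True, is_square(26) -> False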
2cf720719acd6ee5090e6cba4b8337c7302d552b | 1,575 | py | Python | change_name.py | agk2000/catalyst_project | 6bae324f24d6d6382e84dcf1f2fedf0d896371e1 | [
"MIT"
] | 2 | 2022-01-12T16:34:25.000Z | 2022-03-30T09:48:33.000Z | solar_PV_utils/change_name.py | BensonRen/Drone_based_solar_PV_detection | 4b45307328d94fb7b1eafa318059ddcb86fda21f | [
"MIT"
] | null | null | null | solar_PV_utils/change_name.py | BensonRen/Drone_based_solar_PV_detection | 4b45307328d94fb7b1eafa318059ddcb86fda21f | [
"MIT"
] | 1 | 2021-09-11T14:55:26.000Z | 2021-09-11T14:55:26.000Z | # The function to change the name of a list of folders
# 2021.06.07 Ben wants to change a list of folder names that is too long for plotting
import os
name_change_dir_list = ['/scratch/sr365/Catalyst_data/every_10m/{}0m/images/save_root'.format(i) for i in range(5, 13)]
def change_folder_name(name_change_dir_list):
for name_change_dir in name_change_dir_list:
for folders in os.listdir(name_change_dir):
# Change the name
new_name = folders.split('catalyst')[-1].split('lr')[0].split('_')[1]+'_model'
print('old name is {}, change to {}'.format(folders, new_name))
os.rename(os.path.join(name_change_dir, folders), os.path.join(name_change_dir, new_name))
def append_name(mother_folder, name_starts_with='agg'):
"""
    This function prepends the folder name to the start of the individual file names.
    Typically this is for the aggregated ('agg') output files.
"""
for folder in os.listdir(mother_folder):
cur_folder = os.path.join(mother_folder, folder)
# Skip if this is not a folder
if not os.path.isdir(cur_folder):
continue
# For each subfolder, change the names of the files inside them
for file in os.listdir(cur_folder):
# If it does not start from NAME_STARTS_WITH, skip
if not file.startswith(name_starts_with):
continue
os.rename(os.path.join(cur_folder, file), os.path.join(cur_folder, folder + file))
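# Illustrative effect of append_name (paths hypothetical): a file
# /scratch/sr365/PR_curves/model_A/agg_pr.png is renamed to
# /scratch/sr365/PR_curves/model_A/model_Aagg_pr.png -- the folder name is
# prepended with no separator, per the os.rename call above.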
if __name__ == '__main__':
append_name('/scratch/sr365/PR_curves/') | 41.447368 | 119 | 0.67619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 585 | 0.371429 |
2cf7266e9b31c455b918042035a8554f33eeaf4b | 143 | py | Python | test/unit/__init__.py | comtravo/grafana-dashboards | cd0e6f46408aebd2941ae4abc5b94e45124006a2 | [
"MIT"
] | 8 | 2020-12-09T13:14:53.000Z | 2022-01-29T01:56:30.000Z | test/unit/__init__.py | comtravo/grafana-dashboards | cd0e6f46408aebd2941ae4abc5b94e45124006a2 | [
"MIT"
] | 4 | 2021-02-24T08:49:14.000Z | 2022-01-22T18:17:32.000Z | test/unit/__init__.py | comtravo/grafana-dashboards | cd0e6f46408aebd2941ae4abc5b94e45124006a2 | [
"MIT"
] | null | null | null | """
tests module
"""
import os
import sys
import sure
ROOT_DIR = os.path.join(os.path.dirname(__file__), "../..")
sys.path.append(ROOT_DIR)
| 11.916667 | 59 | 0.685315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.188811 |
2cf7ede7519b76677c68ab0cf790c978c3c5cc8f | 203 | py | Python | genda/formats/__init__.py | jeffhsu3/genda | 5adbb5b5620c592849fa4a61126b934e1857cd77 | [
"BSD-3-Clause"
] | 5 | 2016-01-12T15:12:18.000Z | 2022-02-10T21:57:39.000Z | genda/formats/__init__.py | jeffhsu3/genda | 5adbb5b5620c592849fa4a61126b934e1857cd77 | [
"BSD-3-Clause"
] | 5 | 2015-01-20T04:22:50.000Z | 2018-10-02T19:39:12.000Z | genda/formats/__init__.py | jeffhsu3/genda | 5adbb5b5620c592849fa4a61126b934e1857cd77 | [
"BSD-3-Clause"
] | 1 | 2022-03-04T06:49:39.000Z | 2022-03-04T06:49:39.000Z | """ Formats submodule contains classes and functions
to parse various formats into pandas dataframes as
well as lookup utilities to various formats
"""
from .gene_utils import *
from .panVCF import VCF
| 25.375 | 52 | 0.79803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.743842 |
2cf8d4d1bb868ca0298242ce158fc4c9f8b561f1 | 6,046 | py | Python | src/api/bkuser_core/categories/plugins/plugin.py | Chace-wang/bk-user | 057f270d66a1834312306c9fba1f4e95521f10b1 | [
"MIT"
] | null | null | null | src/api/bkuser_core/categories/plugins/plugin.py | Chace-wang/bk-user | 057f270d66a1834312306c9fba1f4e95521f10b1 | [
"MIT"
] | null | null | null | src/api/bkuser_core/categories/plugins/plugin.py | Chace-wang/bk-user | 057f270d66a1834312306c9fba1f4e95521f10b1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Optional, Type
from uuid import UUID
import yaml
from bkuser_core.categories.constants import SyncTaskStatus
from bkuser_core.categories.loader import register_plugin
from bkuser_core.categories.models import ProfileCategory, SyncProgress, SyncTask
from bkuser_core.categories.plugins.base import LoginHandler, Syncer
from bkuser_core.categories.plugins.constants import HookType
from bkuser_core.common.models import is_obj_needed_update
from bkuser_core.user_settings.models import Setting, SettingMeta
from rest_framework import serializers
from typing_extensions import Protocol
logger = logging.getLogger(__name__)
class SyncRecordSLZ(serializers.Serializer):
detail = serializers.DictField(child=serializers.CharField())
success = serializers.BooleanField()
dt = serializers.DateTimeField()
class PluginHook(Protocol):
"""插件钩子,用于各种事件后的回调"""
def trigger(self, status: str, params: dict):
raise NotImplementedError
@dataclass
class DataSourcePlugin:
"""数据源插件,定义不同的数据源"""
name: str
syncer_cls: Type[Syncer]
    # The bound category type.
    # The notion of "category type" will be removed later, leaving only a direct
    # mapping between tenant groups and plugins; at that point the plugin will
    # be looked up directly by name and this variable will be deleted.
# TODO: remove me
category_type: Optional[str] = ""
    # Whether writes from the SaaS client are allowed; disallowed by default.
    allow_client_write: bool = field(default_factory=lambda: False)
    # Handles login-related logic; currently only a simple check is supported.
    login_handler_cls: Optional[Type[LoginHandler]] = None
    settings_path: Optional[Path] = None
    # Other extra configuration, reserved for future extension.
    extra_config: dict = field(default_factory=dict)
hooks: Dict[HookType, Type[PluginHook]] = field(default_factory=dict)
def register(self):
"""注册插件"""
register_plugin(self)
if self.settings_path is not None:
self.load_settings_from_yaml()
def init_settings(self, setting_meta_key: str, meta_info: dict):
namespace = meta_info.pop("namespace", "general")
try:
meta, created = SettingMeta.objects.get_or_create(
key=setting_meta_key, category_type=self.name, namespace=namespace, defaults=meta_info
)
if created:
logger.debug("\n------ SettingMeta<%s> of plugin<%s> created.", setting_meta_key, self.name)
except Exception: # pylint: disable=broad-except
logger.exception("SettingMeta<%s> of plugin<%s> can not been created.", setting_meta_key, self.name)
return
if is_obj_needed_update(meta, meta_info):
for k, v in meta_info.items():
setattr(meta, k, v)
try:
meta.save()
except Exception: # pylint: disable=broad-except
logger.exception("SettingMeta<%s> of plugin<%s> can not been updated.", setting_meta_key, self.name)
return
logger.debug("\n------ SettingMeta<%s> of plugin<%s> updated.", setting_meta_key, self.name)
        # By default, create the settings right after the meta is created,
        # so that newly added configs are initialized correctly.
        if meta.default is not None:
            # In theory a category cannot be restored once deleted, so deleted
            # categories are never updated here; only create new entries, to
            # avoid overwriting existing settings.
for category in ProfileCategory.objects.filter(type=self.category_type, enabled=True):
ins, created = Setting.objects.get_or_create(
meta=meta, category_id=category.id, defaults={"value": meta.default}
)
if created:
logger.debug("\n------ Setting<%s> of category<%s> created.", ins, category)
def load_settings_from_yaml(self):
"""从 yaml 中加载 SettingMeta 配置"""
with self.settings_path.open(mode="r") as f:
for key, meta_info in yaml.safe_load(f).items():
self.init_settings(key, meta_info)
def get_hook(self, type_: HookType) -> Optional[PluginHook]:
hook_cls = self.hooks.get(type_)
return hook_cls() if hook_cls else None
def sync(self, instance_id: int, task_id: UUID, *args, **kwargs):
"""同步数据"""
syncer = self.syncer_cls(category_id=instance_id)
category = syncer.category
task = SyncTask.objects.get(id=task_id)
progresses = SyncProgress.objects.init_progresses(category, task_id=task_id)
try:
syncer.sync(*args, **kwargs)
finally:
task_status = SyncTaskStatus.SUCCESSFUL.value
for item in syncer.context.report():
if not item.successful:
task_status = SyncTaskStatus.FAILED.value
progress = progresses[item.step]
fields = {
"status": SyncTaskStatus.SUCCESSFUL.value if item.successful else SyncTaskStatus.FAILED.value,
"successful_count": len(item.successful_items),
"failed_count": len(item.failed_items),
"logs": "\n".join(item.logs),
"failed_records": SyncRecordSLZ(item.failed_items, many=True).data,
}
for key, value in fields.items():
setattr(progress, key, value)
progress.save(update_fields=["status", "successful_count", "failed_count", "update_time"])
            # Update the task status
task.status = task_status
task.save(update_fields=["status", "update_time"])
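# Illustrative registration of a concrete data source plugin (not from the
# original module; "custom" and CustomSyncer are hypothetical names):
#   DataSourcePlugin(
#       name="custom",
#       syncer_cls=CustomSyncer,
#       category_type="custom",
#       settings_path=Path(__file__).parent / "settings.yaml",
#   ).register()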
| 41.986111 | 116 | 0.662256 | 5,033 | 0.773356 | 0 | 0 | 4,688 | 0.720344 | 0 | 0 | 1,995 | 0.306546 |
2cf913172f053454c3779f2bdd6081d51582533f | 175 | py | Python | blog/templatetags/markdownify.py | darkLord19/blog | 16c3b72fef9d22ccfa606934c8b94fc0feb82103 | [
"MIT"
] | 12 | 2018-01-30T00:44:06.000Z | 2020-07-13T05:20:48.000Z | blog/templatetags/markdownify.py | darkLord19/blog | 16c3b72fef9d22ccfa606934c8b94fc0feb82103 | [
"MIT"
] | 36 | 2018-03-06T17:49:50.000Z | 2020-06-23T19:26:00.000Z | web/templatetags/markdown.py | odinje/yactff | d55ece2905ca49114f7ec15bbcd354cacb49b973 | [
"MIT"
] | 3 | 2018-08-03T07:03:09.000Z | 2020-07-09T20:21:10.000Z | from django import template
import mistune
register = template.Library()
@register.filter
def markdown(value):
markdown = mistune.Markdown()
return markdown(value)
| 15.909091 | 33 | 0.754286 | 0 | 0 | 0 | 0 | 98 | 0.56 | 0 | 0 | 0 | 0 |
2cf97b1d907f72ad5a0f43ac1cc591d832dc9a6f | 2,758 | py | Python | hummingbot/connector/exchange/k2/k2_in_flight_order.py | d3alek/hummingbot | 14d6c3b8c4d34c44079c45ef6cd05e9c192f241c | [
"Apache-2.0"
] | null | null | null | hummingbot/connector/exchange/k2/k2_in_flight_order.py | d3alek/hummingbot | 14d6c3b8c4d34c44079c45ef6cd05e9c192f241c | [
"Apache-2.0"
] | null | null | null | hummingbot/connector/exchange/k2/k2_in_flight_order.py | d3alek/hummingbot | 14d6c3b8c4d34c44079c45ef6cd05e9c192f241c | [
"Apache-2.0"
] | null | null | null | import asyncio
from decimal import Decimal
from typing import (
Any,
Dict,
Optional,
)
from hummingbot.connector.exchange.k2.k2_utils import convert_from_exchange_trading_pair
from hummingbot.connector.in_flight_order_base import InFlightOrderBase
from hummingbot.core.event.events import (
OrderType,
TradeType
)
class K2InFlightOrder(InFlightOrderBase):
def __init__(self,
client_order_id: str,
exchange_order_id: Optional[str],
trading_pair: str,
order_type: OrderType,
trade_type: TradeType,
price: Decimal,
amount: Decimal,
creation_timestamp: int,
initial_state: str = "New",
):
super().__init__(
client_order_id,
exchange_order_id,
trading_pair,
order_type,
trade_type,
price,
amount,
creation_timestamp,
initial_state,
)
self.last_executed_amount_base = Decimal("nan")
self.trade_id_set = set()
self.cancelled_event = asyncio.Event()
@property
def is_done(self) -> bool:
return self.last_state in {"Filled", "Cancelled"}
@property
def is_failure(self) -> bool:
return self.last_state in {"No Balance"}
@property
def is_cancelled(self) -> bool:
return self.last_state in {"Cancelled", "Expired"}
def update_with_trade_update(self, trade_update: Dict[str, Any]) -> bool:
"""
Update the InFlightOrder with the trade update from Private/GetHistory API endpoint
return: True if the order gets updated successfully otherwise False
"""
trade_id: str = str(trade_update["id"])
trade_order_id: str = str(trade_update["orderid"])
if trade_order_id != self.exchange_order_id or trade_id in self.trade_id_set:
return False
self.trade_id_set.add(trade_id)
trade_price: Decimal = Decimal(str(trade_update["price"]))
trade_amount: Decimal = Decimal(str(trade_update["amount"]))
if trade_update["type"] == "Buy":
self.executed_amount_base += trade_amount
self.executed_amount_quote += trade_price * trade_amount
else:
self.executed_amount_quote += trade_amount
self.executed_amount_base += trade_amount / trade_price
self.fee_paid += Decimal(str(trade_update["fee"]))
if not self.fee_asset:
base, quote = convert_from_exchange_trading_pair(trade_update["symbol"]).split("-")
self.fee_asset = base if trade_update["type"] == "Buy" else quote
return True
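# Illustrative shape of a Private/GetHistory trade update consumed by
# update_with_trade_update (all values hypothetical):
#   {"id": 1, "orderid": "abc-123", "symbol": "BTC/USD", "type": "Buy",
#    "price": "100.0", "amount": "2.0", "fee": "0.01"}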
| 31.701149 | 95 | 0.617476 | 2,419 | 0.877085 | 0 | 0 | 294 | 0.106599 | 0 | 0 | 310 | 0.1124 |
2cf9a7344995d303c32d96dce9e5be80b38ca7df | 3,164 | py | Python | finite/storage/factom/__init__.py | FactomProject/finite | 6a55d815073d1015e21c3abe55eb1ca7ea75defa | [
"MIT"
] | null | null | null | finite/storage/factom/__init__.py | FactomProject/finite | 6a55d815073d1015e21c3abe55eb1ca7ea75defa | [
"MIT"
] | null | null | null | finite/storage/factom/__init__.py | FactomProject/finite | 6a55d815073d1015e21c3abe55eb1ca7ea75defa | [
"MIT"
] | null | null | null | import json
from finite.storage import new_uuid
class Unimplemented(Exception):
pass
class RoleFail(Exception):
pass
SUPERUSER = '*'
""" role used to bypass all permission checks """
ROOT_UUID = '00000000-0000-0000-0000-000000000000'
""" parent UUID used to initialize a stream """
DEFAULT_SCHEMA = 'base'
""" event schema to use if not provided """
class Storage(object):
SOURCE_HEADER = "from finite.storage.factom import Storage"
""" import line used to include this class in generated code """
EVENT = "_EVENT"
""" event table """
STATE = "_STATE"
""" state table """
@staticmethod
def reconnect(**kwargs):
""" create connection pool """
@staticmethod
def drop():
""" drop evenstore tables """
@staticmethod
def migrate():
""" create evenstore tables if missing """
def __init__(self, **kwargs):
""" set object uuid for storage instance """
# REVIEW: should chain be static?
print(kwargs)
def __call__(self, action, **kwargs):
""" append a new event """
# REVIEW: should chainid be a kwarg?
event_id = str(uuid.uuid4())
payload = None
new_state = None
err = None
try:
if 'multiple' in kwargs:
multiple = int(kwargs['multiple'])
else:
multiple = 1
if 'payload' in kwargs:
if isinstance(kwargs['payload'], dict):
payload = json.dumps(kwargs['payload'])
else:
# already json encoded string
payload = kwargs['payload']
else:
# cannot be null
payload = "{}"
def _txn():
# TODO access datastore
#cur.execute(sql.get_state, (self.oid, self.schema))
# FIXME
#previous = cur.fetchone()
raise Unimplemented("FIXME")
if not previous:
current_state = self.initial_vector()
parent = ROOT_UUID
else:
current_state = previous[2]
parent = previous[3]
new_state, role = self.transform(
current_state, action, multiple)
if role not in kwargs['roles'] and SUPERUSER not in kwargs['roles']:
raise RoleFail("Missing Required Role: " + role)
# TODO access datastore
# cur.execute(sql.set_state,
# (self.oid, self.schema, new_state, event_id, new_state, event_id, self.schema, self.oid)
# )
# cur.execute(sql.append_event,
# (event_id, self.oid, self.schema, action, multiple, payload, new_state, parent)
# )
_txn()
except Exception as x:
err = x
return event_id, new_state, err
def events(self):
""" list all events """
def event(self, uuid):
""" get a single event """
def state(self):
""" get state """
| 25.934426 | 109 | 0.520544 | 2,870 | 0.90708 | 0 | 0 | 231 | 0.073009 | 0 | 0 | 1,177 | 0.371997 |
2cfaa098a5bde08da247bf4b6a018a03f20be4ee | 704 | py | Python | bin/notify.py | nfischer/dotfiles | 40daa50f9375a987cf5c76606e34db08c1ed8a98 | [
"MIT"
] | 4 | 2016-08-30T03:56:31.000Z | 2017-08-16T02:46:49.000Z | bin/notify.py | nfischer/dotfiles | 40daa50f9375a987cf5c76606e34db08c1ed8a98 | [
"MIT"
] | 7 | 2017-09-16T06:32:57.000Z | 2018-07-04T01:15:28.000Z | bin/notify.py | nfischer/dotfiles | 40daa50f9375a987cf5c76606e34db08c1ed8a98 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import dbus
import sys
DEFAULT_TIMEOUT = 4000
def notify(summary, body='', app_name='', app_icon='',
timeout=DEFAULT_TIMEOUT, actions=[], hints=[], replaces_id=0):
_bus_name = 'org.freedesktop.Notifications'
_object_path = '/org/freedesktop/Notifications'
_interface_name = _bus_name
session_bus = dbus.SessionBus()
obj = session_bus.get_object(_bus_name, _object_path)
interface = dbus.Interface(obj, _interface_name)
interface.Notify(app_name, replaces_id, app_icon,
summary, body, actions, hints, timeout)
# If run as a script, just display the argv as summary
if __name__ == '__main__':
notify(summary=' '.join(sys.argv[1:]))
| 32 | 70 | 0.705966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.21733 |
2cfaf45dde9f79b4daea8e34b742698f034e176d | 685 | py | Python | code/exampleStrats/gradualtft.py | protonlaser91/PrisonersDilemmaTournament | 5a24aca5149b4768778de820eaaf6b096c4aff6d | [
"MIT"
] | null | null | null | code/exampleStrats/gradualtft.py | protonlaser91/PrisonersDilemmaTournament | 5a24aca5149b4768778de820eaaf6b096c4aff6d | [
"MIT"
] | null | null | null | code/exampleStrats/gradualtft.py | protonlaser91/PrisonersDilemmaTournament | 5a24aca5149b4768778de820eaaf6b096c4aff6d | [
"MIT"
] | null | null | null | import numpy as np
from random import random
def strategy(history, memory):
currentCount,defector,hasDefected = (0,0,False) if memory is None else memory
choice = 1
if currentCount > 0:
choice = 0
currentCount -= 1
return choice, (currentCount,defector,hasDefected)
elif currentCount > -2:
currentCount -= 1
return choice, (currentCount,defector,hasDefected)
else:
hasDefected = False
if history.shape[1] >= 1 and history[1,-1] == 0 and not hasDefected:
choice = 0
hasDefected = True
currentCount = defector
defector += 1
return choice, (currentCount,defector,hasDefected) | 27.4 | 81 | 0.643796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2cfb8abfe4a94bc0ccc1207406144720bd05773e | 5,558 | py | Python | indico/modules/oauth/provider_test.py | uxmaster/indico | ecd19f17ef6fdc9f5584f59c87ec647319ce5d31 | [
"MIT"
] | 1 | 2019-11-03T11:34:16.000Z | 2019-11-03T11:34:16.000Z | indico/modules/oauth/provider_test.py | NP-compete/indico | 80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549 | [
"MIT"
] | null | null | null | indico/modules/oauth/provider_test.py | NP-compete/indico | 80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549 | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import datetime, timedelta
from uuid import uuid4
import pytest
from flask import session
from mock import MagicMock
from oauthlib.oauth2 import InvalidClientIdError
from sqlalchemy.orm.exc import NoResultFound
from indico.modules.oauth.models.applications import OAuthApplication
from indico.modules.oauth.models.tokens import OAuthGrant
from indico.modules.oauth.provider import DisabledClientIdError, load_client, load_token, save_grant, save_token
pytest_plugins = 'indico.modules.oauth.testing.fixtures'
@pytest.fixture
def token_data():
return {'access_token': unicode(uuid4()),
'expires_in': 3600,
'refresh_token': '',
'scope': 'api'}
@pytest.fixture
def create_request(dummy_application, dummy_user):
def _create_request(implicit=False):
request = MagicMock()
request.grant_type = 'authorization_code' if not implicit else None
request.client.client_id = dummy_application.client_id
request.user = dummy_user
return request
return _create_request
@pytest.fixture
def dummy_request(create_request):
return create_request()
def test_load_client(dummy_application):
assert load_client(dummy_application.client_id) == dummy_application
def test_load_client_malformed_id():
with pytest.raises(InvalidClientIdError):
load_client('foobar')
def test_load_client_disabled_app(dummy_application):
dummy_application.is_enabled = False
with pytest.raises(DisabledClientIdError):
load_client(dummy_application.client_id)
@pytest.mark.usefixtures('request_context')
def test_save_grant(mocker, freeze_time):
freeze_time(datetime.utcnow())
mocker.patch.object(OAuthGrant, 'save')
request = MagicMock()
request.scopes = 'api'
request.redirect_uri = 'http://localhost:5000'
client_id = unicode(uuid4())
code = {'code': 'foobar'}
expires = datetime.utcnow() + timedelta(seconds=120)
grant = save_grant(client_id, code, request)
assert grant.client_id == client_id
assert grant.code == code['code']
assert grant.redirect_uri == request.redirect_uri
assert grant.user == session.user
assert grant.scopes == request.scopes
assert grant.expires == expires
assert grant.save.called
@pytest.mark.usefixtures('request_context')
@pytest.mark.parametrize('access_token', (True, False))
def test_load_token_no_access_token(dummy_application, dummy_token, token_data, access_token):
access_token = dummy_token.access_token if access_token else None
token = load_token(access_token)
if access_token:
assert token == dummy_token
else:
assert token is None
@pytest.mark.usefixtures('request_context')
def test_load_token_malformed_access_token(dummy_application, dummy_token, token_data):
assert load_token('foobar') is None
@pytest.mark.usefixtures('request_context')
@pytest.mark.parametrize('app_is_enabled', (True, False))
def test_load_token_disabled_app(dummy_application, dummy_token, token_data, app_is_enabled):
dummy_application.is_enabled = app_is_enabled
token = load_token(dummy_token.access_token)
if app_is_enabled:
assert token == dummy_token
else:
assert token is None
@pytest.mark.usefixtures('request_context')
@pytest.mark.parametrize('implicit', (True, False))
def test_save_token(create_request, create_user, token_data, implicit):
request = create_request(implicit=implicit)
session.user = create_user(1)
token = save_token(token_data, request)
assert request.user != session.user
assert token.user == session.user if implicit else request.user
assert token.access_token == token_data['access_token']
assert token.scopes == set(token_data['scope'].split())
assert 'expires_in' not in token_data
assert 'refresh_token' not in token_data
@pytest.mark.parametrize(('initial_scopes', 'requested_scopes', 'expected_scopes'), (
({}, 'a', {'a'}),
({}, 'a b', {'a', 'b'}),
({'a'}, 'a', {'a'}),
({'a'}, 'b', {'a', 'b'}),
({'a', 'b'}, 'a', {'a', 'b'}),
({'a', 'b'}, 'a b', {'a', 'b'}),
))
def test_save_token_scopes(dummy_request, create_token, token_data,
initial_scopes, requested_scopes, expected_scopes):
if initial_scopes:
create_token(scopes=initial_scopes)
token_data['scope'] = requested_scopes
initial_access_token = token_data['access_token']
token = save_token(token_data, dummy_request)
assert token.scopes == expected_scopes
if not set(requested_scopes.split()) - set(initial_scopes):
assert token_data['access_token'] != initial_access_token
else:
assert token_data['access_token'] == initial_access_token
@pytest.mark.parametrize('grant_type', ('foo', ''))
def test_save_token_invalid_grant(dummy_request, token_data, grant_type):
dummy_request.grant_type = grant_type
with pytest.raises(ValueError):
        save_token(token_data, dummy_request)
def test_save_token_no_application(dummy_application, dummy_request, token_data):
dummy_request.client.client_id = unicode(uuid4())
assert not OAuthApplication.find(client_id=dummy_request.client.client_id).count()
with pytest.raises(NoResultFound):
save_token(token_data, dummy_request)
| 34.955975 | 112 | 0.725621 | 0 | 0 | 0 | 0 | 4,053 | 0.729219 | 0 | 0 | 753 | 0.13548 |
2cfcfb9e9b672e889b4728cb7b9faa88f7e34168 | 72 | py | Python | 2019/Python/Day_6/__init__.py | airstandley/AdventofCode | 86b7e289d67ba3ea31a78f4a4005253098f47254 | [
"MIT"
] | null | null | null | 2019/Python/Day_6/__init__.py | airstandley/AdventofCode | 86b7e289d67ba3ea31a78f4a4005253098f47254 | [
"MIT"
] | null | null | null | 2019/Python/Day_6/__init__.py | airstandley/AdventofCode | 86b7e289d67ba3ea31a78f4a4005253098f47254 | [
"MIT"
] | null | null | null | """
Day 6: Universal Orbit Map (https://adventofcode.com/2019/day/6)
""" | 24 | 64 | 0.680556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 1 |
2cfe6a6691ee44f3aa077c5ff3abfea13c8da13d | 1,031 | py | Python | tests/service/test_log_cloudwatch.py | wenhaizhu/FBPCS | cf103135acf44e879dab7c9819a5a8f0e22ef702 | [
"MIT"
] | null | null | null | tests/service/test_log_cloudwatch.py | wenhaizhu/FBPCS | cf103135acf44e879dab7c9819a5a8f0e22ef702 | [
"MIT"
] | null | null | null | tests/service/test_log_cloudwatch.py | wenhaizhu/FBPCS | cf103135acf44e879dab7c9819a5a8f0e22ef702 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock, patch
from fbpcs.service.log_cloudwatch import CloudWatchLogService
REGION = "us-west-1"
LOG_GROUP = "test-group-name"
LOG_PATH = "test-log-path"
class TestCloudWatchLogService(unittest.TestCase):
@patch("fbpcs.gateway.cloudwatch.CloudWatchGateway")
def test_fetch(self, MockCloudWatchGateway):
log_service = CloudWatchLogService(LOG_GROUP, REGION)
mocked_log = {"test-events": [{"test-event-name": "test-event-data"}]}
log_service.cloudwatch_gateway = MockCloudWatchGateway()
log_service.cloudwatch_gateway.fetch = MagicMock(return_value=mocked_log)
returned_log = log_service.cloudwatch_gateway.fetch(LOG_PATH)
log_service.cloudwatch_gateway.fetch.assert_called()
self.assertEqual(mocked_log, returned_log)
| 38.185185 | 81 | 0.758487 | 626 | 0.607177 | 0 | 0 | 571 | 0.553831 | 0 | 0 | 329 | 0.319108 |
2cffde54fce64df2346d3c12e08ed04887efeb6d | 129 | py | Python | build/ARM/arch/arm/ArmSemihosting.py | zhoushuxin/impl_of_HPCA2018 | 594d807fb0c0712bb7766122c4efe3321d012687 | [
"BSD-3-Clause"
] | 5 | 2019-12-12T16:26:09.000Z | 2022-03-17T03:23:33.000Z | build/ARM/arch/arm/ArmSemihosting.py | zhoushuxin/impl_of_HPCA2018 | 594d807fb0c0712bb7766122c4efe3321d012687 | [
"BSD-3-Clause"
] | null | null | null | build/ARM/arch/arm/ArmSemihosting.py | zhoushuxin/impl_of_HPCA2018 | 594d807fb0c0712bb7766122c4efe3321d012687 | [
"BSD-3-Clause"
] | null | null | null | version https://git-lfs.github.com/spec/v1
oid sha256:60c08155c02f8c1321979a81a67a2ef5a0bc292b2d26a9e5be7e6e1cb484e248
size 2756
| 32.25 | 75 | 0.883721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
fa01295cd72e1cc24e99fe0b555865e4e352a352 | 844 | py | Python | cqi_cpp/src/wrapper/discrete.py | AMR-/Conservative-Q-Improvement | f9d47b33fe757475d3216d3c406d147206738c90 | [
"MIT"
] | null | null | null | cqi_cpp/src/wrapper/discrete.py | AMR-/Conservative-Q-Improvement | f9d47b33fe757475d3216d3c406d147206738c90 | [
"MIT"
] | null | null | null | cqi_cpp/src/wrapper/discrete.py | AMR-/Conservative-Q-Improvement | f9d47b33fe757475d3216d3c406d147206738c90 | [
"MIT"
] | null | null | null | import numpy as np
from .space import Space
class Discrete(Space):
r"""A discrete space in :math:`\{ 0, 1, \\dots, n-1 \}`.
Example::
>>> Discrete(2)
"""
def __init__(self, n):
assert n >= 0
self.n = n
super(Discrete, self).__init__((), np.int64)
def sample(self):
return self.np_random.randint(self.n)
def contains(self, x):
if isinstance(x, int):
as_int = x
elif isinstance(x, (np.generic, np.ndarray)) and (x.dtype.kind in np.typecodes['AllInteger'] and x.shape == ()):
as_int = int(x)
else:
return False
return as_int >= 0 and as_int < self.n
def __repr__(self):
return "Discrete(%d)" % self.n
def __eq__(self, other):
return isinstance(other, Discrete) and self.n == other.n
| 24.823529 | 120 | 0.555687 | 798 | 0.945498 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.155213 |
fa01b60aac42da66a6f0eab342c49c8c76904fbf | 87 | py | Python | lab02/eurocv/apps.py | vascoalramos/tpw | e0d1ab14f1e701dd2b2a77522c57cb22fda85e56 | [
"MIT"
] | null | null | null | lab02/eurocv/apps.py | vascoalramos/tpw | e0d1ab14f1e701dd2b2a77522c57cb22fda85e56 | [
"MIT"
] | null | null | null | lab02/eurocv/apps.py | vascoalramos/tpw | e0d1ab14f1e701dd2b2a77522c57cb22fda85e56 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class EurocvConfig(AppConfig):
name = 'eurocv'
| 14.5 | 33 | 0.747126 | 50 | 0.574713 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.091954 |
fa025170f51f5c6eb69696b0c97480d3cf254f86 | 1,914 | py | Python | 2020/13/solution2.py | mitchellrj/adventofcode | e7a55d59a51292218b8849a7428fa32bd0371727 | [
"WTFPL"
] | null | null | null | 2020/13/solution2.py | mitchellrj/adventofcode | e7a55d59a51292218b8849a7428fa32bd0371727 | [
"WTFPL"
] | null | null | null | 2020/13/solution2.py | mitchellrj/adventofcode | e7a55d59a51292218b8849a7428fa32bd0371727 | [
"WTFPL"
] | null | null | null | import functools
import math
import operator
import sys
import time
def get_factors(n):
i = 2
factors = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(i)
if n > 1:
factors.add(n)
return factors
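# Hand-worked sanity checks for get_factors (added for illustration; they are
# cheap to run at import time and easy to verify by hand):
assert get_factors(12) == {2, 3}     # 12 = 2 * 2 * 3; the set collapses duplicates
assert get_factors(13) == {13}       # a prime yields only itself
assert get_factors(60) == {2, 3, 5}  # 60 = 2 * 2 * 3 * 5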
def main(departure_intervals, init):
# Sort of a lowest common denominator thing.
# Start with the biggest numbers
factors = set()
for i in departure_intervals:
factors |= get_factors(i)
print(factors)
sorted_departures = sorted(filter(lambda t: t[1], enumerate(departure_intervals)), reverse=True, key=lambda t: t[1])
big, big_index = sorted_departures[0]
mult_factor = math.floor(functools.reduce(operator.mul, factors, 1) / big)
print(f'mult_factor = {mult_factor}')
m = max(1, math.floor(init / big))
while True:
m += mult_factor
try_range = m % mult_factor + len(departure_intervals)
while m % mult_factor < try_range:
m += 1
n = (big * m) - big_index
#print(f'try {big} x {m} - {big_index} = {n}')
for idx, interval in sorted_departures[1:]:
if interval == 0:
continue
if (n + idx) % interval:
break
else:
return n + len(departure_intervals) - 1
def reader(fh):
departure_time = int(fh.readline())
departure_intervals = [0 if i == 'x' else int(i) for i in fh.readline().split(',')]
return departure_intervals
if __name__ == '__main__':
fname = sys.argv[1]
init = int((sys.argv[2:] + [0])[0])
with open(fname, 'r') as fh:
inputs = reader(fh)
start = time.monotonic_ns()
result = main(inputs, init)
end = time.monotonic_ns()
print(result)
print(f'Result calculated in {(end - start) / 1e3:0.3f} microseconds.', file=sys.stderr) | 27.342857 | 120 | 0.570533 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.12278 |
fa035cd8d58bb677605584ef22cf203bfd4ff3ef | 17,693 | py | Python | pyrosetta/models/_overrides.py | blockjoe/rosetta-api-client-python | 707f325f7560ffa6d5dfe361aff4779cc0b7182f | [
"Apache-2.0"
] | null | null | null | pyrosetta/models/_overrides.py | blockjoe/rosetta-api-client-python | 707f325f7560ffa6d5dfe361aff4779cc0b7182f | [
"Apache-2.0"
] | null | null | null | pyrosetta/models/_overrides.py | blockjoe/rosetta-api-client-python | 707f325f7560ffa6d5dfe361aff4779cc0b7182f | [
"Apache-2.0"
] | null | null | null | from textwrap import indent
from ._models import *
def str_SubNetworkIdentifier(self : SubNetworkIdentifier) -> str:
sn = "Subnetwork: {}".format(self.network)
if self.metadata:
md_h = "Additional Metadata:"
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
return "\n".join([sn, md_h, indent(md,' ')])
return sn
SubNetworkIdentifier.__str__ = str_SubNetworkIdentifier
def str_NetworkIdentifier(self : NetworkIdentifier) -> str:
bc = "Blockchain: {}".format(self.blockchain)
nw = "Network: {}".format(self.network)
if self.sub_network_identifier:
return "\n".join([bc, nw, str(self.sub_network_identifier)])
return "\n".join([bc, nw])
NetworkIdentifier.__str__ = str_NetworkIdentifier
def str_OperationStatus(self : OperationStatus) -> str:
status = "Status: {}".format(self.status)
successful = "Operation.Amount affects Operation.Account: {}".format(self.successful)
return "\n".join([status, successful])
OperationStatus.__str__ = str_OperationStatus
def str_Error(self : Error) -> str:
out = ["Error {}: {}".format(self.code, self.message)]
if self.description is not None:
out.append("Description: {}".format(self.description))
out.append("Retriable: {}".format(self.retriable))
if self.details:
out.append("Details:")
dets = "\n".join(["- {}: {}".format(key, val) for key, val in self.details.items()])
out.append(indent(dets,' '))
return "\n".join(out)
Error.__str__ = str_Error
def str_Currency(self : Currency) -> str:
sym = "Symbol: {}".format(self.symbol)
decimals = "Decimals of standard unit: {}".format(self.decimals)
if self.metadata:
md_h = "Additional Metadata:"
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
return "\n".join([sym, decimals, md_h, indent(md,' ')])
return "\n".join([sym, decimals])
Currency.__str__ = str_Currency
def str_BalanceExemption(self : BalanceExemption) -> str:
out = []
if self.sub_account_address is not None:
out.append("SubAccount Address: {}".format(self.sub_account_address))
if self.currency is not None:
out.append("Currency: {}".format(str(self.currency)))
    if self.exemption_type is not None:
        out.append("Exemption Type: {}".format(self.exemption_type))
    return "\n".join(out)
BalanceExemption.__str__ = str_BalanceExemption
def str_Allow(self : Allow) -> str:
out = []
out.append("Suppported Operation Statuses:")
stats = "\n".join(["- {}".format(status) for status in self.operation_statuses])
out.append(indent(stats,' '))
out.append("Supported Operation Types:")
ts = "\n".join(["- {}".format(t) for t in self.operation_types])
out.append(indent(ts,' '))
out.append("Possible Errors:")
es = "\n".join(["- {}".format(str(e)) for e in self.errors])
out.append(indent(es,' '))
out.append("Historical Balance Lookup Supported: {}".format(self.historical_balance_lookup))
if self.timestamp_start_index is not None:
out.append("First valid block timestamp: {}".format(self.timestamp_start_index))
out.append("Supported /call Methods:")
ms = "\n".join(["- {}".format(m) for m in self.call_methods])
out.append(indent(ms,' '))
out.append("Account balances that can change without a corresponding Operation:")
bes = "\n".join(["- {}".format(be) for be in self.balance_exemptions])
out.append(indent(bes,' '))
out.append("Uspent coins can be updated based on mempool contents: {}".format(self.mempool_coins))
return "\n".join(out)
Allow.__str__ = str_Allow
def str_Version(self : Version) -> str:
out = ['Rosetta Version: {}'.format(self.rosetta_version)]
out.append('Node Version: {}'.format(self.node_version))
if self.middleware_version is not None:
out.append('Middleware Version: {}'.format(self.middleware_version))
if self.metadata:
out.append("Additional Metadata:")
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
out.append(indent(md,' '))
return "\n".join(out)
Version.__str__ = str_Version
def str_BlockIdentifier(self : BlockIdentifier) -> str:
return "Block Height: {}\nHash: {}".format(self.index, self.hash_)
BlockIdentifier.__str__ = str_BlockIdentifier
def str_SyncStatus(self : SyncStatus) -> str:
out = []
if self.current_index is not None:
out.append("Index of last synced block in current stage: {}".format(self.current_index))
if self.target_index is not None:
out.append("Index of target block to sync to in current stage".format(self.target_index))
if self.stage is not None:
out.append("Stage of sync process: {}".format(self.stage))
if self.synced is not None:
out.append("Synced up to most recent block: {}".format(self.synced))
return "\n".join(out)
SyncStatus.__str__ = str_SyncStatus
def str_Peer(self : Peer) -> str:
i = "id: {}".format(self.peer_id)
if self.metadata:
md_h = "Additional Metadata:"
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
return "\n".join([i, md_h, indent(md,' ')])
return i
Peer.__str__ = str_Peer
def str_NetworkOptionsResponse(self : NetworkOptionsResponse) -> str:
v = "Version Info:\n{}\n".format(indent(str(self.version),' '))
d = "Implementation Details:\n{}".format(indent(str(self.details),' '))
return "\n".join([v, d])
NetworkOptionsResponse.__str__ = str_NetworkOptionsResponse
def str_NetworkStatusResponse(self : NetworkStatusResponse) -> str:
out = ["Current Block:"]
out.append(indent(str(self.current_block_identifier),' '))
out.append("Current Block Timestamp: {}".format(self.current_block_timestamp))
out.append("Genesis Block:")
out.append(indent(str(self.genesis_block_identifier),' '))
if self.oldest_block_identifier is not None:
out.append("Oldest Block:")
out.append(indent(str(self.oldest_block_identifier),' '))
if self.sync_status is not None:
out.append("Sync Status:")
out.append(indent(str(self.sync_status),' '))
out.append("Peers:")
pl = "\n".join(["- {}".format(p) for p in self.peers])
out.append(indent(pl,' '))
return "\n".join(out)
NetworkStatusResponse.__str__ = str_NetworkStatusResponse
def str_Amount(self : Amount) -> str:
    amt = '{} atomic units of {} (multiply by 10**-{} for standard units)'.format(self.value, self.currency.symbol, self.currency.decimals)
    if self.metadata:
        md_h = "Additional Metadata:"
        md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
        return "\n".join([amt, indent(md_h, ' '), indent(md, ' ')])
return amt
Amount.__str__ = str_Amount
def str_AccountBalanceResponse(self : AccountBalanceResponse) -> str:
out = ["Block:"]
    out.append(indent(str(self.block_identifier), ' '))
    out.append("Balances:")
    for balance in self.balances:
        out.append(indent("- {}".format(balance), ' '))
if self.metadata:
out.append("Additional Metadata:")
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
out.append(indent(md,' '))
return "\n".join(out)
AccountBalanceResponse.__str__ = str_AccountBalanceResponse
def str_Coin(self : Coin) -> str:
return "Balance of {} on coin_id: {}".format(self.amount, self.coin_identifier.identifier)
Coin.__str__ = str_Coin
def str_AccountCoinsResponse(self : AccountCoinsResponse) -> str:
out = ["Block:"]
    out.append(indent(str(self.block_identifier), ' '))
    out.append("Coins:")
    for coin in self.coins:
        out.append(indent("- {}".format(coin), ' '))
if self.metadata:
out.append("Additional Metadata:")
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
out.append(indent(md,' '))
return "\n".join(out)
AccountCoinsResponse.__str__ = str_AccountCoinsResponse
def str_OperationIdentifier(self : OperationIdentifier) -> str:
idx = "Index: {}".format(self.index)
if self.network_index is not None:
net_idx = "Network Index: {}".format(self.network_index)
return "\n".join([idx, net_idx])
return idx
OperationIdentifier.__str__ = str_OperationIdentifier
def str_CoinChange(self : CoinChange) -> str:
return "{} with coin id: {}".format(self.coin_action, self.coin_identifier.identifier)
CoinChange.__str__ = str_CoinChange
def str_Operation(self : Operation) -> str:
out = ["Operation:"]
out.append(indent(str(self.operation_identifier),' '))
    if self.related_operations is not None:
out.append("Related Operations:")
for related in self.related_operations:
out.append(indent(str(related),' '))
out.append("type: {}".format(self.type_))
if self.status is not None:
out.append("Status: {}".format(self.status))
if self.account is not None:
out.append("Account:")
out.append(indent(str(self.account),' '))
if self.amount is not None:
out.append("Amount:")
out.append(indent(str(self.amount),' '))
if self.coin_change is not None:
out.append("Coin Change: {}".format(self.coin_change))
if self.metadata:
out.append("Additional Metadata:")
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
out.append(indent(md,' '))
return "\n".join(out)
Operation.__str__ = str_Operation
def str_RelatedTransaction(self : RelatedTransaction) -> str:
out = []
if self.network_identifier is not None:
out.append("Network:")
out.append(indent(str(self.network_identifier),' '))
out.append("Transaction: {}".format(self.transaction_identifier.hash))
out.append("Direction: {}".format(self.direction))
return "\n".join(out)
RelatedTransaction.__str__ = str_RelatedTransaction
def str_Transaction(self : Transaction) -> str:
out = ["Transaction id: {}".format(self.transaction_identifier.hash)]
out.append("Operations:")
for operation in self.operations:
op = "- {}".format(operation)
out.append(indent(op,' '))
if self.related_transactions is not None:
out.append("Related Transactions:")
for related in self.related_transactions:
rt = "- {}".format(related)
out.append(indent(rt,' '))
if self.metadata:
out.append("Additional Metadata:")
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
out.append(indent(md,' '))
return "\n".join(out)
Transaction.__str__ = str_Transaction
def str_Block(self : Block) -> str:
out = ["Block:"]
out.append(indent(str(self.block_identifier),' '))
out.append("Parent Block:")
out.append(indent(str(self.parent_block_identifier),' '))
out.append("Timestamp: {}".format(self.timestamp))
out.append("Transactions:")
for tran in self.transactions:
t = "- {}".format(tran)
out.append(indent(t,' '))
if self.metadata:
out.append("Additional Metadata:")
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
out.append(indent(md,' '))
return "\n".join(out)
Block.__str__ = str_Block
def str_BlockResponse(self : BlockResponse) -> str:
out = []
if self.block is not None:
out.append("Block")
out.append(indent(str(self.block),' '))
if self.other_transactions is not None:
out.append("Other Transactions:")
for tid in self.other_transactions:
ot = "- {}".format(tid.hash)
out.append(indent(ot,' '))
if out:
return "\n".join(out)
return ""
BlockResponse.__str__ = str_BlockResponse
def str_MempoolTransactionResponse(self : MempoolTransactionResponse) -> str:
out = ["Transaction:"]
out.append(indent(str(self.transaction),' '))
if self.metadata:
out.append("Additional Metadata:")
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
out.append(indent(md,' '))
return "\n".join(out)
MempoolTransactionResponse.__str__ = str_MempoolTransactionResponse
def str_ConstructionDeriveResponse(self : ConstructionDeriveResponse) -> str:
out = []
if self.address is not None:
out.append("Address: {}".format(self.address))
if self.account_identifier is not None:
out.append("Account:")
out.append(indent(str(self.account_identifier),' '))
if self.metadata:
out.append("Additional Metadata:")
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
out.append(indent(md,' '))
return "\n".join(out)
ConstructionDeriveResponse.__str__ = str_ConstructionDeriveResponse
def str_TransactionIdentifierResponse(self : TransactionIdentifierResponse) -> str:
out = ["Transaction: {}".format(self.transaction_identifier.hash)]
if self.metadata:
out.append("Additional Metadata:")
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
out.append(indent(md,' '))
return "\n".join(out)
TransactionIdentifierResponse.__str__ = str_TransactionIdentifierResponse
def str_ConstructionMetadataResponse(self : ConstructionMetadataResponse) -> str:
out = ["Metadata:"]
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
out.append(indent(md,' '))
if self.suggested_fee is not None:
out.append("Suggested Fee(s):")
fees = "\n".join(["- {}".format(str(amt)) for amt in self.suggested_fee])
out.append(indent(fees,' '))
return "\n".join(out)
ConstructionMetadataResponse.__str__ = str_ConstructionMetadataResponse
def str_ConstructionParseResponse(self : ConstructionParseResponse) -> str:
out = ["Operations:"]
ops = "\n".join(["- {}".format(str(op)) for op in self.operations])
out.append(indent(ops,' '))
if self.signers is not None:
out.append("Signers:")
sigs = "\n".join(["- {}".format(sig) for sig in self.signers])
out.append(indent(sigs,' '))
if self.account_identifier_signers is not None:
out.append("Signers:")
sigs = "\n".join(["- {}".format(sig) for sig in self.account_identifier_signers])
out.append(indent(sigs,' '))
if self.metadata:
out.append("Additional Metadata:")
md = "\n".join(["- {}: {}".format(key, val) for key, val in self.metadata.items()])
out.append(indent(md,' '))
return "\n".join(out)
ConstructionParseResponse.__str__ = str_ConstructionParseResponse
def str_SigningPayload(self : SigningPayload) -> str:
out = []
if self.address is not None:
out.append("Address: {}".format(self.address))
if self.account_identifier is not None:
out.append("Account:")
out.append(indent(str(self.account_identifier),' '))
out.append("Hex Bytes: {}".format(self.hex_bytes))
if self.signature_type is not None:
out.append("Signature Type: {}".format(self.signature_type))
return "\n".join(out)
SigningPayload.__str__ = str_SigningPayload
def str_ConstructionPayloadsResposne(self : ConstructionPayloadsResponse) -> str:
out = ["Unsigned Transaction: {}".format(self.unsigned_transaction)]
out.append("Payloads:")
ps = "\n".join(["- {}".format(str(payload)) for payload in self.payloads])
out.append(indent(ps,' '))
return "\n".join(*put)
ConstructionPayloadsResponse.__str__ = str_ConstructionPayloadsResposne
def str_ConstructionPreprocessResponse(self : ConstructionPreprocessResponse) -> str:
out = []
if self.options is not None:
out.append("Options:")
opts = "\n".join(["- {}: {}".format(key, val) for key, val in self.options.items()])
out.append(indent(str(opts),' '))
if self.required_public_keys is not None:
out.append("Required Public Keys:")
pub_keys = "\n".join(["- {}".format(str(act)) for act in self.required_public_keys])
if out:
return "\n".join(out)
return ""
ConstructionPreprocessResponse.__str__ = str_ConstructionPreprocessResponse
def str_BlockEvent(self : BlockEvent) -> str:
out = ["Sequence: {}".format(self.sequence)]
out.append("Block:")
out.append(indent(str(self.block_identifier),' '))
out.append("Type: {}".format(self.type_))
return "\n".join(out)
BlockEvent.__str__ = str_BlockEvent
def str_EventsBlocksResponse(self : EventsBlocksResponse) -> str:
out = ["Max Sequence: {}".format(self.max_sequence)]
bes = "\n".join(["- {}".format(event) for event in self.events])
out.append(indent(bes,' '))
return "\n".join(out)
EventsBlocksResponse.__str__ = str_EventsBlocksResponse
def str_BlockTransaction(self : BlockTransaction) -> str:
out = ["Block:"]
out.append(indent(str(self.block_identifier),' '))
out.append("Transaction:")
out.append(indent(str(self.transaction),' '))
return "\n".join(out)
BlockTransaction.__str__ = str_BlockTransaction
def str_SearchTransactionsResponse(self : SearchTransactionsResponse) -> str:
out = ["Transactions:"]
txs = "\n".join(["- {}".format(str(tx)) for tx in self.transactions])
out.append(indent(txs,' '))
out.append("Total Count: {}".format(self.total_count))
if self.next_offset is not None:
out.append("Next Offset: {}".format(self.next_offset))
return "\n".join(out)
SearchTransactionsResponse.__str__ = str_SearchTransactionsResponse
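# Illustrative usage (a sketch -- it assumes the generated models in ._models
# accept keyword arguments, which is typical for generated dataclasses;
# nothing below is taken from the upstream repository):
#
#   ada = Currency(symbol='ADA', decimals=6)
#   print(ada)
#   # Symbol: ADA
#   # Decimals of standard unit: 6
#
#   print(Amount(value='42000000', currency=ada))
#   # 42000000 atomic units of ADA (multiply by 10**-6 for standard units)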
| 39.317778 | 107 | 0.651048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,856 | 0.16142 |
fa0387e8bd74f0b0f503dfeea41eb47cd54c1fe4 | 1,331 | py | Python | src/lockstep/models/arheaderinfomodel.py | sfwatanabe/lockstep-sdk-python | b388c818663a4b090debb68c65c18728a082fec0 | [
"MIT"
] | 1 | 2022-03-17T00:23:24.000Z | 2022-03-17T00:23:24.000Z | src/lockstep/models/arheaderinfomodel.py | sfwatanabe/lockstep-sdk-python | b388c818663a4b090debb68c65c18728a082fec0 | [
"MIT"
] | null | null | null | src/lockstep/models/arheaderinfomodel.py | sfwatanabe/lockstep-sdk-python | b388c818663a4b090debb68c65c18728a082fec0 | [
"MIT"
] | null | null | null | #
# Lockstep Software Development Kit for Python
#
# (c) 2021-2022 Lockstep, Inc.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
# @author Ted Spence <tspence@lockstep.io>
# @copyright 2021-2022 Lockstep, Inc.
# @version 2022.4
# @link https://github.com/Lockstep-Network/lockstep-sdk-python
#
from dataclasses import dataclass
@dataclass
class ArHeaderInfoModel:
"""
Aggregated Accounts Receivable information.
"""
groupKey: str = None
reportPeriod: str = None
totalCustomers: int = None
totalInvoices: int = None
totalInvoicedAmount: float = None
totalUnappliedPayments: float = None
totalCollected: float = None
totalArAmount: float = None
totalInvoicesPaid: int = None
totalInvoicesPastDue: int = None
totalInvoices90DaysPastDue: int = None
totalPastDueAmount: float = None
totalPastDueAmount90Days: float = None
percentageOfTotalAr: float = None
dso: float = None
totalInvoiceAmountCurrentYear: float = None
totalInvoiceAmountPreviousYear: float = None
totalPaymentAmountCurrentYear: float = None
totalCollectedPastThirtyDays: int = None
totalInvoicesPaidPastThirtyDays: int = None
percentageOfTotalAr90DaysPastDue: float = None
| 28.934783 | 73 | 0.731029 | 891 | 0.669421 | 0 | 0 | 902 | 0.677686 | 0 | 0 | 436 | 0.327573 |
fa048e87ea337e5c20190eb2cfaa54ef301156c6 | 3,477 | py | Python | aae/server.py | ez-corp/easy | c0cd3eb8787eb445cbf2ea2fab4f5320aa229012 | [
"MIT"
] | null | null | null | aae/server.py | ez-corp/easy | c0cd3eb8787eb445cbf2ea2fab4f5320aa229012 | [
"MIT"
] | null | null | null | aae/server.py | ez-corp/easy | c0cd3eb8787eb445cbf2ea2fab4f5320aa229012 | [
"MIT"
] | null | null | null | # coding=utf-8
import time
from flask import Flask
from flask import jsonify
from flask import request
from werkzeug.exceptions import BadRequest
from containers import grade_submission, RunStatus
# TODO: move to conf file
TIME_EXCEEDED_MESSAGE = "Programmi kontrollimine ületas lubatud käivitusaega."  # Estonian: "Checking the program exceeded the allowed run time."
MEM_EXCEEDED_MESSAGE = "Programmi kontrollimine ületas lubatud mälumahtu."  # Estonian: "Checking the program exceeded the allowed memory limit."
app = Flask(__name__)
app.logger.setLevel("DEBUG")
def check_content(content):
if set(content.keys()) != {"submission", "grading_script", "assets", "image_name", "max_time_sec", "max_mem_mb"}:
raise BadRequest("Missing or incorrect parameter")
if not isinstance(content["assets"], list):
raise BadRequest("Assets must be list")
for dic in content["assets"]:
if set(dic.keys()) != {"file_name", "file_content"}:
raise BadRequest("Missing or incorrect parameter")
def assets_to_tuples(assets):
assets_list = []
for asset in assets:
assets_list.append((asset["file_name"], asset["file_content"]))
return assets_list
def parse_assessment_output(raw_output):
grade_separator = "#" * 50
grade_string = raw_output.rstrip().split("\n")[-1].lower().strip()
app.logger.debug("Grade string: " + grade_string)
if not grade_string.startswith("grade:"):
app.logger.error("'grade:' not found")
raise Exception("Incorrect grader output format")
grade_list = grade_string.split(":")
if len(grade_list) != 2:
app.logger.error("More : than expected, len(grade_list) = " + str(len(grade_list)))
raise Exception("Incorrect grader output format")
grade = grade_list[1].strip()
if not grade.isnumeric():
raise Exception("Grade is not a number")
output_rsplit = raw_output.rsplit(grade_separator, 1)
if len(output_rsplit) < 2:
app.logger.error("Grade separator missing")
raise Exception("Incorrect grader output format")
return round(float(grade)), grade_separator.join(output_rsplit[0:-1])
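# A minimal self-check of the grader output contract parsed above (the
# feedback text is made up; only the trailing "grade:" line and the
# 50-character '#' separator are required by parse_assessment_output):
_demo_output = "some feedback\n" + "#" * 50 + "\ngrade: 85\n"
assert parse_assessment_output(_demo_output) == (85, "some feedback\n")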
@app.route('/v1/grade', methods=['POST'])
def post_grade():
# app.logger.info("Request: " + request.get_data(as_text=True))
request_time = time.time()
app.logger.info("Request started: {}".format(request_time))
if not request.is_json:
raise BadRequest("Request body must be JSON")
content = request.get_json()
check_content(content)
# TODO: dummy switch from conf
status, raw_output = grade_submission(content["submission"], content["grading_script"],
assets_to_tuples(content["assets"]), content["image_name"],
content["max_time_sec"], content["max_mem_mb"], app.logger, request_time)
if status == RunStatus.SUCCESS:
assessment = parse_assessment_output(raw_output)
elif status == RunStatus.TIME_EXCEEDED:
assessment = (0, TIME_EXCEEDED_MESSAGE)
elif status == RunStatus.MEM_EXCEEDED:
assessment = (0, MEM_EXCEEDED_MESSAGE)
else:
raise Exception("Unhandled run status: " + status.name)
# app.logger.info("Assessment: " + str(assessment))
app.logger.info("Request finished: {}".format(request_time))
return jsonify({"grade": assessment[0], "feedback": assessment[1]})
@app.errorhandler(BadRequest)
def handle_bad_request(e):
return jsonify({"message": e.description}), 400
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5000)
| 31.609091 | 117 | 0.67702 | 0 | 0 | 0 | 0 | 1,366 | 0.392416 | 0 | 0 | 996 | 0.286125 |
fa04a1e6f72faada0d2891b854d62e5f2153ccd5 | 1,046 | py | Python | code03[efficient].py | inaxia/face_recognition_in_image | 1979e89e22a79b4473b5f629669709da886e3e0b | [
"MIT"
] | 1 | 2020-10-19T20:18:11.000Z | 2020-10-19T20:18:11.000Z | code03[efficient].py | inaxia/face_recognition_in_image | 1979e89e22a79b4473b5f629669709da886e3e0b | [
"MIT"
] | null | null | null | code03[efficient].py | inaxia/face_recognition_in_image | 1979e89e22a79b4473b5f629669709da886e3e0b | [
"MIT"
] | null | null | null | # THIS IS A SHORTENED CODE
# WE ARE COMPARING ONE IMAGE WITH ALL IMAGES IN 'ASSETS' FOLDER
# ALSO CHECKS THE TOTAL TIME TAKEN
# HERE, IMAGES ARE NOT SHOWN
from cv2 import cv2
import face_recognition
import os
import time
# FOR CHECKING THE CPU TIME
startTimer = time.process_time()
# FUNCTION TO GET FACE LOCATION AND FACE ENCODINGS
def returnImageDetails(imagePath):
image = face_recognition.load_image_file(imagePath)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
location = face_recognition.face_locations(image)[0]
encode = face_recognition.face_encodings(image)[0]
return [location, encode]
imagePath = os.listdir('assets')
testImageDetails = returnImageDetails('testAssets/johnny-depp-test.jpg')
for i in range(len(imagePath)):
imageDetails = returnImageDetails('assets/' + imagePath[i])
result = []
result = face_recognition.compare_faces([imageDetails[1]], testImageDetails[1])
print(imagePath[i], result)
# FOR PRINTING THE TIME TAKEN TO EXECUTE THE CODE
print(time.process_time() - startTimer)
| 32.6875 | 83 | 0.760038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 327 | 0.31262 |
fa04b991cc6d146ac7f1dac666aa9bb071d80333 | 908 | py | Python | scripts/media_to_wp.py | benjaminaschultz/pypress | 358e088ac361aa4ae808f0a3d05fb6320d62240c | [
"MIT"
] | 2 | 2015-01-25T17:21:53.000Z | 2020-02-15T08:30:26.000Z | scripts/media_to_wp.py | benjaminaschultz/pypress | 358e088ac361aa4ae808f0a3d05fb6320d62240c | [
"MIT"
] | 1 | 2015-04-22T20:36:01.000Z | 2015-04-22T20:36:01.000Z | scripts/media_to_wp.py | benjaminaschultz/pypress | 358e088ac361aa4ae808f0a3d05fb6320d62240c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os,re,sys
import mimetypes as mt
import argparse
import wordpress_xmlrpc as wp
from pypress import *
def main(argv,client=None):
parser = argparse.ArgumentParser()
parser.add_argument('-b','--blog', help='url of wordpress blog to which you want to post',dest='url')
parser.add_argument('-u','--user', help="username with which you'd like to post to the blog",dest='username')
parser.add_argument('-p','--password', help="password with which you'd like to post to the blog",dest='password')
    parser.add_argument('files', help='file to be uploaded to the blog',
nargs='+')
args = parser.parse_args(argv)
    conf = WPConfig(url=args.url, username=args.username, password=args.password)
if (client is None):
client = conf.getDefaultClient()
wmpu=WPMediaUploader(client)
wmpu.upload(args.files)
if __name__=="__main__":
main(sys.argv[1:])
| 34.923077 | 115 | 0.709251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.321586 |
fa05d36ef8abee158f25e36a1d1976c52c9722c5 | 6,577 | py | Python | GOKOTAI/commands/Meteor/entry.py | kantoku-code/Fusion360_GOKOTAI | 1a0644233ca3638c6f864d135f69f90192f31c23 | [
"MIT"
] | 1 | 2022-03-18T13:03:22.000Z | 2022-03-18T13:03:22.000Z | GOKOTAI/commands/Meteor/entry.py | kantoku-code/Fusion360_GOKOTAI | 1a0644233ca3638c6f864d135f69f90192f31c23 | [
"MIT"
] | null | null | null | GOKOTAI/commands/Meteor/entry.py | kantoku-code/Fusion360_GOKOTAI | 1a0644233ca3638c6f864d135f69f90192f31c23 | [
"MIT"
] | null | null | null | import adsk.core
import adsk.fusion
import os
from ...lib import fusion360utils as futil
from ... import config
import math
app = adsk.core.Application.get()
ui = app.userInterface
# TODO *** Specify the command's ID information. ***
CMD_ID = f'{config.COMPANY_NAME}_{config.ADDIN_NAME}_Meteor'
CMD_NAME = 'Meteor'
CMD_Description = 'Rains a large number of points down onto a body from the +Z direction'
# Specifies whether the command should be promoted to the panel.
IS_PROMOTED = True
# TODO *** Define where the command button will be created. ***
# This is the workspace, tab, and panel, plus the command the button is
# inserted next to. If no command to place it beside is specified, the
# button is inserted at the end.
WORKSPACE_ID = config.design_workspace
TAB_ID = config.design_tab_id
TAB_NAME = config.design_tab_name
PANEL_ID = config.create_panel_id
PANEL_NAME = config.create_panel_name
PANEL_AFTER = config.create_panel_after
COMMAND_BESIDE_ID = ''
# Location of the command icon resources; a subfolder named
# "resources" inside this directory is assumed here.
ICON_FOLDER = os.path.join(
os.path.dirname(
os.path.abspath(__file__)
),
'resources',
''
)
# Local list of event handlers, used to keep references alive
# so they are not released and garbage collected.
local_handlers = []
_bodyIpt: adsk.core.SelectionCommandInput = None
_countIpt: adsk.core.IntegerSpinnerCommandInput = None
# Executed when the add-in is run.
def start():
    # Create the command definition.
cmd_def = ui.commandDefinitions.addButtonDefinition(
CMD_ID,
CMD_NAME,
CMD_Description,
ICON_FOLDER
)
    # Define an event handler for the command-created event.
    # This handler is called when the button is clicked.
    futil.add_handler(cmd_def.commandCreated, command_created)
    # ******** Add a button to the UI so the user can run the command. ********
    # Get the workspace the button will be created in.
workspace = ui.workspaces.itemById(WORKSPACE_ID)
toolbar_tab = workspace.toolbarTabs.itemById(TAB_ID)
if toolbar_tab is None:
toolbar_tab = workspace.toolbarTabs.add(TAB_ID, TAB_NAME)
    # Get the panel the button will be created in.
panel = workspace.toolbarPanels.itemById(PANEL_ID)
if panel is None:
panel = toolbar_tab.toolbarPanels.add(PANEL_ID, PANEL_NAME, PANEL_AFTER, False)
    # Create the button command control in the UI after the specified existing command.
control = panel.controls.addCommand(cmd_def, COMMAND_BESIDE_ID, False)
    # Specify whether the command is promoted to the main toolbar.
control.isPromoted = IS_PROMOTED
# Executed when the add-in is stopped.
def stop():
    # Get the various UI elements for this command
workspace = ui.workspaces.itemById(WORKSPACE_ID)
panel = workspace.toolbarPanels.itemById(PANEL_ID)
command_control = panel.controls.itemById(CMD_ID)
command_definition = ui.commandDefinitions.itemById(CMD_ID)
    # Delete the button command control.
if command_control:
command_control.deleteMe()
    # Delete the command definition.
if command_definition:
command_definition.deleteMe()
def command_created(args: adsk.core.CommandCreatedEventArgs):
futil.log(f'{CMD_NAME}:{args.firingEvent.name}')
cmd: adsk.core.Command = adsk.core.Command.cast(args.command)
cmd.isPositionDependent = True
# **inputs**
inputs: adsk.core.CommandInputs = cmd.commandInputs
global _bodyIpt
_bodyIpt = inputs.addSelectionInput(
'bodyIptId',
        'Body',
        'Select a body'
)
_bodyIpt.addSelectionFilter('Bodies')
global _countIpt
_countIpt = inputs.addIntegerSpinnerCommandInput(
'countIptId',
        'Division count',
1,
30,
1,
10
)
# **event**
futil.add_handler(
cmd.destroy,
command_destroy,
local_handlers=local_handlers
)
futil.add_handler(
cmd.executePreview,
command_executePreview,
local_handlers=local_handlers
)
def command_destroy(args: adsk.core.CommandEventArgs):
futil.log(f'{CMD_NAME}:{args.firingEvent.name}')
global local_handlers
local_handlers = []
def command_executePreview(args: adsk.core.CommandEventArgs):
futil.log(f'{CMD_NAME}:{args.firingEvent.name}')
global _countIpt
# unitMgr: adsk.core.UnitsManager = futil.app.activeProduct.unitsManager
# pitch = unitMgr.convert(
# _countIpt.value,
# unitMgr.defaultLengthUnits,
# unitMgr.internalUnits
# )
global _bodyIpt
initMeteorSketch(
_bodyIpt.selection(0).entity,
adsk.core.Vector3D.create(0,0,-1),
_countIpt.value,
)
args.isValidResult = True
# ******************
def initMeteorSketch(
targetBody: adsk.fusion.BRepBody,
rayDirection: adsk.core.Vector3D,
stepCount: int = 10,
isRev: bool = False) -> adsk.fusion.Sketch:
comp: adsk.fusion.Component = targetBody.parentComponent
pnts = getPointsFromRayDirection(
targetBody,
rayDirection,
stepCount,
)
if len(pnts) < 1:
return
skt: adsk.fusion.Sketch = comp.sketches.add(
comp.xYConstructionPlane
)
sktPnts: adsk.fusion.SketchPoints = skt.sketchPoints
skt.isComputeDeferred = True
[sktPnts.add(p) for p in pnts]
skt.isComputeDeferred = False
return skt
def getPointsFromRayDirection(
targetBody: adsk.fusion.BRepBody,
rayDirection: adsk.core.Vector3D,
stepCount: int = 10,
isRev: bool = False) -> list:
comp: adsk.fusion.Component = targetBody.parentComponent
bBox: adsk.core.BoundingBox3D = targetBody.boundingBox
minPnt: adsk.core.Point3D = bBox.minPoint
maxPnt: adsk.core.Point3D = bBox.maxPoint
stepX = (bBox.maxPoint.x - bBox.minPoint.x) / (stepCount - 1)
stepY = (bBox.maxPoint.y - bBox.minPoint.y) / (stepCount - 1)
tempPnts = []
for idxX in range(stepCount):
for idxY in range(stepCount):
tempPnts.append(
adsk.core.Point3D.create(
minPnt.x + stepX * idxX,
minPnt.y + stepY * idxY,
maxPnt.z + 1
)
)
pnts = []
hitPnts: adsk.core.ObjectCollection = adsk.core.ObjectCollection.create()
for pnt in tempPnts:
hitPnts.clear()
bodies: adsk.core.ObjectCollection = comp.findBRepUsingRay(
pnt,
rayDirection,
adsk.fusion.BRepEntityTypes.BRepBodyEntityType,
-1.0,
True,
hitPnts
)
if bodies.count < 1:
continue
bodyLst = [b for b in bodies]
hitPntLst = [p for p in hitPnts]
for body, pnt in zip(bodyLst, hitPntLst):
if body == targetBody:
pnts.append(pnt)
continue
return pnts | 26.203187 | 88 | 0.638285 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,263 | 0.294164 |
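# Illustrative call (a sketch; 'body' must be an adsk.fusion.BRepBody taken
# from the active design -- the variable name is hypothetical):
#
#   pnts = getPointsFromRayDirection(
#       body,
#       adsk.core.Vector3D.create(0, 0, -1),  # rays travel straight down
#       stepCount=10,                         # a 10 x 10 grid over the bounding box
#   )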
fa0619b095728c9cbe0aa36d7fe788dda554c238 | 3,704 | py | Python | tp/log/es.py | chinapnr/agbot | 9739ce1c2198e50111629db2d1de785edd06876e | [
"MIT"
] | 2 | 2018-06-23T06:48:46.000Z | 2018-06-23T10:11:50.000Z | tp/log/es.py | chinapnr/agbot | 9739ce1c2198e50111629db2d1de785edd06876e | [
"MIT"
] | 5 | 2020-01-03T09:33:02.000Z | 2021-06-02T00:49:52.000Z | tp/log/es.py | chinapnr/agbot | 9739ce1c2198e50111629db2d1de785edd06876e | [
"MIT"
] | 1 | 2021-07-07T07:17:27.000Z | 2021-07-07T07:17:27.000Z | import re
import time
from datetime import datetime
from enum import Enum
from fishbase.fish_logger import logger
from .elk_connector import Es
from ..base.tp_base import TpBase, TestStatus, Conf, VerticalContext
from ..base.tp_base import get_params_dict
# LogESTestPoint
class LogESTestPoint(TpBase):
    # Class initialization
# 2018.6.11 create by yanan.wu #748921
def __init__(self, tp_conf, vertical_context: VerticalContext):
TpBase.__init__(self, tp_conf, vertical_context)
self.conf_enum = LogESTestPointEnum
self.__tc_start_time = ''
self.vertical_context = vertical_context
    # Prepare the request parameters
# 2018.6.11 create by yanan.wu #748921
def build_request(self):
tc_ctx = self.vertical_context.tc_context
try:
            # Get the request parameters
self.req_param = {'index': self.tp_conf.get('index'),
'key_word': self.tp_conf.get('key_word')}
            # Get the test case start time
time_struct = time.mktime(tc_ctx.start_time.timetuple())
self.__tc_start_time = datetime.utcfromtimestamp(
time_struct).strftime('%Y-%m-%dT%H:%M:%S')
return self.req_param
except RuntimeError as e:
logger.error('tp->log:get req params error: {}'.format(str(e)))
raise Exception(str(e))
    # Execute the test case
# 2018.6.11 create by yanan.wu #748921
def execute(self, request):
try:
            # NOTE: the ES connection settings (server_ip, server_port,
            # auth_user, auth_password) must be filled in here; the empty
            # dict is a placeholder in the original source.
            es_conf = {}
            # Send the request and receive the response
es = Es(es_conf['server_ip'], es_conf['server_port'],
es_conf['auth_user'], es_conf['auth_password'])
tp_utc_time = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
resp = es.search_match(request.get(LogESTestPointEnum.index.key),
self.__tc_start_time,
tp_utc_time, 100,
request.get(LogESTestPointEnum.key_word.key))
return resp, ''
except Exception as e:
logger.error('tp->log: execute error: {}'.format(str(e)))
raise Exception(str(e))
    # Verify the expected result
# 2018.6.11 create by yanan.wu #748921
def test_status(self):
tc_ctx = self.vertical_context.tc_context
        # Get the expected return parameters
if self.tp_conf.get('expect_data'):
params_name_list = self.tp_conf.get('expect_data').split(',')
self.expect_dict = get_params_dict(params_name_list, tc_ctx.tc_detail.data)
if self.tp_conf.get('check_type') == LogCheckType.ROWS_CHECK.value:
if self.expect_dict.get(LogESTestPointEnum.expect_data.key) == str(tc_ctx.current_tp_context.response.content['hits']['total']):
return TestStatus.PASSED
else:
return TestStatus.NOT_PASSED
if self.tp_conf.get('check_type') == LogCheckType.REG_CHECK.value:
for hit in tc_ctx.current_tp_context.response.content['hits']['hits']:
match_obj = re.search(
self.expect_dict.get(LogESTestPointEnum.expect_data.key),
hit['_source']['message'])
if match_obj:
return TestStatus.PASSED
return TestStatus.NOT_PASSED
    # Post-processing
def post_handler(self):
pass
# Log check types
# 2018.6.12 create by yanan.wu #806640
class LogCheckType(Enum):
    # Row-count check
ROWS_CHECK = '01'
    # Regex check
REG_CHECK = '02'
# Log configuration enum
class LogESTestPointEnum(Conf):
    tp_name = 'tp_name', 'tp name', True, ''
    key_word = 'key_word', 'query keyword', False, ''
    index = 'index', 'query index', True, ''
    check_type = 'check_type', 'check type', True, '01'
    expect_data = 'expect_data', 'expected result', True, ''
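# Illustrative tp configuration consumed by LogESTestPoint (field names follow
# LogESTestPointEnum above; the concrete values are made up):
#
#   tp_conf = {
#       'tp_name': 'check order log',
#       'index': 'app-log-*',
#       'key_word': 'order created',
#       'check_type': '01',         # LogCheckType.ROWS_CHECK
#       'expect_data': 'row_count'  # parameter names resolved from the tc data
#   }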
fa063d565e386e5f6704b88cfa4179fa400be8cb | 5,326 | py | Python | main.py | nmanzini/flashcardipy | a36be3d733d27d5252485d3b9ff71437dc3453cf | [
"MIT"
] | null | null | null | main.py | nmanzini/flashcardipy | a36be3d733d27d5252485d3b9ff71437dc3453cf | [
"MIT"
] | null | null | null | main.py | nmanzini/flashcardipy | a36be3d733d27d5252485d3b9ff71437dc3453cf | [
"MIT"
] | null | null | null | import sqlite3, random, os
import time
name = 'test01.db'
filename = "grelist.txt"
conn = sqlite3.connect(name)
c = conn.cursor()
'''word, definition, example, history, time, seen, right, wrong, streak, reported'''
class Word(object):
def __init__(self, row_id):
"""
        Initialize a Word object by loading its row from the database.
:param row_id: integer value of the row
:type row_id: integer
"""
c.execute('SELECT * FROM words WHERE rowid = ' + str(row_id))
word_data = c.fetchone()
self.row_id = row_id
self.word = word_data[0]
self.definition = word_data[1]
self.example = word_data[2]
self.history = word_data[3]
self.time_h = word_data[4]
self.seen = word_data[5]
self.right = word_data[6]
self.wrong = word_data[7]
self.streak = word_data[8]
self.reported = word_data[9]
def show(self):
"""
shows the previously selected word and react to the input
:return:
:rtype:
"""
# TODO: polish the console gui by adding an introduction at the beginning
# TODO: polish the visualization of words, showing history and last time seen.
positive = ("yes", "y", "Y", "Yes", "YES", "1", " ")
negative = ("no", "n", "N", "No", "NO", "0", "")
exit_answers = ("exit", "e")
report = ("report", "r")
print()
print(" " + self.word.upper())
print()
print()
answer = input('do you remember this word?')
print()
if answer in positive:
print("DEFINITION:")
print(self.definition)
print()
print()
print("good!")
input('press enter when done')
self.opened_edit()
self.positive_edit()
self.streak_edit(1)
elif answer in negative:
print("DEFINITION:")
print(self.definition)
print()
print()
print("you will remember next time")
input('press enter when done')
self.opened_edit()
self.negative_edit()
self.streak_edit(0)
elif answer in exit_answers:
print("Ok, see you soon!")
return True
elif answer in report:
print("sorry the word was incorrect")
self.report_edit()
else:
print("invalid input")
self.update()
        os.system('cls')  # clear the console (Windows-only)
return
def opened_edit(self):
"""
        React to the word being shown: increment the seen counter and append a timestamp.
:return:
:rtype:
"""
if self.time_h:
self.time_h += " , " + str(int(time.time()))
else:
self.time_h = str((int(time.time())))
self.seen += 1
# TODO: merge opened(self,input) with positive and negative, the input shal be 1 or 0 for right or wrong
def positive_edit(self):
"""
        React to a positive answer, updating the history and the right counter.
:return:
:rtype:
"""
if self.history:
self.history += 1
else:
self.history = 1
        self.right += 1
def negative_edit(self):
"""
        React to a negative answer, updating the history and the wrong counter.
:return:
:rtype:
"""
if self.history:
self.history += 0
else:
self.history = 0
self.wrong += 1
def report_edit(self):
self.reported = 1
def streak_edit(self, value):
"""
update the streak, positive means the user is on a positive streak for the word and vice versa
:param value: integer (1 or 0)
:type value: int
"""
if value == 1:
if self.streak >= 0:
self.streak += 1
else:
self.streak = 1
if value == 0:
if self.streak >= 0:
self.streak = -1
else:
self.streak += -1
def update(self):
variables = ['history', 'time', 'seen', 'right', 'wrong', 'streak', 'reported']
marks = ["?"]*len(variables)
values = [self.history, self.time_h, self.seen, self.right, self.wrong, self.streak, self.reported]
output_list = [a+" = "+b for a, b in zip(variables, marks)]
output_line = " , ".join(output_list)
c.execute('UPDATE words SET '+ output_line+' WHERE rowid = '+str(self.row_id),values)
conn.commit()
def chooser():
case = random.random()
if case < 0.70:
c.execute('SELECT rowid FROM words WHERE reported = 0 ORDER BY RANDOM() LIMIT 1;')
elif case < 0.95:
c.execute('SELECT rowid FROM words WHERE reported = 0 and streak < 0 ORDER BY RANDOM() LIMIT 1;')
else:
c.execute('SELECT rowid FROM words WHERE reported = 0 and streak > 0 ORDER BY RANDOM() LIMIT 1;')
row_id = c.fetchone()
if not row_id:
c.execute('SELECT rowid FROM words WHERE reported = 0 ORDER BY RANDOM() LIMIT 1;')
row_id = c.fetchone()
return row_id[0]
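# Breakdown of the weighting implemented above (derived from the code itself,
# not from any project documentation):
#   ~70% of draws: any unreported word
#   ~25% of draws: a word the user is failing (streak < 0)
#    ~5% of draws: a word the user is on a winning streak with (streak > 0)
# If the weighted query finds nothing, it falls back to any unreported word.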
if __name__ == "__main__":
while True:
test_word = Word(chooser())
result = test_word.show()
if result:
break
| 28.481283 | 108 | 0.533421 | 4,369 | 0.820315 | 0 | 0 | 0 | 0 | 0 | 0 | 1,918 | 0.36012 |
fa078e49a61057ac1da2c5e2c17b188b1fbf01de | 128 | py | Python | shit.py | rangehow/TransformerForMT | 48ec2fb5350003063290f2ad14d55c642517c026 | [
"MIT"
] | null | null | null | shit.py | rangehow/TransformerForMT | 48ec2fb5350003063290f2ad14d55c642517c026 | [
"MIT"
] | null | null | null | shit.py | rangehow/TransformerForMT | 48ec2fb5350003063290f2ad14d55c642517c026 | [
"MIT"
] | null | null | null | import math
from typing import List
import numpy as np
import torch
a = torch.randn(4, 3,2)
print(a)
print(torch.argmax(a, -1)) | 16 | 26 | 0.734375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
fa07d8d54e3e7e71f0d6bf08b9fe0410c6904351 | 6,536 | py | Python | tcr/status.py | kris-76/thecardroom | 8a527f0a6d8e3339bbd76fe2bebe029517f29deb | [
"MIT"
] | 5 | 2021-11-27T16:40:05.000Z | 2022-02-20T18:46:43.000Z | tcr/status.py | kris-76/thecardroom | 8a527f0a6d8e3339bbd76fe2bebe029517f29deb | [
"MIT"
] | null | null | null | tcr/status.py | kris-76/thecardroom | 8a527f0a6d8e3339bbd76fe2bebe029517f29deb | [
"MIT"
] | 4 | 2022-02-03T08:08:46.000Z | 2022-03-03T07:14:41.000Z | #
# Copyright 2021 Kristofer Henderson
#
# MIT License:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from tcr.wallet import Wallet
from tcr.wallet import WalletExternal
from tcr.cardano import Cardano
from tcr.database import Database
import logging
import argparse
import tcr.command
import tcr.nftmint
import traceback
def main():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--network', required=True,
action='store',
type=str,
metavar='NAME',
help='Which network to use, [mainnet | testnet]')
parser.add_argument('--wallet', required=False,
action='store',
type=str,
default=None,
metavar='NAME',
help='Dump UTXOs from wallet')
parser.add_argument('--policy', required=False,
action='store',
type=str,
default=None,
metavar='NAME',
help='')
args = parser.parse_args()
network = args.network
wallet_name = args.wallet
policy_name = args.policy
if not network in tcr.command.networks:
raise Exception('Invalid Network: {}'.format(network))
tcr.nftmint.setup_logging(network, 'status')
logger = logging.getLogger(network)
cardano = Cardano(network, '{}_protocol_parameters.json'.format(network))
tip = cardano.query_tip()
cardano.query_protocol_parameters()
tip_slot = tip['slot']
database = Database('{}.ini'.format(network))
database.open()
meta = database.query_chain_metadata()
db_size = database.query_database_size()
latest_slot = database.query_latest_slot()
sync_progress = database.query_sync_progress()
logger.info('Database Chain Metadata: {} / {}'.format(meta[1], meta[2]))
logger.info('Database Size: {}'.format(db_size))
logger.info('Cardano Node Tip Slot: {}'.format(tip_slot))
logger.info(' Database Latest Slot: {}'.format(latest_slot))
logger.info('Sync Progress: {}'.format(sync_progress))
wallet = None
if wallet_name != None:
if wallet_name.startswith('addr'):
            wallet = WalletExternal('external', cardano.get_network(), wallet_name)
else:
wallet = Wallet(wallet_name, cardano.get_network())
if not wallet.exists():
logger.error('Wallet: <{}> does not exist'.format(wallet_name))
raise Exception('Wallet: <{}> does not exist'.format(wallet_name))
stake_address = database.query_stake_address(wallet.get_payment_address(Wallet.ADDRESS_INDEX_MINT))
logger.info(' Root address = {}'.format(wallet.get_payment_address(Wallet.ADDRESS_INDEX_ROOT)))
logger.info(' Mint address = {}'.format(wallet.get_payment_address(Wallet.ADDRESS_INDEX_MINT)))
logger.info('Presale address = {}'.format(wallet.get_payment_address(Wallet.ADDRESS_INDEX_PRESALE)))
logger.info(' Stake address = {}'.format(stake_address))
cardano.dump_utxos_sorted(database, wallet)
if policy_name != None:
policies = policy_name.split(',')
logger.info('')
#logger.info("By Token: ")
by_address = {}
i = 1
for policy in policies:
if cardano.get_policy_id(policy) == None:
logger.error('Policy: <{}> does not exist'.format(policy))
raise Exception('Policy: <{}> does not exist'.format(policy))
tokens = database.query_current_owner(cardano.get_policy_id(policy))
logger.info('{} = {} tokens'.format(policy, len(tokens)))
keys = list(tokens.keys())
keys.sort()
for name in keys:
address = tokens[name]['address']
slot = tokens[name]['slot']
logger.info('{}. {} owned by {} at slot {}'.format(i, name, address, slot))
i += 1
if address in by_address:
by_address[address].append(name)
else:
by_address[address] = [name]
holders = list(by_address.items())
def sort_by_length(item):
return len(item[1])
holders.sort(key=sort_by_length)
logger.info('')
logger.info('')
logger.info('By Owner:')
logger.info('len = {}'.format(len(holders)))
i = 1
for holder in holders:
logger.info('{: 4}. {}({})'.format(i, holder[0], len(holder[1])))
tokens = holder[1]
token_str = ''
j = 0
for token in tokens:
if len(token_str) == 0:
token_str += token
else:
token_str += ', ' + token
j += 1
if j == 8:
logger.info(' {}'.format(token_str))
j = 0
token_str = ''
if j > 0:
logger.info(' {}'.format(token_str))
i += 1
if __name__ == '__main__':
try:
main()
except Exception as e:
print('')
print('')
print('EXCEPTION: {}'.format(e))
print('')
traceback.print_exc()
| 38.674556 | 108 | 0.577264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,844 | 0.28213 |
fa0982dc1f168404670380a21142d00f25739e44 | 2,863 | py | Python | aws/ec2/manage.py | amaga38/discord_bot | 99093189ef9e50ca4152b47546deacec78cb6983 | [
"MIT"
] | null | null | null | aws/ec2/manage.py | amaga38/discord_bot | 99093189ef9e50ca4152b47546deacec78cb6983 | [
"MIT"
] | null | null | null | aws/ec2/manage.py | amaga38/discord_bot | 99093189ef9e50ca4152b47546deacec78cb6983 | [
"MIT"
] | null | null | null | import sys
import json
import boto3
from botocore.exceptions import ClientError
from . import config
def status_instance(instance_id, dry_run=False):
ec2 = boto3.client('ec2',
region_name='ap-northeast-1',
aws_access_key_id=config.AWS_ACCESS_KEY_ID,
aws_secret_access_key=config.AWS_SECRET_KEY)
if (dry_run):
try:
ec2.describe_instance_status(
                InstanceIds=[instance_id], DryRun=True)
except ClientError as e:
if 'DryRunOperation' not in str(e):
raise
else:
print(e)
try:
response = ec2.describe_instance_status(
InstanceIds=[instance_id], DryRun=False)
print(response)
return response
except ClientError as e:
print(e)
return ''
def start_instance(instance_id, dry_run=False):
ec2 = boto3.client('ec2',
region_name='ap-northeast-1',
aws_access_key_id=config.AWS_ACCESS_KEY_ID,
aws_secret_access_key=config.AWS_SECRET_KEY)
if (dry_run):
        # Verify permissions with a dry run
try:
ec2.start_instances(InstanceIds=[instance_id], DryRun=True)
except ClientError as e:
if 'DryRunOperation' not in str(e):
raise
else:
print(e)
try:
response = ec2.start_instances(InstanceIds=[instance_id], DryRun=False)
return response
except ClientError as e:
print(e)
return ''
def stop_instance(instance_id, dry_run=False):
ec2 = boto3.client('ec2',
region_name='ap-northeast-1',
aws_access_key_id=config.AWS_ACCESS_KEY_ID,
aws_secret_access_key=config.AWS_SECRET_KEY)
if (dry_run):
try:
ec2.stop_instances(InstanceIds=[instance_id], DryRun=True)
except ClientError as e:
if 'DryRunOperation' not in str(e):
raise
else:
print(e)
try:
response = ec2.stop_instances(InstanceIds=[instance_id], DryRun=False)
return response
except ClientError as e:
print(e)
return ''
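# Illustrative usage (the instance id is a placeholder, not a real one):
#
#   status_instance('i-0123456789abcdef0')               # query current state
#   start_instance('i-0123456789abcdef0', dry_run=True)  # permission check only
#   stop_instance('i-0123456789abcdef0')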
def test():
instance_id = 'i-xxxxxxxxxxxxxxxxx'
ec2 = boto3.client('ec2',
region_name='ap-northeast-1',
aws_access_key_id=config.AWS_ACCESS_KEY_ID,
aws_secret_access_key=config.AWS_SECRET_KEY)
try:
response = ec2.describe_instance_status(
InstanceIds=[instance_id], DryRun=False)
print(response)
except ClientError as e:
print(e)
if __name__ == '__main__':
test()
| 30.457447 | 80 | 0.555012 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.066225 |
fa0a0031b6771b43d0976594e38309c2e2f0ab94 | 156 | py | Python | submissions/templatetags/auth_extras.py | lesves/acceptor | 07c3e144e93f27e8355effbfe95a1f01dc818a90 | [
"MIT"
] | 1 | 2022-01-03T21:42:37.000Z | 2022-01-03T21:42:37.000Z | submissions/templatetags/auth_extras.py | lesves/acceptor | 07c3e144e93f27e8355effbfe95a1f01dc818a90 | [
"MIT"
] | null | null | null | submissions/templatetags/auth_extras.py | lesves/acceptor | 07c3e144e93f27e8355effbfe95a1f01dc818a90 | [
"MIT"
] | null | null | null | from django import template
register = template.Library()
@register.filter
def has_group(user, name):
return user.groups.filter(name=name).exists()
| 19.5 | 50 | 0.75 | 0 | 0 | 0 | 0 | 93 | 0.596154 | 0 | 0 | 0 | 0 |
fa0b649415efbfbba1ac84c2d59ac1cdd94fe947 | 5,081 | py | Python | python_modules/dagster-graphql/dagster_graphql/implementation/fetch_pipelines.py | zzztimbo/dagster | 5cf8f159183a80d2364e05bb30362e2798a7af37 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql/implementation/fetch_pipelines.py | zzztimbo/dagster | 5cf8f159183a80d2364e05bb30362e2798a7af37 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql/implementation/fetch_pipelines.py | zzztimbo/dagster | 5cf8f159183a80d2364e05bb30362e2798a7af37 | [
"Apache-2.0"
] | null | null | null | import sys
from dagster_graphql.schema.pipelines import DauphinPipeline, DauphinPipelineSnapshot
from graphql.execution.base import ResolveInfo
from dagster import check
from dagster.core.definitions.pipeline import ExecutionSelector
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.utils.error import serializable_error_info_from_exc_info
from .utils import UserFacingGraphQLError, capture_dauphin_error
@capture_dauphin_error
def get_pipeline_snapshot_or_error(graphene_info, subset_id):
check.str_param(subset_id, 'subset_id')
selector = ExecutionSelector(subset_id)
pipeline_def = get_pipeline_def_from_selector(graphene_info, selector)
return DauphinPipelineSnapshot(pipeline_def.get_pipeline_index())
@capture_dauphin_error
def get_pipeline_or_error(graphene_info, selector):
'''Returns a DauphinPipelineOrError.'''
return DauphinPipeline(get_pipeline_def_from_selector(graphene_info, selector))
def get_pipeline_or_raise(graphene_info, selector):
'''Returns a DauphinPipeline or raises a UserFacingGraphQLError if one cannot be retrieved
from the selector, e.g., the pipeline is not present in the loaded repository.'''
return DauphinPipeline(get_pipeline_def_from_selector(graphene_info, selector))
def get_pipeline_reference_or_raise(graphene_info, selector):
'''Returns a DauphinPipelineReference or raises a UserFacingGraphQLError if a pipeline
    reference cannot be retrieved from the selector, e.g., a UserFacingGraphQLError that wraps an
InvalidSubsetError.'''
return get_dauphin_pipeline_reference_from_selector(graphene_info, selector)
@capture_dauphin_error
def get_pipelines_or_error(graphene_info):
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
return _get_pipelines(graphene_info)
def get_pipelines_or_raise(graphene_info):
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
return _get_pipelines(graphene_info)
def _get_pipelines(graphene_info):
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
repository = graphene_info.context.get_repository()
pipeline_instances = []
for pipeline_def in repository.get_all_pipelines():
pipeline_instances.append(graphene_info.schema.type_named('Pipeline')(pipeline_def))
return graphene_info.schema.type_named('PipelineConnection')(
nodes=sorted(pipeline_instances, key=lambda pipeline: pipeline.name)
)
def get_pipeline_def_from_selector(graphene_info, selector):
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
check.inst_param(selector, 'selector', ExecutionSelector)
repository = graphene_info.context.get_repository()
if not repository.has_pipeline(selector.name):
raise UserFacingGraphQLError(
graphene_info.schema.type_named('PipelineNotFoundError')(pipeline_name=selector.name)
)
orig_pipeline = graphene_info.context.get_pipeline(selector.name)
if not selector.solid_subset:
return orig_pipeline
else:
for solid_name in selector.solid_subset:
if not orig_pipeline.has_solid_named(solid_name):
raise UserFacingGraphQLError(
graphene_info.schema.type_named('InvalidSubsetError')(
message='Solid "{solid_name}" does not exist in "{pipeline_name}"'.format(
solid_name=solid_name, pipeline_name=selector.name
),
pipeline=graphene_info.schema.type_named('Pipeline')(orig_pipeline),
)
)
try:
return orig_pipeline.build_sub_pipeline(selector.solid_subset)
except DagsterInvalidDefinitionError:
raise UserFacingGraphQLError(
graphene_info.schema.type_named('InvalidSubsetError')(
message=serializable_error_info_from_exc_info(sys.exc_info()).message,
pipeline=graphene_info.schema.type_named('Pipeline')(orig_pipeline),
)
)
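# Minimal sketch of resolving a (possibly subsetted) pipeline definition; the
# pipeline/solid names are illustrative, and graphene_info comes from the
# surrounding GraphQL resolution context:
#
#   selector = ExecutionSelector('my_pipeline', ['my_solid'])
#   pipeline_def = get_pipeline_def_from_selector(graphene_info, selector)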
def get_dauphin_pipeline_reference_from_selector(graphene_info, selector):
from ..schema.errors import DauphinPipelineNotFoundError, DauphinInvalidSubsetError
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
check.inst_param(selector, 'selector', ExecutionSelector)
try:
return graphene_info.schema.type_named('Pipeline')(
get_pipeline_def_from_selector(graphene_info, selector)
)
except UserFacingGraphQLError as exc:
if (
isinstance(exc.dauphin_error, DauphinPipelineNotFoundError)
or
# At this time DauphinPipeline represents a potentially subsetted
# pipeline so if the solids used to subset no longer exist
# we can't return the correct instance so we fallback to
# UnknownPipeline
isinstance(exc.dauphin_error, DauphinInvalidSubsetError)
):
return graphene_info.schema.type_named('UnknownPipeline')(selector.name)
raise
| 41.647541 | 98 | 0.739618 | 0 | 0 | 0 | 0 | 691 | 0.135997 | 0 | 0 | 925 | 0.182051 |
fa0d80b9aeafc9ccb67f6cea2a2f78fcffb1f863 | 701 | py | Python | tests/snippets/index_overflow.py | khg0712/RustPython | a04c19ccb0f5e7e1774d5e6f267ffed3ee27aeae | [
"MIT"
] | 3 | 2019-08-14T02:05:49.000Z | 2020-01-03T08:39:56.000Z | tests/snippets/index_overflow.py | khg0712/RustPython | a04c19ccb0f5e7e1774d5e6f267ffed3ee27aeae | [
"MIT"
] | 6 | 2021-10-14T15:55:16.000Z | 2022-03-31T14:04:02.000Z | tests/snippets/index_overflow.py | khg0712/RustPython | a04c19ccb0f5e7e1774d5e6f267ffed3ee27aeae | [
"MIT"
] | 1 | 2020-05-26T15:20:20.000Z | 2020-05-26T15:20:20.000Z | import sys
def expect_cannot_fit_index_error(s, index):
try:
s[index]
except IndexError:
pass
# TODO: Replace current except block with commented
# after solving https://github.com/RustPython/RustPython/issues/322
# except IndexError as error:
# assert str(error) == "cannot fit 'int' into an index-sized integer"
else:
assert False
MAX_INDEX = sys.maxsize + 1
MIN_INDEX = -(MAX_INDEX + 1)
test_str = "test"
expect_cannot_fit_index_error(test_str, MIN_INDEX)
expect_cannot_fit_index_error(test_str, MAX_INDEX)
test_list = [0, 1, 2, 3]
expect_cannot_fit_index_error(test_list, MIN_INDEX)
expect_cannot_fit_index_error(test_list, MAX_INDEX)
| 25.962963 | 77 | 0.727532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.322397 |
fa0e99500da23e759265befa87f20ecc71948e4b | 7,495 | py | Python | src/spn/experiments/FPGA/RunNative.py | QueensGambit/SPFlow | 2b4d5ec58ff90927177441004df0a49cb69791fb | [
"Apache-2.0"
] | null | null | null | src/spn/experiments/FPGA/RunNative.py | QueensGambit/SPFlow | 2b4d5ec58ff90927177441004df0a49cb69791fb | [
"Apache-2.0"
] | null | null | null | src/spn/experiments/FPGA/RunNative.py | QueensGambit/SPFlow | 2b4d5ec58ff90927177441004df0a49cb69791fb | [
"Apache-2.0"
] | null | null | null | """
Created on March 26, 2018
@author: Alejandro Molina
"""
import glob
import os
import platform
import subprocess
from collections import OrderedDict
import numpy as np
from natsort import natsorted
from spn.algorithms.Inference import likelihood
from spn.experiments.FPGA.GenerateSPNs import load_spn_from_file, fpga_count_ops
from spn.gpu.TensorFlow import spn_to_tf_graph
from spn.structure.Base import get_nodes_by_type, Node, get_number_of_edges, get_depth, Product, Leaf, Sum
np.set_printoptions(precision=50)
import time
import tensorflow as tf
# NOTE: Histogram and histogram_to_tf_graph are referenced below but were
# never imported in the original file; these import locations are assumptions.
from spn.structure.leaves.histogram.Histograms import Histogram
from spn.gpu.TensorFlow import histogram_to_tf_graph
def sum_to_tf_graph(node, children, data_placeholder, **args):
with tf.compat.v1.variable_scope("%s_%s" % (node.__class__.__name__, node.id)):
return tf.add_n([node.weights[i] * ctf for i, ctf in enumerate(children)])
def prod_to_tf_graph(node, children, data_placeholder, **args):
with tf.compat.v1.variable_scope("%s_%s" % (node.__class__.__name__, node.id)):
prod_res = None
for c in children:
if prod_res is None:
prod_res = c
else:
prod_res = tf.multiply(prod_res, c)
return prod_res
_node_tf_graph = {Sum: sum_to_tf_graph, Product: prod_to_tf_graph, Histogram: histogram_to_tf_graph}
path = os.path.dirname(__file__)
OS_name = platform.system()
def run_experiment(exp, spn, test_data, test_type, exp_lambda):
outprefix = path + "/spns/%s/" % (exp)
results_file = "%stime_test_%s_ll_%s.txt" % (outprefix, test_type, OS_name)
if os.path.isfile(results_file):
return
print(exp, test_data.shape, test_type)
ll, test_time = exp_lambda()
np.savetxt(results_file, ll, delimiter=";")
import cpuinfo
machine = cpuinfo.get_cpu_info()["brand"]
adds, muls = fpga_count_ops(spn)
test_n = test_data.shape[0]
results = OrderedDict()
results["Experiment"] = exp
results["OS"] = OS_name
results["machine"] = machine
results["test type"] = test_type
results["expected adds"] = adds
results["expected muls"] = muls
results["input rows"] = test_n
results["input cols"] = test_data.shape[1]
results["spn nodes"] = len(get_nodes_by_type(spn, Node))
results["spn sum nodes"] = len(get_nodes_by_type(spn, Sum))
results["spn prod nodes"] = len(get_nodes_by_type(spn, Product))
results["spn leaves"] = len(get_nodes_by_type(spn, Leaf))
results["spn edges"] = get_number_of_edges(spn)
results["spn layers"] = get_depth(spn)
results["time per task"] = test_time
results["time per instance"] = test_time / test_n
results["avg ll"] = np.mean(ll, dtype=np.float128)
results_file_name = "results.csv"
if not os.path.isfile(results_file_name):
results_file = open(results_file_name, "w")
results_file.write(";".join(results.keys()))
results_file.write("\n")
else:
results_file = open(results_file_name, "a")
results_file.write(";".join(map(str, results.values())))
results_file.write("\n")
results_file.close()
if __name__ == "__main__":
for exp in natsorted(map(os.path.basename, glob.glob(path + "/spns/*"))):
outprefix = path + "/spns/%s/" % (exp)
spn, words, _ = load_spn_from_file(outprefix)
print(exp, fpga_count_ops(spn))
data = np.loadtxt(outprefix + "all_data.txt", delimiter=";")
if data.shape[0] < 10000:
r = np.random.RandomState(17)
test_data = data[r.choice(data.shape[0], 10000), :]
else:
test_data = data
test_data_fname = outprefix + "time_test_data.txt"
if not os.path.isfile(test_data_fname):
np.savetxt(test_data_fname, test_data, delimiter=";", header=";".join(words))
def execute_tf():
import tensorflow as tf
from tensorflow.python.client import timeline
import json
tf.compat.v1.reset_default_graph()
elapsed = 0
data_placeholder = tf.compat.v1.placeholder(tf.int32, test_data.shape)
tf_graph = spn_to_tf_graph(spn, data_placeholder, log_space=False)
tfstart = time.perf_counter()
n_repeats = 1000
with tf.compat.v1.Session() as sess:
for i in range(n_repeats):
run_options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
run_metadata = tf.compat.v1.RunMetadata()
sess.run(tf.compat.v1.global_variables_initializer())
# start = time.perf_counter()
tf_ll = sess.run(
tf_graph,
feed_dict={data_placeholder: test_data},
options=run_options,
run_metadata=run_metadata,
)
                    continue  # skip the Chrome-trace profiling below; wall-clock timing is used instead
# end = time.perf_counter()
# e2 = end - start
ctf = timeline.Timeline(run_metadata.step_stats).generate_chrome_trace_format()
rfile_path = outprefix + "tf_timelines2/time_line_%s.json" % i
if not os.path.exists(os.path.dirname(rfile_path)):
os.mkdir(os.path.dirname(rfile_path))
results_file = open(rfile_path, "w")
results_file.write(ctf)
results_file.close()
traceEvents = json.loads(ctf)["traceEvents"]
run_time = max([o["ts"] + o["dur"] for o in traceEvents if "ts" in o and "dur" in o]) - min(
[o["ts"] for o in traceEvents if "ts" in o]
)
run_time *= 1000
if i > 0:
# the first run is 10 times slower for whatever reason
elapsed += run_time
# if i % 20 == 0:
# print(exp, i, e2, run_time)
tfend = time.perf_counter()
tfelapsed = (tfend - tfstart) * 1000000000
return np.log(tf_ll), tfelapsed / (n_repeats - 1)
run_experiment(exp, spn, test_data, "tensorflow7-time", execute_tf)
results_file = "%stime_test_%s_ll_%s.txt" % (outprefix, "tensorflow3", OS_name)
if not os.path.isfile(results_file):
ll, test_time = execute_tf()
print("mean ll", np.mean(ll))
np.savetxt(results_file, ll, delimiter=";")
nfile = outprefix + "spnexe_" + OS_name
def execute_native():
print("computing ll for: ", exp, test_data.shape, nfile)
cmd = "%s < %s" % (nfile, test_data_fname)
proc_output = subprocess.check_output(cmd, shell=True).decode("utf-8")
print("done")
lines = proc_output.split("\n")
cpp_ll = np.array(lines[0 : test_data.shape[0]], dtype=np.float128)
cpp_time = float(lines[-2].split(" ")[-2])
return cpp_ll, cpp_time
run_experiment(exp, spn, test_data, "native", execute_native)
nfile = outprefix + "spnexe_" + OS_name + "_fastmath"
run_experiment(exp, spn, test_data, "native_fast", execute_native)
def execute_python():
start = time.perf_counter()
py_ll = likelihood(spn, test_data)
end = time.perf_counter()
elapsed = end - start
return py_ll, elapsed * 1000000000
run_experiment(exp, spn, test_data, "python", execute_python)
| 34.068182 | 112 | 0.598132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 861 | 0.114877 |
fa1043f553e1c60667839cc0926b5837cdef559f | 576 | py | Python | api/views.py | AktanKasymaliev/django_blog_site_fullstack | 146a03a58c12bf61ff32cadfbb66e7f0ecbcf6b1 | [
"MIT"
] | 1 | 2021-06-29T15:17:06.000Z | 2021-06-29T15:17:06.000Z | api/views.py | AktanKasymaliev/django_blog_site_fullstack | 146a03a58c12bf61ff32cadfbb66e7f0ecbcf6b1 | [
"MIT"
] | null | null | null | api/views.py | AktanKasymaliev/django_blog_site_fullstack | 146a03a58c12bf61ff32cadfbb66e7f0ecbcf6b1 | [
"MIT"
] | null | null | null | from rest_framework import generics
from blogs.models import Comments
from .serializers import CommentsSerializer, UsersSerializers
from rest_framework.permissions import AllowAny, IsAuthenticated, IsAdminUser
from customUsers.models import User
class CommentsView(generics.CreateAPIView):
serializer_class = CommentsSerializer
queryset = Comments.objects.all()
permission_classes = [IsAuthenticated]
# Users
class UsersView(generics.ListAPIView):
serializer_class = UsersSerializers
queryset = User.objects.all()
permission_classes = [IsAdminUser]
| 33.882353 | 77 | 0.8125 | 317 | 0.550347 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.013889 |
fa11053785bbc9426fa980dd1bd5615c9a3ba8c5 | 562 | py | Python | client/__init__.py | mycelium-ethereum/punk-offerbook | 5804a27fe26af0d613fd5281f0f17b9c207f1822 | [
"MIT"
] | null | null | null | client/__init__.py | mycelium-ethereum/punk-offerbook | 5804a27fe26af0d613fd5281f0f17b9c207f1822 | [
"MIT"
] | null | null | null | client/__init__.py | mycelium-ethereum/punk-offerbook | 5804a27fe26af0d613fd5281f0f17b9c207f1822 | [
"MIT"
] | null | null | null | from dotenv import load_dotenv
load_dotenv();
import os
import json
import settings
from web3 import Web3
from client.Mongo import Mongo
from client.Webhook import webhook
from client.Opensea import Opensea
def get_raw_abis(abi_paths):
raw_abis = {}
for abi_key, abi_path in abi_paths.items():
with open(abi_path, "r") as f:
raw_abis[abi_key] = json.loads(f.read())['abi']
return raw_abis
abis = get_raw_abis(settings.ABI_PATHS)
web3 = Web3(Web3.HTTPProvider(os.environ.get("ETH_HTTP_URL")))
mongo = Mongo()
opensea = Opensea() | 25.545455 | 62 | 0.731317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.039146 |
fa12c48a1797c75639adb55946f33942c0b816e2 | 401 | py | Python | src/indexer.py | HypoChloremic/fcsan | 37f75b69eab0285d309b198ffa51cee9556d849a | [
"MIT"
] | null | null | null | src/indexer.py | HypoChloremic/fcsan | 37f75b69eab0285d309b198ffa51cee9556d849a | [
"MIT"
] | null | null | null | src/indexer.py | HypoChloremic/fcsan | 37f75b69eab0285d309b198ffa51cee9556d849a | [
"MIT"
] | null | null | null | from analyze import Analyze
import argparse
# ap = argparse.ArgumentParser()
# ap.addargument("-f", "--folder")
# opts = ap.parse_args()
run = Analyze()
run.read()
files = run.files
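# Build a plain-text index of the FCS files discovered by Analyze, recording
# each file's $FIL (filename) and $DATE metadata keywords.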
def indexer():
with open("FACS_INDEX.txt", "w") as file:
for i in files:
run.read(i)
meta = run.meta
str_to_save = f"File: {meta['$FIL']},Date: {meta['$DATE']},\n"
file.write(str_to_save)
indexer() | 19.095238 | 65 | 0.645885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.391521 |
fa12c7d1995b97754015bbb18baea049a24d51b2 | 1,056 | py | Python | Leetcode Practice/strStr.py | falconcode16/pythonprogramming | fc53a879be473ebceb1d7da061b0e8fc2a20706c | [
"MIT"
] | 2 | 2020-04-11T14:15:10.000Z | 2020-05-12T09:57:29.000Z | Leetcode Practice/strStr.py | falconcode16/pythonprogramming | fc53a879be473ebceb1d7da061b0e8fc2a20706c | [
"MIT"
] | null | null | null | Leetcode Practice/strStr.py | falconcode16/pythonprogramming | fc53a879be473ebceb1d7da061b0e8fc2a20706c | [
"MIT"
] | 1 | 2021-10-10T02:13:42.000Z | 2021-10-10T02:13:42.000Z | # Link - https://leetcode.com/problems/implement-strstr/
"""
28. Implement strStr()
Implement strStr().
Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.
Clarification:
What should we return when needle is an empty string? This is a great question to ask during an interview.
For the purpose of this problem, we will return 0 when needle is an empty string. This is consistent with C's strstr() and Java's indexOf().
Example 1:
Input: haystack = "hello", needle = "ll"
Output: 2
Example 2:
Input: haystack = "aaaaa", needle = "bba"
Output: -1
Example 3:
Input: haystack = "", needle = ""
Output: 0
Constraints:
0 <= haystack.length, needle.length <= 5 * 10^4
haystack and needle consist of only lower-case English characters.
"""
class Solution:
def strStr(self, haystack: str, needle: str) -> int:
if len(needle) == 0:
return 0
else:
try:
return haystack.index(needle)
except ValueError:
return -1
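# Quick sanity checks mirroring the examples in the docstring:
#   assert Solution().strStr("hello", "ll") == 2
#   assert Solution().strStr("aaaaa", "bba") == -1
#   assert Solution().strStr("", "") == 0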
| 22 | 138 | 0.660985 | 257 | 0.243371 | 0 | 0 | 0 | 0 | 0 | 0 | 794 | 0.751894 |
fa132dcd77d4356013f00fce442c60e3fac1fb8d | 816 | py | Python | skaio/scheduler.py | cipriantarta/skaio | 30716a3bd30d055c0c18d10a899522934bb71613 | [
"BSD-3-Clause"
] | null | null | null | skaio/scheduler.py | cipriantarta/skaio | 30716a3bd30d055c0c18d10a899522934bb71613 | [
"BSD-3-Clause"
] | null | null | null | skaio/scheduler.py | cipriantarta/skaio | 30716a3bd30d055c0c18d10a899522934bb71613 | [
"BSD-3-Clause"
] | null | null | null | import importlib.util
import inspect
from skaio import log
from skaio.core.publisher import Publisher
from skaio.core.base.task import BaseTask
from skaio.utils.common import get_loop
tasks = ['samples.simple_tasks']
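# Scheduler scans each module listed in `tasks` for concrete BaseTask
# subclasses and publishes one message per discovered task class.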
class Scheduler:
def start(self):
publisher = Publisher()
loop = get_loop()
for task_mod in tasks:
m = importlib.import_module(task_mod)
task_classes = filter(lambda x: inspect.isclass(x[1])
and x[1].__name__ != 'BaseTask'
and issubclass(x[1], BaseTask),
inspect.getmembers(m))
for name, task in task_classes:
log.info(f'Sending tasks for {name}')
loop.run_until_complete(publisher.publish(task))
| 32.64 | 65 | 0.590686 | 595 | 0.729167 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.072304 |
fa14df4a641344d268ffc7e3fb5ac45c848bc4ad | 10,347 | py | Python | rotkehlchen/accounting/export/csv.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 137 | 2018-03-05T11:53:29.000Z | 2019-11-03T16:38:42.000Z | rotkehlchen/accounting/export/csv.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 385 | 2018-03-08T12:43:41.000Z | 2019-11-10T09:15:36.000Z | rotkehlchen/accounting/export/csv.py | rotkehlchenio/rotkehlchen | 98f49cd3ed26c641fec03b78eff9fe1872385fbf | [
"BSD-3-Clause"
] | 59 | 2018-03-08T10:08:27.000Z | 2019-10-26T11:30:44.000Z | import json
import logging
from csv import DictWriter
from pathlib import Path
from tempfile import mkdtemp
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Tuple
from zipfile import ZIP_DEFLATED, ZipFile
from rotkehlchen.accounting.pnl import PnlTotals
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.fval import FVal
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import Timestamp
from rotkehlchen.utils.mixins.customizable_date import CustomizableDateMixin
from rotkehlchen.utils.version_check import get_current_version
if TYPE_CHECKING:
from rotkehlchen.accounting.structures.processed_event import ProcessedAccountingEvent
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
FILENAME_ALL_CSV = 'all_events.csv'
ETH_EXPLORER = 'https://etherscan.io/tx/'
ACCOUNTING_SETTINGS = (
'include_crypto2crypto',
'taxfree_after_period',
'include_gas_costs',
'account_for_assets_movements',
'calculate_past_cost_basis',
)
CSV_INDEX_OFFSET = 2  # +1 to skip the title row and +1 since spreadsheet rows count from 1
class CSVWriteError(Exception):
pass
def _dict_to_csv_file(path: Path, dictionary_list: List) -> None:
"""Takes a filepath and a list of dictionaries representing the rows and writes them
into the file as a CSV
May raise:
    - CSVWriteError if DictWriter.writerow() tries to write a dict that
      contains fields not in fieldnames
"""
if len(dictionary_list) == 0:
        log.debug('Skipping writing empty CSV for {}'.format(path))
return
with open(path, 'w', newline='') as f:
w = DictWriter(f, fieldnames=dictionary_list[0].keys())
w.writeheader()
try:
for dic in dictionary_list:
w.writerow(dic)
except ValueError as e:
raise CSVWriteError(f'Failed to write {path} CSV due to {str(e)}') from e
class CSVExporter(CustomizableDateMixin):
def __init__(
self,
database: 'DBHandler',
):
super().__init__(database=database)
self.reset(start_ts=Timestamp(0), end_ts=Timestamp(0))
def reset(self, start_ts: Timestamp, end_ts: Timestamp) -> None:
self.start_ts = start_ts
self.end_ts = end_ts
self.reload_settings()
try:
frontend_settings = json.loads(self.settings.frontend_settings)
if (
'explorers' in frontend_settings and
'ETH' in frontend_settings['explorers'] and
'transaction' in frontend_settings['explorers']['ETH']
):
self.eth_explorer = frontend_settings['explorers']['ETH']['transaction']
else:
self.eth_explorer = ETH_EXPLORER
except (json.decoder.JSONDecodeError, KeyError):
self.eth_explorer = ETH_EXPLORER
def _add_sumif_formula(
self,
check_range: str,
condition: str,
sum_range: str,
actual_value: FVal,
) -> str:
if self.settings.pnl_csv_with_formulas is False:
return str(actual_value)
return f'=SUMIF({check_range};{condition};{sum_range})'
def _add_pnl_type(
self,
event: 'ProcessedAccountingEvent',
dict_event: Dict[str, Any],
amount_column: str,
name: Literal['free', 'taxable'],
) -> None:
"""Adds the pnl type value and cost basis to the passed dict event"""
if getattr(event.pnl, name, ZERO) == ZERO:
return
index = event.index + CSV_INDEX_OFFSET
value_formula = f'{amount_column}{index}*H{index}'
total_value_formula = f'(F{index}*H{index}+G{index}*H{index})' # noqa: E501 # formula of both free and taxable
cost_basis_column = 'K' if name == 'taxable' else 'L'
cost_basis = f'{cost_basis_column}{index}'
should_count_entire_spend_formula = (
            (name == 'taxable' and event.timestamp >= self.start_ts) or
            (name == 'free' and event.timestamp < self.start_ts)
)
if event.count_entire_amount_spend and should_count_entire_spend_formula:
equation = (
f'=IF({cost_basis}="",'
f'-{total_value_formula},'
f'-{total_value_formula}+{value_formula}-{cost_basis})'
)
else:
equation = (
f'=IF({cost_basis}="",'
f'{value_formula},'
f'{value_formula}-{cost_basis})'
)
dict_event[f'pnl_{name}'] = equation
cost_basis = ''
if event.cost_basis is not None:
for acquisition in event.cost_basis.matched_acquisitions:
if name == 'taxable' and acquisition.taxable is False:
continue
if name == 'free' and acquisition.taxable is True:
continue
index = acquisition.event.index + CSV_INDEX_OFFSET
if cost_basis == '':
cost_basis = '='
else:
cost_basis += '+'
cost_basis += f'{str(acquisition.amount)}*H{index}'
dict_event[f'cost_basis_{name}'] = cost_basis
def _maybe_add_summary(self, events: List[Dict[str, Any]], pnls: PnlTotals) -> None:
"""Depending on given settings, adds a few summary lines at the end of
the all events PnL report"""
if self.settings.pnl_csv_have_summary is False:
return
length = len(events) + 1
template: Dict[str, Any] = {
'type': '',
'notes': '',
'location': '',
'timestamp': '',
'asset': '',
'free_amount': '',
'taxable_amount': '',
'price': '',
'pnl_taxable': '',
'cost_basis_taxable': '',
'pnl_free': '',
'cost_basis_free': '',
}
events.append(template) # separate with 2 new lines
events.append(template)
entry = template.copy()
entry['taxable_amount'] = 'TAXABLE'
entry['price'] = 'FREE'
events.append(entry)
start_sums_index = length + 4
sums = 0
for name, value in pnls.items():
if value.taxable == ZERO and value.free == ZERO:
continue
sums += 1
entry = template.copy()
entry['free_amount'] = f'{str(name)} total'
entry['taxable_amount'] = self._add_sumif_formula(
check_range=f'A2:A{length}',
condition=f'"{str(name)}"',
sum_range=f'I2:I{length}',
actual_value=value.taxable,
)
entry['price'] = self._add_sumif_formula(
check_range=f'A2:A{length}',
condition=f'"{str(name)}"',
sum_range=f'J2:J{length}',
actual_value=value.free,
)
events.append(entry)
entry = template.copy()
entry['free_amount'] = 'TOTAL'
if sums != 0:
entry['taxable_amount'] = f'=SUM(G{start_sums_index}:G{start_sums_index+sums-1})'
entry['price'] = f'=SUM(H{start_sums_index}:H{start_sums_index+sums-1})'
else:
entry['taxable_amount'] = entry['price'] = 0
events.append(entry)
events.append(template) # separate with 2 new lines
events.append(template)
version_result = get_current_version(check_for_updates=False)
entry = template.copy()
entry['free_amount'] = 'rotki version'
entry['taxable_amount'] = version_result.our_version
events.append(entry)
for setting in ACCOUNTING_SETTINGS:
entry = template.copy()
entry['free_amount'] = setting
entry['taxable_amount'] = str(getattr(self.settings, setting))
events.append(entry)
def create_zip(
self,
events: List['ProcessedAccountingEvent'],
pnls: PnlTotals,
) -> Tuple[bool, str]:
# TODO: Find a way to properly delete the directory after send is complete
dirpath = Path(mkdtemp())
success, msg = self.export(events=events, pnls=pnls, directory=dirpath)
if not success:
return False, msg
files: List[Tuple[Path, str]] = [
(dirpath / FILENAME_ALL_CSV, FILENAME_ALL_CSV),
]
with ZipFile(file=dirpath / 'csv.zip', mode='w', compression=ZIP_DEFLATED) as csv_zip:
for path, filename in files:
if not path.exists():
continue
csv_zip.write(path, filename)
path.unlink()
success = False
filename = ''
if csv_zip.filename is not None:
success = True
filename = csv_zip.filename
return success, filename
def to_csv_entry(self, event: 'ProcessedAccountingEvent') -> Dict[str, Any]:
dict_event = event.to_exported_dict(
ts_converter=self.timestamp_to_date,
eth_explorer=self.eth_explorer,
for_api=False,
)
# For CSV also convert timestamp to date
dict_event['timestamp'] = self.timestamp_to_date(event.timestamp)
if self.settings.pnl_csv_with_formulas is False:
return dict_event
# else add formulas
self._add_pnl_type(event=event, dict_event=dict_event, amount_column='F', name='free')
self._add_pnl_type(event=event, dict_event=dict_event, amount_column='G', name='taxable')
return dict_event
def export(
self,
events: List['ProcessedAccountingEvent'],
pnls: PnlTotals,
directory: Path,
) -> Tuple[bool, str]:
        serialized_events = [self.to_csv_entry(x) for x in events]
self._maybe_add_summary(events=serialized_events, pnls=pnls)
try:
directory.mkdir(parents=True, exist_ok=True)
_dict_to_csv_file(
directory / FILENAME_ALL_CSV,
serialized_events,
)
except (CSVWriteError, PermissionError) as e:
return False, str(e)
return True, ''
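# Minimal usage sketch (assumes an initialized DBHandler `db`, a list of
# ProcessedAccountingEvent in `events` and PnlTotals in `pnls`):
#
#   exporter = CSVExporter(database=db)
#   exporter.reset(start_ts=Timestamp(0), end_ts=Timestamp(1700000000))
#   success, path_or_msg = exporter.create_zip(events=events, pnls=pnls)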
| 35.193878 | 120 | 0.590896 | 8,429 | 0.814632 | 0 | 0 | 0 | 0 | 0 | 0 | 2,262 | 0.218614 |
fa1575f93b6616c8d5798896a41c353c1200f26e | 551 | py | Python | tests/ut/bq_test_kit/interpolators/test_shell_interpolator.py | tiboun/python-bigquery-test-kit | 8f62bdf21122b615f56088a8e2701e0bb4c71f3b | [
"MIT"
] | 31 | 2021-03-03T21:07:44.000Z | 2022-03-20T22:00:45.000Z | tests/ut/bq_test_kit/interpolators/test_shell_interpolator.py | tiboun/python-bq-test-kit | 8f62bdf21122b615f56088a8e2701e0bb4c71f3b | [
"MIT"
] | 14 | 2020-11-25T20:45:31.000Z | 2021-01-29T13:06:28.000Z | tests/ut/bq_test_kit/interpolators/test_shell_interpolator.py | tiboun/python-bq-test-kit | 8f62bdf21122b615f56088a8e2701e0bb4c71f3b | [
"MIT"
] | null | null | null | # Copyright (c) 2020 Bounkong Khamphousone
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from bq_test_kit.interpolators.shell_interpolator import ShellInterpolator
def test_interpolate():
si = ShellInterpolator({"LOCAL_KEY": "VALUE"})
result = si.interpolate("Local key has value ${LOCAL_KEY}."
" Global key has value ${GLOBAL_KEY}", {"GLOBAL_KEY": "G_VALUE"})
assert result == ("Local key has value VALUE."
" Global key has value G_VALUE")
| 36.733333 | 93 | 0.669691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 300 | 0.544465 |
fa17a8836d0b0829d07d117b80ec33b5f3ae92ce | 1,123 | py | Python | Analytics_Deployment/amls/model_deployment/download_model.py | dciborow/Azure-Synapse-Retail-Recommender-Solution-Accelerator | 7ce56d00071bd1f521429dd15ea14c0d0b217008 | [
"MIT"
] | 12 | 2021-02-13T06:23:05.000Z | 2022-03-26T05:17:49.000Z | Analytics_Deployment/amls/model_deployment/download_model.py | dciborow/Azure-Synapse-Retail-Recommender-Solution-Accelerator | 7ce56d00071bd1f521429dd15ea14c0d0b217008 | [
"MIT"
] | 1 | 2021-10-17T00:23:51.000Z | 2021-10-17T00:23:51.000Z | Analytics_Deployment/amls/model_deployment/download_model.py | dciborow/Azure-Synapse-Retail-Recommender-Solution-Accelerator | 7ce56d00071bd1f521429dd15ea14c0d0b217008 | [
"MIT"
] | 13 | 2021-02-13T06:23:07.000Z | 2022-02-25T11:23:24.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import shutil
from azure.storage.filedatalake import DataLakeServiceClient
from azure.core._match_conditions import MatchConditions
from azure.storage.filedatalake._models import ContentSettings
from utility_functions.az_storage_reader import *
# Enter the name of the Azure Data Lake Storage Gen2 Account
DATA_LAKE_NAME = ""
# Enter the name of the filesystem
DATA_LAKE_FILE_SYSTEM_NAME = ""
# Enter the Primary Key of the Data Lake Account
DATA_LAKE_PRIMARY_KEY = ""
file_system_client = connect_to_adls(DATA_LAKE_NAME, DATA_LAKE_PRIMARY_KEY, DATA_LAKE_FILE_SYSTEM_NAME)
dirs_to_write = ["itemFactors", "metadata", "userFactors"]
prep_dirs_for_write(dirs_to_write, "retailai_recommendation_model")
for directory in dirs_to_write:
copy_files_from_directory(file_system_client, "user/trusted-service-user/retailai_recommendation_model/"+directory, directory, "retailai_recommendation_model")
shutil.make_archive("retailai_recommendation_model", 'zip', "model\\retailai_recommendation_model") | 48.826087 | 163 | 0.836153 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 470 | 0.418522 |
fa17e8b1f375783af0ed86095777f566aaaf4a26 | 15,480 | py | Python | tests/server/test_storage.py | ecoen66/imcsdk | b10eaa926a5ee57cea7182ae0adc8dd1c818b0ab | [
"Apache-2.0"
] | 31 | 2016-06-14T07:23:59.000Z | 2021-09-12T17:17:26.000Z | tests/server/test_storage.py | sthagen/imcsdk | 1831eaecb5960ca03a8624b1579521749762b932 | [
"Apache-2.0"
] | 109 | 2016-05-25T03:56:56.000Z | 2021-10-18T02:58:12.000Z | tests/server/test_storage.py | sthagen/imcsdk | 1831eaecb5960ca03a8624b1579521749762b932 | [
"Apache-2.0"
] | 67 | 2016-05-17T05:53:56.000Z | 2022-03-24T15:52:53.000Z | # Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from nose.tools import assert_equal, raises
from ..connection.info import custom_setup, custom_teardown
from imcsdk.apis.server.storage import _list_to_string
from imcsdk.apis.server.storage import _flatten_list
from imcsdk.apis.server.storage import _flatten_to_string
from imcsdk.apis.server.storage import vd_name_derive
from imcsdk.apis.server.storage import _human_to_bytes
from imcsdk.apis.server.storage import _bytes_to_human
from imcsdk.apis.server.storage import _pd_min_size_get
from imcsdk.apis.server.storage import _pd_total_size_get
from imcsdk.apis.server.storage import _vd_span_depth_get
from imcsdk.apis.server.storage import _raid_max_size_get
from imcsdk.apis.server.storage import virtual_drive_create
from imcsdk.apis.server.storage import virtual_drive_delete
from imcsdk.apis.server.storage import virtual_drive_exists
from imcsdk.apis.server.storage import controller_encryption_enable, \
controller_encryption_disable, controller_encryption_exists, \
controller_encryption_modify_security_key, \
controller_encryption_key_id_generate, controller_encryption_key_generate
from imcsdk.apis.server.storage import \
is_physical_drive_encryption_capable, physical_drive_set_jbod_mode, \
physical_drive_encryption_enable, physical_drive_encryption_disable, \
is_physical_drive_encryption_enabled, physical_drive_get, \
physical_drive_set_unconfigured_good
from imcsdk.imccoreutils import get_server_dn
CONTROLLER_TYPE = "SAS"
CONTROLLER_SLOT = "SLOT-HBA"
PD_DRIVE_SLOT = 4
is_pd_capable = False
def test_list_to_string():
tests = [{"input": [[1]], "expected": '[1]'},
{"input": [[1, 2]], "expected": '[1,2]'},
{"input": [[1, 2], [3, 4]], "expected": '[1,2][3,4]'},
{"input": [[1], [4, 5, 6], [7]], "expected": '[1][4,5,6][7]'}]
for t in tests:
assert_equal(_list_to_string(t["input"]), t["expected"])
def test_flatten_list():
tests = [{"input": [[1]], "expected": [1]},
{"input": [[1, 2]], "expected": [1, 2]},
{"input": [[1, 2], [3, 4]], "expected": [1, 2, 3, 4]}]
for test in tests:
assert_equal(_flatten_list(test["input"]), test["expected"])
@raises(Exception)
def test_flatten_list_error():
_flatten_list([1])
def test_flatten_to_string():
tests = [{"input": [[1]], "expected": '1'},
{"input": [[1, 2]], "expected": '12'},
{"input": [[1, 2], [3, 4]], "expected": '1234'}]
for test in tests:
assert_equal(_flatten_to_string(test["input"]), test["expected"])
def test_vd_name_derive():
tests = [{"dg": [[1]], "raid": 0, "expected": 'RAID0_1'},
{"dg": [[1, 2]], "raid": 1, "expected": 'RAID1_12'},
{"dg": [[1, 2], [3, 4]], "raid": 10, "expected": 'RAID10_1234'}]
for test in tests:
assert_equal(vd_name_derive(test["raid"], test["dg"]),
test["expected"])
def test_human_to_bytes():
tests = [{"input": "1 KB", "expected": 1024},
{"input": "100 MB", "expected": 100 * 1024*1024},
{"input": "121 GB", "expected": 121 * 1024*1024*1024},
{"input": "1 TB", "expected": 1024*1024*1024*1024},
{"input": "1 PB", "expected": 1024*1024*1024*1024*1024},
{"input": "1 EB", "expected": 1024*1024*1024*1024*1024*1024},
{"input": "1 ZB", "expected": 1024*1024*1024*1024*1024*1024*1024},
{"input": "1 YB", "expected": 1024*1024*1024*1024*1024*1024*1024*1024},
{"input": "3814697 MB", "expected": 3814697*1024*1024}]
for test in tests:
assert_equal(_human_to_bytes(test["input"]), test["expected"])
def test_bytes_to_human():
tests = [{"input": 100*1024*1024, "expected": "100 MB"},
{"input": 100*1024*1024*1024, "expected": "100 GB"},
{"input": 100*1024*1024*1024, "format": "MB", "expected": "102400 MB"},
{"input": 3814697*1024*1024, "format": "MB", "expected": "3814697 MB"}]
for test in tests:
if "format" in test:
assert_equal(_bytes_to_human(test["input"], test["format"]), test["expected"])
else:
assert_equal(_bytes_to_human(test["input"]), test["expected"])
def test_pd_min_size_get():
tests = [{"input": [1024*1024, 1024*1024*1024], "expected": 1024*1024},
{"input": [1024*1024*1024, 1024], "expected": 1024},
{"input": [1024*1024*1024, 1024, 1024*10], "expected": 1024}]
for test in tests:
assert_equal(_pd_min_size_get(test["input"]), test["expected"])
def test_pd_total_size_get():
tests = [{"input": [1024*1024, 1024*1024*1024],
"expected": 1024*1024 + 1024*1024*1024},
{"input": [1024*1024*1024, 1024],
"expected": 1024*1024*1024 + 1024},
{"input": [1024*1024*1024, 1024, 1024*10],
"expected": 1024*1024*1024+1024+1024*10}]
for test in tests:
assert_equal(_pd_total_size_get(test["input"]), test["expected"])
def test_vd_spand_depth_get():
tests = [{"input": [[1]], "expected": 1},
{"input": [[1, 2], [3, 4]], "expected": 2},
{"input": [[1, 2, 3], [4], [5, 6]], "expected": 3},
{"input": [[1], [2], [3], [4], [5, 6]], "expected": 5}]
for test in tests:
assert_equal(_vd_span_depth_get(test["input"]), test["expected"])
def test_raid_max_size_get():
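    # Parameters: r = RAID level, s = total capacity of all drives,
    # ms = smallest drive size, sd = span depth.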
tests = [{"r": 0,
"s": 1000*1024*1024*1024,
"ms": 1000*1024*1024*1024,
"sd": 1,
"expected": 1000*1024*1024*1024},
{"r": 1,
"s": 1000*1024*1024*1024,
"ms": 1000*1024*1024*1024,
"sd": 1,
"expected": (1000*1024*1024*1024)/2},
{"r": 5,
"s": 6*1000*1024*1024*1024,
"ms": 1000*1024*1024*1024,
"sd": 2,
"expected": (6*1000*1024*1024*1024) - (2*1*1000*1024*1024*1024)},
{"r": 50,
"s": 6*1000*1024*1024*1024,
"ms": 1000*1024*1024*1024,
"sd": 2,
"expected": (6*1000*1024*1024*1024) - (2*1*1000*1024*1024*1024)},
{"r": 6,
"s": 6*1000*1024*1024*1024,
"ms": 1000*1024*1024*1024,
"sd": 2,
"expected": (6*1000*1024*1024*1024) - (2*2*1000*1024*1024*1024)},
{"r": 60,
"s": 6*1000*1024*1024*1024,
"ms": 1000*1024*1024*1024,
"sd": 2,
"expected": (6*1000*1024*1024*1024) - (2*2*1000*1024*1024*1024)}]
for t in tests:
assert_equal(_raid_max_size_get(t["r"], t["s"], t["ms"], t["sd"]),
t["expected"])
handle = None
def setup_module():
global handle
handle = custom_setup()
def teardown_module():
custom_teardown(handle)
def test_vd_create_delete():
    # Guard: execute only on servers that expose the SLOT-MEZZ storage
    # controller and have drives 1-6 present
server_dn = get_server_dn(handle, server_id=1)
slot_dn = server_dn + "/board/storage-SAS-SLOT-MEZZ"
mo = handle.query_dn(slot_dn)
if mo is None:
return
for i in range(1, 7):
mo = handle.query_dn(slot_dn + "/pd-" + str(i))
if mo is None:
return
tests = [{"dg": [[1]], "ct": CONTROLLER_TYPE, "cs": CONTROLLER_SLOT, "r": 0},
{"dg": [[1, 2, 3, 4]], "ct": CONTROLLER_TYPE, "cs": CONTROLLER_SLOT, "r": 1},
{"dg": [[1, 2, 3]], "ct": CONTROLLER_TYPE, "cs": CONTROLLER_SLOT, "r": 5},
{"dg": [[1, 2, 3]], "ct": CONTROLLER_TYPE, "cs": CONTROLLER_SLOT, "r": 6},
{"dg": [[1, 2], [3, 4], [5, 6]], "ct": CONTROLLER_TYPE, "cs": CONTROLLER_SLOT, "r": 10},
{"dg": [[1, 2, 3], [4, 5, 6]], "ct": CONTROLLER_TYPE, "cs": CONTROLLER_SLOT, "r": 50},
{"dg": [[1, 2, 3], [4, 5, 6]], "ct": CONTROLLER_TYPE, "cs": CONTROLLER_SLOT, "r": 60}]
for t in tests:
vd = virtual_drive_create(handle=handle,
drive_group=t["dg"],
controller_type=t["ct"],
controller_slot=t["cs"],
raid_level=t["r"],
self_encrypt=True)
virtual_drive_delete(handle=handle,
controller_slot=t["cs"],
name=vd.virtual_drive_name)
def test_controller_encryption_enable():
controller_encryption_enable(handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
key_id='Nbv12345', security_key='Nbv12345')
assert_equal(controller_encryption_exists(handle,
CONTROLLER_TYPE,
CONTROLLER_SLOT)[0],
True)
def test_controller_encryption_modify():
controller_encryption_modify_security_key(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
existing_security_key='Nbv12345',
security_key='Nbv123456')
def test_controller_generated_keys():
key_id = controller_encryption_key_id_generate(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT)
    assert_equal(len(key_id) <= 256, True)
key = controller_encryption_key_generate(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT)
assert_equal(len(key) <= 32, True)
controller_encryption_modify_security_key(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
existing_security_key='Nbv123456',
security_key=key)
'''
def test_controller_jbod_mode_enable():
controller_jbod_mode_enable(handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT)
assert_equal(is_controller_jbod_mode_enabled(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT),
True)
'''
def test_pd_jbod_mode_enable():
physical_drive_set_jbod_mode(handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
drive_slot=PD_DRIVE_SLOT)
mo = physical_drive_get(handle, controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
drive_slot=PD_DRIVE_SLOT)
assert_equal(mo.drive_state, 'JBOD')
@raises(Exception)
def test_invalid_pd_jbod_mode_enable():
physical_drive_set_jbod_mode(handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
drive_slot=3)
def test_pd_encryption_enable():
global is_pd_capable
is_pd_capable = is_physical_drive_encryption_capable(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
drive_slot=PD_DRIVE_SLOT)
if not is_pd_capable:
return
physical_drive_encryption_enable(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
drive_slot=PD_DRIVE_SLOT)
enabled = is_physical_drive_encryption_enabled(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
drive_slot=PD_DRIVE_SLOT)
assert_equal(enabled, True)
def test_pd_set_unconfigured_good():
physical_drive_set_unconfigured_good(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
drive_slot=PD_DRIVE_SLOT)
mo = physical_drive_get(handle, controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
drive_slot=PD_DRIVE_SLOT)
assert_equal(mo.drive_state, 'Unconfigured Good')
def test_pd_encryption_disable():
if not is_pd_capable:
return
physical_drive_encryption_disable(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
drive_slot=PD_DRIVE_SLOT)
enabled = is_physical_drive_encryption_enabled(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
drive_slot=PD_DRIVE_SLOT)
assert_equal(enabled, False)
'''
def test_controller_jbod_mode_disable():
controller_jbod_mode_disable(handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT)
assert_equal(is_controller_jbod_mode_enabled(
handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT),
False)
'''
def test_vd_create_delete_with_encryption():
virtual_drive_create(
handle,
drive_group=[[PD_DRIVE_SLOT]],
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
raid_level=0,
self_encrypt=True,
virtual_drive_name='test-vd')
exists, err = virtual_drive_exists(handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
virtual_drive_name='test-vd')
assert_equal(exists, True)
time.sleep(2)
virtual_drive_delete(handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
name='test-vd')
exists, err = virtual_drive_exists(handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT,
virtual_drive_name='test-vd')
assert_equal(exists, False)
def test_controller_encryption_disable():
controller_encryption_disable(handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT)
assert_equal(controller_encryption_exists(handle,
controller_type=CONTROLLER_TYPE,
controller_slot=CONTROLLER_SLOT)[0],
False)
| 38.7 | 101 | 0.581848 | 0 | 0 | 0 | 0 | 350 | 0.02261 | 0 | 0 | 3,079 | 0.198902 |
fa19487e15aeeab2574bd34a25e68c329059d835 | 3,274 | py | Python | parsers/demoty.py | discord-advertiser/api | 81b168f6326a67f3af2927bd9ba54c2a3d8c27a0 | [
"MIT"
] | 18 | 2018-06-08T19:39:11.000Z | 2021-06-04T08:25:57.000Z | parsers/demoty.py | discord-advertiser/api | 81b168f6326a67f3af2927bd9ba54c2a3d8c27a0 | [
"MIT"
] | 17 | 2017-12-05T18:24:38.000Z | 2021-06-01T23:49:28.000Z | parsers/demoty.py | discord-advertiser/api | 81b168f6326a67f3af2927bd9ba54c2a3d8c27a0 | [
"MIT"
] | 6 | 2019-03-20T19:29:41.000Z | 2022-01-25T13:08:24.000Z | from parsel import Selector
from utils import (
download,
remove_big_whitespaces_selector,
find_id_in_url,
catch_errors,
get_last_part_url,
)
from data import VideoContent, GalleryContent, ImageContent, Meme, Author, Page
import re
ROOT = "https://m.demotywatory.pl"
def scrap(url):
html = download(url)
return parse(html)
def parse(html):
document = Selector(text=html)
memes = [
catch_errors(parse_meme, element) for element in document.css(".demotivator")
]
memes = [meme for meme in memes if meme is not None]
title = document.css("title::text").get()
next_page_url = "/demotywatory/page/" + get_last_part_url(
document.css("a.next-page::attr(href)").get()
)
return Page(title, memes, next_page_url)
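# Minimal usage sketch (requires network access; assumes Page and Meme expose
# their constructor fields as attributes):
#
#   page = scrap(ROOT)
#   for meme in page.memes:
#       print(meme.title, meme.url)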
def parse_gallery(html):
title = html.css("a::text").get()
url = html.css("a::attr(href)").get()
slides = []
gallery_html = download(ROOT + url)
gallery_page_document = Selector(text=gallery_html)
for slide_element in gallery_page_document.css(".rsSlideContent"):
slide = slide_element.css("img::attr(src)").get()
slides = slides + [slide]
next_gallery_page_url = gallery_page_document.css(
".gall_next_page > a::attr(href)"
).get()
while next_gallery_page_url is not None:
gallery_html = download(ROOT + url + next_gallery_page_url)
gallery_page_document = Selector(text=gallery_html)
for slide_element in gallery_page_document.css(".rsSlideContent"):
slide = slide_element.css("img::attr(src)").get()
slides = slides + [slide]
next_gallery_page_url = gallery_page_document.css(
".gall_next_page > a::attr(href)"
).get()
slides = [slide for slide in slides if slide is not None]
return (title, url, GalleryContent(slides), None)
def parse_content(html):
clazz = html.attrib["class"]
if "image_gallery" in clazz:
return parse_gallery(html)
elif "image" in clazz or "image_gif" in clazz:
image = html.css("img.demot_pic")
title = image.attrib["alt"]
src = image.attrib["src"].replace("//upl", "/upl")
url = html.css("a::attr(href)").get()
return (title, url, ImageContent(src), None)
elif "video_mp4" in clazz:
src = html.css("source::attr(src)").get().replace("//upl", "/upl")
title = html.css(".demot_title::text").get()
description = html.css(".demot_description::text").get()
url = html.css("a::attr(href)").get()
return (title, url, VideoContent(src), description)
return (None, None, None, None)
def parse_meme(m):
title, url, content, description = parse_content(m)
if url is None:
return
points = None
points_text = m.css(".up_votes::text").get()
try:
points = int(points_text)
    except (TypeError, ValueError):
        pass  # vote count missing or not numeric
comment_count = None
comments_count_text = m.css(".demot-comments a::text").get()
try:
comment_count = int(comments_count_text)
    except (TypeError, ValueError):
        pass  # comment count missing or not numeric
return Meme(
title,
ROOT + url,
"/demotywatory/{}".format(find_id_in_url(url)),
content,
None,
None,
points,
comment_count,
)
| 27.982906 | 85 | 0.626451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 513 | 0.156689 |
fa1c471e585180b23bf9534e145118cd67482d08 | 3,036 | py | Python | evaluate_sklearn.py | syenn2896/batik-recommendation | c1bd88e9e4448d5baa48880524ab9ffa356f5777 | [
"BSD-2-Clause"
] | null | null | null | evaluate_sklearn.py | syenn2896/batik-recommendation | c1bd88e9e4448d5baa48880524ab9ffa356f5777 | [
"BSD-2-Clause"
] | null | null | null | evaluate_sklearn.py | syenn2896/batik-recommendation | c1bd88e9e4448d5baa48880524ab9ffa356f5777 | [
"BSD-2-Clause"
] | null | null | null | import sys
import tables
import numpy as np
import argparse
import pickle
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# config
classifiers = [
LogisticRegression(),
SVC(),
MLPClassifier(),
DecisionTreeClassifier(),
GradientBoostingClassifier(),
RandomForestClassifier(),
]
CV = 10
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate scikit-learn classifiers using extracted dataset features', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('train_file', help="Path to train data (features) input file")
parser.add_argument('test_file', help="Path to test data (features) input file")
parser.add_argument('--output_model', '-o', default='vgg16_best_classifier.pkl', help="Best model output file")
parser.add_argument('--n_folds', type=int, default=CV, help="Number of folds (K) for K-fold cross validation")
args = parser.parse_args()
train_file = args.train_file
test_file = args.test_file
output_model = args.output_model
n_folds = args.n_folds
# loading dataset
print('Loading train dataset: {}'.format(train_file))
train_datafile = tables.open_file(train_file, mode='r')
train_dataset = train_datafile.root
print('Train data: {}'.format((train_dataset.data.nrows,) + train_dataset.data[0].shape))
print('Loading test dataset: {}'.format(test_file))
test_datafile = tables.open_file(test_file, mode='r')
test_dataset = test_datafile.root
print('Test data: {}'.format((test_dataset.data.nrows,) + test_dataset.data[0].shape))
X = np.concatenate((train_dataset.data[:], test_dataset.data[:]), axis=0)
y = np.concatenate((train_dataset.labels[:].argmax(1), test_dataset.labels[:].argmax(1)), axis=0)
# close dataset
train_datafile.close()
test_datafile.close()
print('Cross validation with k={}..'.format(n_folds))
best_classifier = None
best_score = 0.0
best_stdev = 0.0
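    # Keep the classifier with the highest mean CV accuracy; ties are broken
    # in favor of the smaller standard deviation.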
for classifier in classifiers:
# cross_validate
scores = cross_val_score(classifier, X, y, cv=n_folds)
mean = scores.mean()
stdev = scores.std() * 2
print("{} CV accuracy: {:0.2f} (+/- {:0.2f})".format(type(classifier).__name__, mean, stdev))
# find the best
if (mean > best_score) or (mean == best_score and stdev < best_stdev):
best_classifier = classifier
best_score = mean
best_stdev = stdev
if best_classifier is not None:
print("Saving the best classifer: {} {} +/- {}".format(type(best_classifier).__name__, best_score, best_stdev))
best_classifier.fit(X, y)
pickle.dump(best_classifier, open(output_model, 'wb'))
print("Model saved: {}".format(output_model))
| 38.43038 | 172 | 0.737154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 612 | 0.201581 |
fa1ec522fb870aa12118b32f01be14c44f8786bc | 552 | py | Python | 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0019.py | moacirsouza/nadas | ad98d73b4281d1581fd2b2a9d29001acb426ee56 | [
"MIT"
] | 1 | 2020-07-03T13:54:18.000Z | 2020-07-03T13:54:18.000Z | 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0019.py | moacirsouza/nadas | ad98d73b4281d1581fd2b2a9d29001acb426ee56 | [
"MIT"
] | null | null | null | 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0019.py | moacirsouza/nadas | ad98d73b4281d1581fd2b2a9d29001acb426ee56 | [
"MIT"
] | null | null | null | print('[-- Um professor quer sortear um dos seus quatro alunos para apagar o quadro. Faça um programa que ajude ele, lendo o nome deles e escrevendo o nome do escolhido. --]\n')
from random import choice
nome01 = input('Digite o nome do primeiro aluno: ')
nome02 = input('Digite o nome do segundo aluno: ')
nome03 = input('Digite o nome do terceiro aluno: ')
nome04 = input('Digite o nome do quarto aluno: ')
alunos = [nome01,nome02,nome03,nome04]
alunoquemiraapagar = choice(alunos)
print('O aluno escolhido foi: {} ' .format(alunoquemiraapagar))
| 42.461538 | 177 | 0.737319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 336 | 0.607595 |
fa1f18445f40e5b73ddd2287b5c90937270ba682 | 624 | py | Python | setup.py | swdream/flyfingers | 1d2422139d0cb2d64605e89646b693dc86cc4d96 | [
"MIT"
] | 2 | 2015-06-29T09:46:11.000Z | 2015-06-29T23:54:44.000Z | setup.py | swdream/flyfingers | 1d2422139d0cb2d64605e89646b693dc86cc4d96 | [
"MIT"
] | null | null | null | setup.py | swdream/flyfingers | 1d2422139d0cb2d64605e89646b693dc86cc4d96 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import flyfingers
requisites = []
setup(
name='flyfingers',
version=flyfingers.__version__,
description='Learn to type 10 fingers',
scripts=['scripts/flyfingers'],
author='Thanh Nguyen Tuong',
author_email='ngtthanh1010@gmail.com',
    packages=['flyfingers'],
    install_requires=requisites,
url='https://github.com/swdream/flyfingers',
license='MIT',
classifiers=[
'Environment :: Console',
'Topic :: Terminals :: Terminal Emulators/X Terminals',
],
)
| 22.285714 | 63 | 0.661859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.450321 |
fa1f7e2d0ecc70ad4189a83e4a795841b1fe3bd4 | 7,764 | py | Python | kivy_modules/widget/slider.py | VictorManhani/polingua | 16309ef4b25347e2d114749a24dfec5f9696e30f | [
"MIT"
] | null | null | null | kivy_modules/widget/slider.py | VictorManhani/polingua | 16309ef4b25347e2d114749a24dfec5f9696e30f | [
"MIT"
] | null | null | null | kivy_modules/widget/slider.py | VictorManhani/polingua | 16309ef4b25347e2d114749a24dfec5f9696e30f | [
"MIT"
] | null | null | null | __all__ = ('FlexSlider', )
import os
import sys
root = os.path.abspath(
os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))))
sys.path.insert(0,root)
from kivy.lang import Builder
from kivy_modules.widget.widget import Widget
from kivy.properties import (NumericProperty, AliasProperty, OptionProperty,
ReferenceListProperty, BoundedNumericProperty,
StringProperty, ListProperty, BooleanProperty)
# oldcwd = os.getcwd()
# os.chdir(path)
# module_name = "..__init__"
# class_name = "Builder"
# # klass = getattr(__import__(module_name), class_name)
# # print(klass)
# print(os.listdir())
# mod = __import__(module_name)
# print(mod)
class FlexSlider(Widget):
value = NumericProperty(0.)
min = NumericProperty(0.)
max = NumericProperty(100.)
padding = NumericProperty('16sp') # default: 16sp
orientation = OptionProperty('horizontal', options=(
'vertical', 'horizontal'))
range = ReferenceListProperty(min, max)
step = BoundedNumericProperty(0, min=0)
background_horizontal = StringProperty(
'atlas://data/images/defaulttheme/sliderh_background')
background_disabled_horizontal = StringProperty(
'atlas://data/images/defaulttheme/sliderh_background_disabled')
background_vertical = StringProperty(
'atlas://data/images/defaulttheme/sliderv_background')
background_disabled_vertical = StringProperty(
'atlas://data/images/defaulttheme/sliderv_background_disabled')
background_width = NumericProperty('36sp')
cursor_image = StringProperty(
'atlas://data/images/defaulttheme/slider_cursor')
cursor_disabled_image = StringProperty(
'atlas://data/images/defaulttheme/slider_cursor_disabled')
cursor_width = NumericProperty('32sp')
cursor_height = NumericProperty('32sp')
cursor_size = ReferenceListProperty(cursor_width, cursor_height)
border_horizontal = ListProperty([0, 18, 0, 18])
border_vertical = ListProperty([18, 0, 18, 0])
value_track = BooleanProperty(False)
value_track_color = ListProperty([1, 1, 1, 1])
value_track_width = NumericProperty('3dp')
sensitivity = OptionProperty('all', options=('all', 'handle'))
def on_min(self, *largs):
self.value = min(self.max, max(self.min, self.value))
def on_max(self, *largs):
self.value = min(self.max, max(self.min, self.value))
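    # value_normalized maps `value` onto [0, 1] across [min, max]; the setter
    # snaps to the nearest multiple of `step` when step is non-zero.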
def get_norm_value(self):
vmin = self.min
d = self.max - vmin
if d == 0:
return 0
return (self.value - vmin) / float(d)
def set_norm_value(self, value):
vmin = self.min
vmax = self.max
step = self.step
val = min(value * (vmax - vmin) + vmin, vmax)
if step == 0:
self.value = val
else:
self.value = min(round((val - vmin) / step) * step + vmin,
vmax)
value_normalized = AliasProperty(get_norm_value, set_norm_value,
bind=('value', 'min', 'max'),
cache=True)
def get_value_pos(self):
padding = self.padding
x = self.x
y = self.y
nval = self.value_normalized
if self.orientation == 'horizontal':
return (x + padding + nval * (self.width - 2 * padding), y)
else:
return (x, y + padding + nval * (self.height - 2 * padding))
def set_value_pos(self, pos):
padding = self.padding
x = min(self.right - padding, max(pos[0], self.x + padding))
y = min(self.top - padding, max(pos[1], self.y + padding))
if self.orientation == 'horizontal':
if self.width == 0:
self.value_normalized = 0
else:
self.value_normalized = (x - self.x - padding
) / float(self.width - 2 * padding)
else:
if self.height == 0:
self.value_normalized = 0
else:
self.value_normalized = (y - self.y - padding
) / float(self.height - 2 * padding)
value_pos = AliasProperty(get_value_pos, set_value_pos,
bind=('pos', 'size', 'min', 'max', 'padding',
'value_normalized', 'orientation'),
cache=True)
def on_touch_down(self, touch):
if self.disabled or not self.collide_point(*touch.pos):
return
if touch.is_mouse_scrolling:
if 'down' in touch.button or 'left' in touch.button:
if self.step:
self.value = min(self.max, self.value + self.step)
else:
self.value = min(
self.max,
self.value + (self.max - self.min) / 20)
if 'up' in touch.button or 'right' in touch.button:
if self.step:
self.value = max(self.min, self.value - self.step)
else:
self.value = max(
self.min,
self.value - (self.max - self.min) / 20)
elif self.sensitivity == 'handle':
if self.children[0].collide_point(*touch.pos):
touch.grab(self)
else:
touch.grab(self)
self.value_pos = touch.pos
return True
def on_touch_move(self, touch):
if touch.grab_current == self:
self.value_pos = touch.pos
self.loading_value_pos = touch.pos[0] - 10, touch.pos[1]
return True
def on_touch_up(self, touch):
if touch.grab_current == self:
self.value_pos = touch.pos
return True
Builder.load_string("""
<FlexSlider>:
canvas:
Color:
rgb: 1, 1, 1
RoundedRectangle:
radius: self.border_horizontal if self.orientation == 'horizontal' else self.border_vertical
pos: (self.x + self.padding, self.center_y - self.background_width / 2) if self.orientation == 'horizontal' else (self.center_x - self.background_width / 2, self.y + self.padding)
size: (self.width - self.padding * 2, self.background_width) if self.orientation == 'horizontal' else (self.background_width, self.height - self.padding * 2)
Color:
rgba: root.value_track_color if self.value_track and self.orientation == 'horizontal' else [1, 1, 1, 0]
Line:
width: self.value_track_width
points: self.x + self.padding, self.center_y - self.value_track_width / 2, self.value_pos[0], self.center_y - self.value_track_width / 2
Color:
rgba: root.value_track_color if self.value_track and self.orientation == 'vertical' else [1, 1, 1, 0]
Line:
width: self.value_track_width
points: self.center_x, self.y + self.padding, self.center_x, self.value_pos[1]
Color:
rgb: 1, 1, 1
Label:
canvas:
Color:
rgb: 0, 1, 1
RoundedRectangle:
pos: (root.value_pos[0] - root.cursor_width / 2, root.center_y - root.cursor_height / 2) if root.orientation == 'horizontal' else (root.center_x - root.cursor_width / 2, root.value_pos[1] - root.cursor_height / 2)
size: root.cursor_size
""")
if __name__ == '__main__':
from kivy.app import App
class FlexSliderApp(App):
def build(self):
return FlexSlider(padding=25,
                              value_track=True,
                              value_track_color=[1, 0, 0, 1])
FlexSliderApp().run() | 39.815385 | 229 | 0.578053 | 5,314 | 0.684441 | 0 | 0 | 0 | 0 | 0 | 0 | 2,404 | 0.309634 |
fa23b5ad8e2d48f157fde43ab9bbb1141bdb0d96 | 676 | py | Python | tests/conftest.py | tohanss/repobee-sanitizer | d7a22dc51f298857db4f0138c04ffd5f3fe43511 | [
"MIT"
] | null | null | null | tests/conftest.py | tohanss/repobee-sanitizer | d7a22dc51f298857db4f0138c04ffd5f3fe43511 | [
"MIT"
] | 137 | 2020-06-18T14:57:11.000Z | 2022-01-16T15:58:27.000Z | tests/conftest.py | tohanss/repobee-sanitizer | d7a22dc51f298857db4f0138c04ffd5f3fe43511 | [
"MIT"
] | 2 | 2020-06-20T21:47:40.000Z | 2020-06-24T13:04:54.000Z | """Global fixtures and setup code for the test suite."""
import sys
import pathlib
import pytest
import repobee
sys.path.append(str(pathlib.Path(__file__).parent / "helpers"))
@pytest.fixture(autouse=True)
def unregister_plugins():
"""Fixture that automatically unregisters all plugins after each test
function. This is important for the end-to-end tests.
"""
    yield
    repobee.unregister_all_plugins()
@pytest.fixture
def sanitizer_config(tmpdir):
"""Config file which only specifies sanitizer as a plugin."""
config_file = pathlib.Path(tmpdir) / "sanitizer_config.cnf"
config_file.write_text("[DEFAULTS]\nplugins = sanitizer\n")
yield config_file
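# Illustrative only (not part of the original suite): a test would request the
# fixture above by name, e.g.
#
#     def test_reads_config(sanitizer_config):
#         assert "plugins = sanitizer" in sanitizer_config.read_text()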
| 28.166667 | 73 | 0.745562 | 0 | 0 | 245 | 0.362426 | 493 | 0.72929 | 0 | 0 | 318 | 0.470414 |
fa258850d54e2ff8a1971affb1d60df347f4e149 | 1,945 | py | Python | query_flight/tests/utils/test_sel.py | eskemojoe007/sw_web_app | 92e9b6cd3fedcbbeefc9275cdc49db2fdefaa09e | [
"MIT"
] | null | null | null | query_flight/tests/utils/test_sel.py | eskemojoe007/sw_web_app | 92e9b6cd3fedcbbeefc9275cdc49db2fdefaa09e | [
"MIT"
] | 17 | 2018-06-04T16:02:51.000Z | 2021-06-10T20:26:45.000Z | query_flight/tests/utils/test_sel.py | eskemojoe007/sw_web_app | 92e9b6cd3fedcbbeefc9275cdc49db2fdefaa09e | [
"MIT"
] | null | null | null | import pytest
from query_flight import utils
from query_flight.models import Search, Flight, Layover, Airport
from django.utils import timezone
# @pytest.fixture
# def basic_search():
# return Search.objects.create()
@pytest.fixture
def basic_sw_inputs():
return {'browser': 1, 'originationAirportCode': ['ATL', 'DAL'],
'destinationAirportCode': 'DEN',
'departureDate': timezone.now().date()}
@pytest.mark.django_db
@pytest.mark.parametrize('input,iterable', [
(['ATL', 'BOI', 'DEN'], True),
([1, 2, 3], True),
((1, 2, 3), True),
('string of garbage', False),
(b'string of garbage', False),
(1, False),
])
def test_check_iterable(input, iterable, basic_sw_inputs):
assert utils.SW_Sel_base(
**basic_sw_inputs)._check_iterable(input) == iterable
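# The parametrized cases above treat lists and tuples as iterable while
# strings, bytes and ints are not -- presumably how SW_Sel_base decides
# whether to fan out over several airport codes.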
@pytest.mark.django_db
def test_create_search1(basic_sw_inputs):
search = Search.objects.create()
basic_sw_inputs.update({'search': search})
s = utils.SW_Sel_base(**basic_sw_inputs)
assert s.search is search
assert s.search.id == search.id
@pytest.mark.django_db
def test_create_search2(basic_sw_inputs):
s = utils.SW_Sel_base(**basic_sw_inputs)
assert isinstance(s.search, Search)
assert Search.objects.count() == 1
@pytest.mark.django_db
def test_create_search3(basic_sw_inputs):
basic_sw_inputs.update({'search': 1})
with pytest.raises(ValueError):
        utils.SW_Sel_base(**basic_sw_inputs)
@pytest.mark.django_db
def test_cases(basic_sw_inputs):
s = utils.SW_Sel_Multiple(**basic_sw_inputs)
assert s.cases[0] == {'departureDate': timezone.now().date(),
'destinationAirportCode': 'DEN',
'originationAirportCode': 'ATL'}
assert s.cases[1] == {'departureDate': timezone.now().date(),
'destinationAirportCode': 'DEN',
'originationAirportCode': 'DAL'}
| 28.188406 | 67 | 0.659126 | 0 | 0 | 0 | 0 | 1,705 | 0.876607 | 0 | 0 | 393 | 0.202057 |
fa2894498609ba22cccb5fff7dcf91feb33619f2 | 4,857 | py | Python | tests/blockchain_tests.py | AoHRuthless/Doubloon | 9279ac7decd434d43bf9b03487691aa52aab499f | [
"Apache-2.0"
] | 1 | 2018-08-13T10:26:39.000Z | 2018-08-13T10:26:39.000Z | tests/blockchain_tests.py | AoHRuthless/Doubloon | 9279ac7decd434d43bf9b03487691aa52aab499f | [
"Apache-2.0"
] | null | null | null | tests/blockchain_tests.py | AoHRuthless/Doubloon | 9279ac7decd434d43bf9b03487691aa52aab499f | [
"Apache-2.0"
] | null | null | null | import sys
sys.path.append(sys.path[0] + '/src')
from unittest import TestCase
from src.blockchain import Blockchain
from src import constant
PUBLIC_KEY = '30819f300d06092a864886f70d010101050003818d0030818902818100d99c9347b6ecd418b1df48012201c5bd2869a707e45dee91a5c63027dc8020210aa4cf6e34e81fc200f29c893add94fefbf37594a964641fc52f8905280c4d93457d4cee5fb216a09a9e8688c62e26bc9e962357c019c5e6c73818f155b87ccaa70059cfa0698c85f5d982bef73bc84e6dfac540cf4f43308b799b8439c1011d0203010001'
PRIVATE_KEY = '3082025b02010002818100d99c9347b6ecd418b1df48012201c5bd2869a707e45dee91a5c63027dc8020210aa4cf6e34e81fc200f29c893add94fefbf37594a964641fc52f8905280c4d93457d4cee5fb216a09a9e8688c62e26bc9e962357c019c5e6c73818f155b87ccaa70059cfa0698c85f5d982bef73bc84e6dfac540cf4f43308b799b8439c1011d02030100010281802b55c5f2a317f888ce6b33909e30122bc02f8206cd507360e7cd56eba93a8eab65ce3a4cad1688b47eb1d1c0764b880f5b273984185398a8c700d75d828328b34bffe18565d9145a0db7aef152a9452642acc0518ccfa224287ba38fabb93a51f0da4db17b82a0ca12b6b69ff1c7b172061ce60ae9665b064ee21490e5cd0215024100db115ac3a95d00bdeabb429f841100d2786ab0849753eed0e0208020e8fe2e5d7e171d69d7552a9adee2840e846e56a6b1452c3a7b7c330f02595b3479f815cf024100fe4c5fe8c71d1e746d83b9bd9021d1fd6027090382321421f432ffabc713fca58cf1d116108e493a7b98854be96c761300a891f281db40ffdb9edc09cb29e15302404ca9f3209c299ef3d7acb6f10a0fc540e2c13b8afb46754205dd79d98a90417b987fd05c54ee4a1daeb888cc67ce1166fe8c9da0cdcc36361f7553f4b6667a830240675e845e0b123b1ef8a5630b3b5b84108ad55344a9d7d1773bdcbf31046b8b7780238bea7c305a73fb69b445774d2f71ea029bd108182803d9326a1f51066521024052b9850ce79b3b2f2eeb481999d65426089fa3680fd35568e5010ba0121e37cf10c64ecc20843a26a09c5d5eefbb35a43061cd33b7adca63965d7dbfcedf6544'
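# The two hex blobs above appear to be a matching DER-encoded RSA key pair
# used purely as signing fixtures for the transaction tests below; they are
# not secrets worth protecting.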
class BlockchainTest(TestCase):
def setUp(self):
self.blockchain = Blockchain()
class BlockchainSetupTests(BlockchainTest):
def test_init(self):
self.assertEqual(self.blockchain.current_transactions, [])
self.assertEqual(self.blockchain.peers, set())
self.assertEqual(len(self.blockchain.chain), 1)
class BlockchainBlockTests(BlockchainTest):
def test_add_block_with_prev_hash_provided(self):
result = self.blockchain.add_block(10, 20)
block_dict = {
'index': 2,
'timestamp': self.blockchain.last_block.timestamp,
'transactions': [],
'proof': 10,
'prev_hash': 20
}
self.assertEqual(result, block_dict)
def test_add_block_with_no_prev_hash_provided(self):
result = self.blockchain.add_block(10)
block_dict = {
'index': 2,
'timestamp': self.blockchain.last_block.timestamp,
'transactions': [],
'proof': 10,
'prev_hash': self.blockchain.chain[0].hash
}
self.assertEqual(result, block_dict)
def test_last_block_points_to_end(self):
self.assertEqual(self.blockchain.last_block,
self.blockchain.chain[0])
self.blockchain.add_block(10, 20)
self.assertEqual(self.blockchain.last_block,
self.blockchain.chain[1])
class BlockchainTransactionTests(BlockchainTest):
def test_add_miner_transaction(self):
result = self.blockchain.add_transaction(constant.MINER_KEY,
'receiver', 3, PRIVATE_KEY)
self.assertEqual(result, 2)
def test_add_regular_transaction_with_valid_private_key_succeeds(self):
result = self.blockchain.add_transaction(PUBLIC_KEY, 'receiver', 3,
PRIVATE_KEY)
self.assertEqual(result, 2)
def test_add_regular_transaction_with_invalid_private_key_fails(self):
result = self.blockchain.add_transaction(PUBLIC_KEY, 'receiver', 3,
PUBLIC_KEY)
self.assertEqual(result, -1)
class BlockchainPeerTests(BlockchainTest):
def test_add_peer_succeeds(self):
result1 = self.blockchain.add_peer('http://127.0.0.1:9000')
result2 = self.blockchain.add_peer('http://127.0.0.1:9001')
self.assertTrue(result1)
self.assertTrue(result2)
self.assertIn('127.0.0.1:9000', self.blockchain.peers)
self.assertIn('127.0.0.1:9001', self.blockchain.peers)
def test_add_peer_fails(self):
result = self.blockchain.add_peer('/127.0.0.1:9000')
self.assertFalse(result)
self.assertNotIn('127.0.0.1:9000', self.blockchain.peers)
def test_peers_idempotent(self):
self.assertEqual(len(self.blockchain.peers), 0)
self.blockchain.add_peer('http://127.0.0.1:9000')
self.assertEqual(len(self.blockchain.peers), 1)
self.blockchain.add_peer('http://127.0.0.1:9000')
self.assertEqual(len(self.blockchain.peers), 1)
class BlockchainProofTests(BlockchainTest):
def test_proof_of_work(self):
self.assertEqual(self.blockchain.proof_of_work(100), 33575) | 50.072165 | 1,230 | 0.774758 | 3,130 | 0.644431 | 0 | 0 | 0 | 0 | 0 | 0 | 1,835 | 0.377805 |
fa298b5e11ded681d1d7dc27673c2c5d79e5b845 | 487 | py | Python | compte/migrations/0003_auto_20210701_1337.py | bzg/acceslibre | 52c7c6990dc132da71a92e856d65f4a983c3b15a | [
"MIT"
] | 8 | 2020-07-23T08:17:28.000Z | 2022-03-09T22:31:36.000Z | compte/migrations/0003_auto_20210701_1337.py | bzg/acceslibre | 52c7c6990dc132da71a92e856d65f4a983c3b15a | [
"MIT"
] | 37 | 2020-07-01T08:47:33.000Z | 2022-02-03T19:50:58.000Z | compte/migrations/0003_auto_20210701_1337.py | bzg/acceslibre | 52c7c6990dc132da71a92e856d65f4a983c3b15a | [
"MIT"
] | 4 | 2021-04-08T10:57:18.000Z | 2022-01-31T13:16:31.000Z | from django.contrib.auth import get_user_model
from django.db import migrations
from compte.models import UserPreferences
def add_preferences_to_users(apps, schema_editor):
users = get_user_model().objects.all()
for user in users:
UserPreferences.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
("compte", "0002_userpreferences"),
]
operations = [
migrations.RunPython(add_preferences_to_users),
]
| 22.136364 | 55 | 0.724846 | 192 | 0.394251 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.061602 |
fa29ea4e3eeb4f0c285cf1d53297cc76c1421a6f | 3,846 | py | Python | mechroutines/es/_routines/hr.py | sjklipp/mechdriver | 17c3d9bc82116954b331955e87a60e9adc5e1de9 | [
"Apache-2.0"
] | null | null | null | mechroutines/es/_routines/hr.py | sjklipp/mechdriver | 17c3d9bc82116954b331955e87a60e9adc5e1de9 | [
"Apache-2.0"
] | null | null | null | mechroutines/es/_routines/hr.py | sjklipp/mechdriver | 17c3d9bc82116954b331955e87a60e9adc5e1de9 | [
"Apache-2.0"
] | null | null | null | """ es_runners for coordinate scans
"""
import automol
import elstruct
from mechroutines.es.runner import scan
from mechroutines.es.runner import qchem_params
from mechlib.amech_io import printer as ioprinter
from phydat import phycon
def hindered_rotor_scans(
zma, spc_info, mod_thy_info, thy_save_fs,
scn_run_fs, scn_save_fs,
rotors, tors_model, method_dct,
overwrite,
saddle=False,
increment=30.0*phycon.DEG2RAD,
retryfail=True, chkstab=None):
""" Perform scans over each of the torsional coordinates
"""
if tors_model != '1dhrfa':
script_str, kwargs = qchem_params(
method_dct, job=elstruct.Job.OPTIMIZATION)
scn_typ = 'relaxed'
else:
script_str, kwargs = qchem_params(
method_dct, job=elstruct.Job.ENERGY)
scn_typ = 'rigid'
run_tors_names = automol.rotor.names(rotors)
run_tors_grids = automol.rotor.grids(rotors, increment=increment)
# Set constraints
const_names = automol.zmat.set_constraint_names(
zma, run_tors_names, tors_model)
# Set appropriate value for check stability
# If not set, don't check if saddle=True
if chkstab is None:
chkstab = bool(not saddle)
ioprinter.run_rotors(run_tors_names, const_names)
# for tors_name, tors_grid in zip(tors_names, tors_grids):
for tors_names, tors_grids in zip(run_tors_names, run_tors_grids):
ioprinter.info_message(
'Running Rotor: {}...'.format(tors_names),
newline=1)
# Setting the constraints
constraint_dct = automol.zmat.constraint_dct(
zma, const_names, tors_names)
scan.execute_scan(
zma=zma,
spc_info=spc_info,
mod_thy_info=mod_thy_info,
thy_save_fs=thy_save_fs,
coord_names=tors_names,
coord_grids=tors_grids,
scn_run_fs=scn_run_fs,
scn_save_fs=scn_save_fs,
scn_typ=scn_typ,
script_str=script_str,
overwrite=overwrite,
update_guess=True,
reverse_sweep=True,
saddle=saddle,
constraint_dct=constraint_dct,
retryfail=retryfail,
chkstab=False,
**kwargs,
)
def check_hr_pot(tors_pots, tors_zmas, tors_paths, emax=-0.5, emin=-10.0):
""" Check hr pot to see if a new mimnimum is needed
"""
new_min_zma = None
print('\nAssessing the HR potential...')
for name in tors_pots:
print('- Rotor {}'.format(name))
pots = tors_pots[name].values()
zmas = tors_zmas[name].values()
paths = tors_paths[name].values()
for pot, zma, path in zip(pots, zmas, paths):
if emin < pot < emax:
new_min_zma = zma
emin = pot
                print('    - New minimum energy ZMA found for torsion')
print(' - Ene = {}'.format(pot))
print(' - Found at path: {}'.format(path))
print(automol.zmat.string(zma))
return new_min_zma
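# Illustrative sketch (data shapes assumed from the loop above): the three
# arguments are parallel mappings keyed by torsion name, e.g.
#
#     tors_pots = {'D5': {0.0: -0.2, 30.0: -1.4}}
#     tors_zmas = {'D5': {0.0: zma_a, 30.0: zma_b}}
#     tors_paths = {'D5': {0.0: 'run/scan/0', 30.0: 'run/scan/30'}}
#     new_zma = check_hr_pot(tors_pots, tors_zmas, tors_paths)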
# Read and print the potential
# sp_fs = autofile.fs.single_point(ini_cnf_save_path)
# ref_ene = sp_fs[-1].file.energy.read(mod_ini_thy_info[1:4])
# ref_ene = ini_cnf_save_fs[-1].file.energy.read(ini_min_cnf_locs)
# tors_pots, tors_zmas = {}, {}
# for tors_names, tors_grids in zip(run_tors_names, run_tors_grids):
# constraint_dct = automol.zmat.build_constraint_dct(
# zma, const_names, tors_names)
# pot, _, _, _, zmas, _ = filesys.read.potential(
# tors_names, tors_grids,
# ini_cnf_save_path,
# mod_ini_thy_info, ref_ene,
# constraint_dct,
# read_zma=True)
# tors_pots[tors_names] = pot
# tors_zmas[tors_names] = zmas
# # Print potential
# ioprinter.hr_pot(tors_pots)
| 31.268293 | 74 | 0.629225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,236 | 0.321373 |
fa2aef92f1386e419f64a36bf512725d85f56e94 | 1,725 | py | Python | start_training.py | DrInfy/TheHarvester | dd21194ab2220c8edb73352c299d2bfb0f11d7d6 | [
"MIT"
] | 6 | 2020-03-08T21:04:47.000Z | 2021-05-29T07:14:25.000Z | start_training.py | DrInfy/TheHarvester | dd21194ab2220c8edb73352c299d2bfb0f11d7d6 | [
"MIT"
] | 5 | 2020-04-20T08:41:48.000Z | 2021-01-04T18:15:39.000Z | start_training.py | DrInfy/TheHarvester | dd21194ab2220c8edb73352c299d2bfb0f11d7d6 | [
"MIT"
] | 2 | 2021-01-18T21:07:56.000Z | 2021-11-22T15:24:21.000Z | import subprocess
wsl = "wsl python3.7 /mnt/" + YOUR_PATH_TO_HARVESTER
# to = "--timeout 900 -z"
to = "-p2 ai.terran.hard"
to2 = "-p2 ai.zerg.hard"
to3 = "-p2 ai.protoss.hard"
def ai_opponents(difficulty: str) -> str:
text = ""
for race in ["zerg", "protoss", "terran"]:
for build in ["rush", "timing", "power", "air", "air", "macro"]:
text += f"ai.{race}.{difficulty}.{build},"
return text.strip(",")
harvester_test_pattern = (
"harvesterzerg.learning,"
"harvesterzerg.scripted,"
"harvesterzerg.scripted.default.2,"
"harvesterzerg.learning.default.2,"
"harvesterzerg.scripted.default.3,"
"harvesterzerg.learning.default.3,"
"harvesterzerg.scripted.default.4,"
"harvesterzerg.learning.default.4,"
"harvesterzerg.scripted.default.5,"
"harvesterzerg.learning.default.5,"
"harvesterzerg.scripted.default.6,"
"harvesterzerg.learning.default.6,"
"harvesterzerg.scripted.default.7,"
"harvesterzerg.learning.default.7,"
"harvesterzerg.play.default.master,"
).strip(",")
cmd_list_ml = [
# f"{wsl} -p1 harvesterzerg.learning -p2 harvesterzerg.learning.default.2",
# f"{wsl} -p1 harvesterzerg.learning.default.2 -p2 harvesterzerg.learning.default.3",
# f"{wsl} -p1 harvesterzerg.learning -p2 harvesterzerg.learning.default.3",
]
for i in range(0, 15):
cmd_list_ml.append(f'{wsl} -p1 {harvester_test_pattern} -p2 {ai_opponents("hard")}')
for i in range(0, 15):
cmd_list_ml.append(f'{wsl} -p1 {harvester_test_pattern} -p2 {ai_opponents("veryhard")}')
index = 0
for cmd in cmd_list_ml:
index += 1
final_cmd = cmd + " --port " + str(10000 + index * 10)
cmds = final_cmd.split(" ")
subprocess.Popen(cmds)
| 33.173077 | 92 | 0.667826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,094 | 0.634203 |
fa2cad5b5c8ded6db2143b04e6d358eb00bedb04 | 264 | py | Python | agents/train.py | yamamototakas/fxtrading | 955d247b832de7180b8893edaad0b50df515809f | [
"MIT"
] | null | null | null | agents/train.py | yamamototakas/fxtrading | 955d247b832de7180b8893edaad0b50df515809f | [
"MIT"
] | null | null | null | agents/train.py | yamamototakas/fxtrading | 955d247b832de7180b8893edaad0b50df515809f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from trade_results_loader import *
from model import *
loader = TradeResultsLoader()
data = TradeResults(loader.retrieve_trade_data())
with Trainer() as trainer:
trainer.train(10001, data)
trainer.save("./model.ckpt")
| 22 | 50 | 0.685606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.143939 |
fa2e2fc6030354392d7209c8f4bab9713fe2a353 | 1,074 | py | Python | madlib.py | danhuyle508/madlib-cli | 20d50e09a278c441bb7a483f9d2faa331f522655 | [
"MIT"
] | null | null | null | madlib.py | danhuyle508/madlib-cli | 20d50e09a278c441bb7a483f9d2faa331f522655 | [
"MIT"
] | null | null | null | madlib.py | danhuyle508/madlib-cli | 20d50e09a278c441bb7a483f9d2faa331f522655 | [
"MIT"
] | null | null | null | import re
welcome_message = """
Welcome to the Mad Libs game! You will be prompted to enter certain types of words. These words will be used in a mad lib and printed out for you.
"""
def fill_mad_lib(file):
    new_mad_lib = ''
    # import pdb; pdb.set_trace()
    try:
        # Open the template named by the caller; opening inside the try block
        # lets the FileNotFoundError handler below catch a missing file.
        with open(file, 'r') as f:
            for line in f:
                # Use regex to find all instances of { something }.
                array_of_word_types = find_all_instances(line)
                for word_type in array_of_word_types:
                    user_answer = input('Enter a ' + word_type + ': ')
                    line = replace_word(line, word_type, user_answer)
                new_mad_lib += line
        print(new_mad_lib)
    except FileNotFoundError:
        print('The file was not found')
def replace_word(line, old_word, new_word):
return line.replace(old_word, new_word, 1)
def find_all_instances(line):
regex = r"{[^{]+}"
return re.findall(regex, line)
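# A quick illustration of the placeholder syntax matched above:
#
#     >>> find_all_instances('The {adjective} {noun} jumped.')
#     ['{adjective}', '{noun}']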
if __name__ == '__main__':
    print(welcome_message)
fill_mad_lib('text.txt') | 37.034483 | 146 | 0.604283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.293296 |
fa2fa594594f0295d2e1d08269d1cf3f259f882f | 1,852 | py | Python | src/abundance.py | Ilia-Abolhasani/modify_vamb | f164b5d6dd8a8104d115063b86b3d5001dac85b9 | [
"MIT"
] | 111 | 2019-06-22T15:10:06.000Z | 2022-03-29T06:08:27.000Z | src/abundance.py | neptuneyt/vamb | dfd4f005f56471c0aabbe4e977f4cc3dd893e373 | [
"MIT"
] | 86 | 2019-06-22T02:29:30.000Z | 2022-03-31T06:56:18.000Z | src/abundance.py | neptuneyt/vamb | dfd4f005f56471c0aabbe4e977f4cc3dd893e373 | [
"MIT"
] | 32 | 2019-08-28T09:53:18.000Z | 2022-03-26T03:30:52.000Z | import sys
import os
import argparse
import numpy as np
parser = argparse.ArgumentParser(
description="""Command-line bin abundance estimator.
Print the median RPKM abundance for each bin in each sample to STDOUT.
Will read the RPKM file into memory - beware.""",
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False)
parser.add_argument('rpkmpath', help='Path to RPKM file')
parser.add_argument('clusterspath', help='Path to clusters.tsv')
parser.add_argument('headerpath', help='Path to list of headers')
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
args = parser.parse_args()
# Check files
for infile in (args.rpkmpath, args.clusterspath, args.headerpath):
if not os.path.isfile(infile):
raise FileNotFoundError(infile)
# Load Vamb
sys.path.append('../vamb')
import vamb
# Load in files
with open(args.headerpath) as file:
    indexof = {line.strip(): i for i, line in enumerate(file)}
with open(args.clusterspath) as file:
clusters = vamb.vambtools.read_clusters(file)
# Check that all cluster names are in headers:
for cluster in clusters.values():
for header in cluster:
if header not in indexof:
raise KeyError("Header not found in headerlist: {}".format(header))
# Load RPKM and check it
rpkm = vamb.vambtools.read_npz(args.rpkmpath)
nsamples = rpkm.shape[1]
if len(indexof) != len(rpkm):
raise ValueError("Not the same number of headers as rows in RPKM file")
# Now estimate abundances
for clustername, cluster in clusters.items():
depths = np.empty((len(cluster), nsamples), dtype=np.float32)
for row, header in enumerate(cluster):
index = indexof[header]
depths[row] = rpkm[index]
median_depths = np.median(depths, axis=0)
print(clustername, end='\t')
print('\t'.join([str(i) for i in median_depths]))
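# Illustrative output shape (assuming two samples): each printed line is
# "<clustername>\t<median RPKM in sample 1>\t<median RPKM in sample 2>".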
| 28.9375 | 79 | 0.714363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 503 | 0.271598 |
fa30edc5de1f7b51fdc4c2d079353d9aba68b489 | 3,343 | py | Python | miamidade/events.py | jayktee/scrapers-us-municipal | ff52a331e91cb590a3eda7db6c688d75b77acacb | [
"MIT"
] | 67 | 2015-04-28T19:28:18.000Z | 2022-01-31T03:27:17.000Z | miamidade/events.py | jayktee/scrapers-us-municipal | ff52a331e91cb590a3eda7db6c688d75b77acacb | [
"MIT"
] | 202 | 2015-01-15T18:43:12.000Z | 2021-11-23T15:09:10.000Z | miamidade/events.py | jayktee/scrapers-us-municipal | ff52a331e91cb590a3eda7db6c688d75b77acacb | [
"MIT"
] | 54 | 2015-01-27T03:15:45.000Z | 2021-09-10T19:35:32.000Z | from pupa.scrape import Scraper
from pupa.scrape import Event
import lxml.html
from datetime import datetime
import pytz
DUPLICATE_EVENT_URLS = ('http://miamidade.gov/wps/Events/EventDetail.jsp?eventID=445731',
'http://miamidade.gov/wps/Events/EventDetail.jsp?eventID=452515',
'http://miamidade.gov/wps/Events/EventDetail.jsp?eventID=452513')
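# The detail pages above appear more than once in the source calendar, so the
# scraper skips them to avoid emitting duplicate events.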
class MiamidadeEventScraper(Scraper):
def lxmlize(self, url):
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
return doc
def scrape(self):
local_timezone = pytz.timezone("US/Eastern")
base_calendar_url = "http://www.miamidade.gov/cob/county-commission-calendar.asp"
        # Things get messy more than a few months out, so we're just pulling
        # 3 months. If we want three more, they are called "nxx", "nxy"
        # and "nxz".
        months = ["cur", "nex", "nxw"]
for m in months:
doc = self.lxmlize(base_calendar_url + "?next={}".format(m))
events = doc.xpath("//table[contains(@style,'dotted #ccc')]")
for event in events:
rows = event.xpath(".//tr")
for row in rows:
heading, data = row.xpath(".//td")
h = heading.text_content().lower().replace(":","").strip()
if h == "event":
title = data.text_content()
link = data.xpath(".//a")[0].attrib["href"]
elif h == "event date":
                        when = datetime.strptime(data.text, '%m/%d/%y %I:%M%p')
when = local_timezone.localize(when)
elif h == "location":
where = data.text
elif h == "description":
description = data.text
if link in DUPLICATE_EVENT_URLS:
continue
if title == "Mayor's FY 2016-17 Proposed Budget Public Meeting":
continue
if not description:
description = ""
status = "confirmed"
if "cancelled" in title.lower():
status = "cancelled"
e = Event(name=title,
start_time=when,
timezone="US/Eastern",
location_name=where,
description=description,
status=status)
e.add_source(link)
yield e
e = Event(name="Mayor's FY 2016-17 Proposed Budget Public Meeting",
                  start_time=local_timezone.localize(datetime.strptime('08/08/16 06:00PM', '%m/%d/%y %I:%M%p')),
timezone="US/Eastern",
location_name='111 NW 1st Street',
description='Pursuant to Section 2-1800A of the County Code, a Public Meeting has been scheduled by the Honorable Carlos A. Gimenez, Mayor, Miami-Dade County, to discuss the FY 2016-17 budget, tax rates, and fee changes.',
status='confirmed')
e.add_source('http://miamidade.gov/wps/Events/EventDetail.jsp?eventID=447192')
yield e
| 43.415584 | 244 | 0.518995 | 2,947 | 0.881544 | 2,744 | 0.82082 | 0 | 0 | 0 | 0 | 1,060 | 0.31708 |
fa31418ce189be6854e296ddbd28f6d7bc22e85e | 344 | py | Python | backend/research_note/migrations/0006_remove_researchnote_is_written.py | andy23512/research-note-system | 42a9d67de07a0f32615c4b9c6505b46e7c852f79 | [
"MIT"
] | null | null | null | backend/research_note/migrations/0006_remove_researchnote_is_written.py | andy23512/research-note-system | 42a9d67de07a0f32615c4b9c6505b46e7c852f79 | [
"MIT"
] | 6 | 2021-06-04T23:01:14.000Z | 2022-02-26T19:57:11.000Z | backend/research_note/migrations/0006_remove_researchnote_is_written.py | andy23512/research-note-system | 42a9d67de07a0f32615c4b9c6505b46e7c852f79 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.7 on 2019-11-12 16:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('research_note', '0005_auto_20191112_2255'),
]
operations = [
migrations.RemoveField(
model_name='researchnote',
name='is_written',
),
]
| 19.111111 | 53 | 0.610465 | 259 | 0.752907 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.328488 |
fa3198b66f7c5594775466edadbd3a731696a18b | 1,187 | py | Python | Tools/nm_swift_demangle.py | kylefleming/XVim2 | e5544aba5c1f9b778f0c329a56f8075bf1c48d0e | [
"MIT"
] | null | null | null | Tools/nm_swift_demangle.py | kylefleming/XVim2 | e5544aba5c1f9b778f0c329a56f8075bf1c48d0e | [
"MIT"
] | null | null | null | Tools/nm_swift_demangle.py | kylefleming/XVim2 | e5544aba5c1f9b778f0c329a56f8075bf1c48d0e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import shutil
import subprocess
os.chdir("..")
if os.path.exists("tmp"):
shutil.rmtree("tmp")
os.mkdir("tmp")
os.chdir("tmp")
modules = ['/Applications/Xcode.app/Contents/SharedFrameworks/SourceEditor.framework/SourceEditor',
           '/Applications/Xcode.app/Contents/SharedFrameworks/SourceKit.framework/SourceKit']
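# Swift-mangled symbols in nm output start with "_$" (for example
# "_$s12SourceEditor..."), which is what the filter below keys on.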
with open('list.txt', "w") as f3:
for module in modules:
        cmd = 'nm ' + module
with open('cmd.txt', "w") as handle:
subprocess.run(cmd, shell=True, stdout=handle)
with open('cmd.txt') as handle:
for line in handle:
words = line.split()
for word in words:
if len(word) >= 2 and word[0] == '_' and word[1] == '$':
cmd2 = "swift demangle '" + word + "'"
with open('cmd2.txt', "w") as handle2:
subprocess.run(cmd2, shell=True, stdout=handle2)
with open('cmd2.txt') as handle2:
for line2 in handle2:
#print(line2)
f3.write(line2)
| 35.969697 | 98 | 0.518113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 318 | 0.267902 |
fa34161536fc8808c400b42131d5fc16b6159bc6 | 1,319 | py | Python | retag_push.py | atiasn/sync-images | 2485f74259de2412a0d147ef1093b412bfafc3c9 | [
"Apache-2.0"
] | null | null | null | retag_push.py | atiasn/sync-images | 2485f74259de2412a0d147ef1093b412bfafc3c9 | [
"Apache-2.0"
] | null | null | null | retag_push.py | atiasn/sync-images | 2485f74259de2412a0d147ef1093b412bfafc3c9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import os
def get_image_list():
with open('sync_images.txt', 'r') as f:
images = f.readlines()
sync_images = []
for img in images:
img = img.strip()
if 'docker.io/' in img:
sync_images.append(img.replace('docker.io/', ''))
else:
sync_images.append(img)
return sync_images
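# Illustrative sync_images.txt contents (image names are placeholders; a
# leading "docker.io/" prefix is stripped when present):
#
#     docker.io/library/nginx:1.21
#     library/redis:6.2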
def retag_push():
sync_images = get_image_list()
ali_registry = 'registry.cn-chengdu.aliyuncs.com'
ali_namespace = 'atiasn'
for img in sync_images:
        print(f'Base image: {img}')
base_img = 'docker.io/' + img
ali_img = f'{ali_registry}/{ali_namespace}/{img}'
pull_cmd = 'docker pull ' + base_img
retag_cmd = f'docker tag {base_img} ' + ali_img
push_cmd = 'docker push ' + ali_img
        print(f'Pull command: {pull_cmd}')
        code = os.system(pull_cmd)
        if code != 0:
            raise RuntimeError(f'Failed to pull image {base_img}')
        print(f'Retag command: {retag_cmd}')
        code = os.system(retag_cmd)
        if code != 0:
            raise RuntimeError(f'Failed to retag image {base_img}')
        print(f'Push-to-Aliyun command: {push_cmd}')
        code = os.system(push_cmd)
        if code != 0:
            raise RuntimeError(f'Failed to push image {ali_img} to Aliyun')
if __name__ == '__main__':
retag_push()
| 26.918367 | 61 | 0.578469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 476 | 0.336872 |
fa35fd9c4c3f40af307596284bdf4287e3dd908d | 638 | py | Python | Python/Buch_ATBS/Teil_2/Kapitel_17_Bildbearbeitung/04_texte_schreiben/04_texte_schreiben.py | Apop85/Scripts | e71e1c18539e67543e3509c424c7f2d6528da654 | [
"MIT"
] | null | null | null | Python/Buch_ATBS/Teil_2/Kapitel_17_Bildbearbeitung/04_texte_schreiben/04_texte_schreiben.py | Apop85/Scripts | e71e1c18539e67543e3509c424c7f2d6528da654 | [
"MIT"
] | 6 | 2020-12-24T15:15:09.000Z | 2022-01-13T01:58:35.000Z | Python/Buch_ATBS/Teil_2/Kapitel_17_Bildbearbeitung/04_texte_schreiben/04_texte_schreiben.py | Apop85/Scripts | 1d8dad316c55e1f1343526eac9e4b3d0909e4873 | [
"MIT"
] | null | null | null | # 04_texte_schreiben.py
# This example demonstrates how to write text onto an image using ImageFont from the PIL module.
from PIL import Image, ImageFont, ImageDraw
import os
os.chdir(os.path.dirname(__file__))
target_file = '.\\text_in_image.png'
if os.path.exists(target_file):
    os.remove(target_file)
windows_font_dir = 'C:\\Windows\\Fonts'
image_object = Image.new('RGBA', (300, 300), 'white')
draw = ImageDraw.Draw(image_object)
draw.text((20, 150), 'Hello', fill='brown')
arial_font = ImageFont.truetype(windows_font_dir + '\\arial.ttf', 32)
draw.text((80, 150), 'World', fill='purple', font=arial_font)
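# Note: the font path above assumes a Windows system; on other platforms a
# different TrueType font file would have to be supplied to ImageFont.truetype.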
image_object.save(target_file) | 31.9 | 101 | 0.763323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.346395 |
fa3658cc83397f893af5141259e17243f9b55e03 | 4,060 | py | Python | web.py | dujinle/AccountByTornado | ef76be1d8cfffea2797bf024dcb0eaa887ca0aff | [
"Apache-2.0"
] | null | null | null | web.py | dujinle/AccountByTornado | ef76be1d8cfffea2797bf024dcb0eaa887ca0aff | [
"Apache-2.0"
] | null | null | null | web.py | dujinle/AccountByTornado | ef76be1d8cfffea2797bf024dcb0eaa887ca0aff | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys, os
import tornado.ioloop
import tornado.web
import tornado.httpserver
import logging
import logging.handlers
import re
from urllib import unquote
import config
from travellers import *
reload(sys)
sys.setdefaultencoding('utf8')
def daemon(chdir=False):
try:
if os.fork() > 0:
os._exit(0)
except OSError, e:
print 'fork #1 failed: %d (%s)' % (e.errno, e.strerror)
os._exit(1)
def init():
pass
class DefaultHandler(tornado.web.RequestHandler):
def get(self):
self.write('Travellers Say Hello! (v%s)' % config.VERSION)
class LogHandler(tornado.web.RequestHandler):
def get(self):
log_filename = 'logs/logging'
if not os.path.exists(log_filename):
self.write('The log file is empty.')
return
log_file = None
log_file_lines = None
try:
log_file = open(log_filename, 'r')
if log_file is None:
raise Exception('log_file is None')
log_file_lines = log_file.readlines()
if log_file_lines is None:
raise Exception('log_file_lines is None')
except Exception, e:
logger = logging.getLogger('web')
logger.error('Failed to read the log file (logs/logging), error: %s' % e)
finally:
if log_file is not None:
log_file.close()
        if log_file_lines is None:
            self.write('Failed to read the log file.')
            return
        line_limit = 500
        for _ in log_file_lines[::-1]:
            line_limit -= 1
            if line_limit > 0:
                self.write(unquote(_) + '<BR/>')
            else:
                break
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
"cookie_secret": "SAB8LF2sGBflryMb6eXFkX#ou@CNta9V",
}
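# Note: the cookie secret above is hard-coded; in a real deployment it would
# normally be loaded from configuration instead of the source.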
routes = [
(r"/", DefaultHandler),
(r"/api/user/authkey", AuthKeyHandler), # Send AuthKey (POST)(JWT)
(r"/api/user/register", RegisterHandler), # Register (POST)(JWT)
(r"/api/user/login", LoginHandler), # Login (POST)(JWT)
(r"/api/user/logout", LogoutHandler), # Logout (POST)
(r"/api/user/reset", ResetHandler), # Reset password (POST)(JWT)
(r"/api/user/forget", ForgetHandler), # Forget password (POST)(JWT)
(r"/api/user/update", UUpdateHandler), # UserUpdate (GET/POST)
(r"/api/user/getuser", GetUserHandler), # GetUser (GET/POST)
(r"/api/user/getall", GetAllMbersHandler), # GetAllUsers (GET/POST)
(r"/api/user/icon", AvatarHandler), # UpdateIcon (GET/POST)
(r"/api/user/geticon", GetIconHandler), # GetIcon (GET/POST)
(r"/api/user/pos", PostionHandler), # Update Pos info (GET/POST)
(r"/api/group/create", AddGroupHandler), # AddGroup (GET/POST)
(r"/api/group/destroy", DelGroupHandler), # DelGroup (GET/POST)
(r"/api/group/join", AddMemberHandler), # AddMember (GET/POST)
(r"/api/group/quit", DelMemberHandler), # DelMember (GET/POST)
(r"/api/group/getgroup", GetMembersHandler), # GerMembers (GET/POST)
(r"/api/group/rename", RenameGroupHandler), # GerMembers (GET/POST)
(r"/api/group/setshare",SetPosShareHandler), # GerMembers (GET/POST)
]
if config.Mode == 'DEBUG':
routes.append((r"/log", LogHandler))
application = tornado.web.Application(routes, **settings)
if __name__ == "__main__":
if '-d' in sys.argv:
        daemon()
logdir = 'logs'
if not os.path.exists(logdir):
os.makedirs(logdir)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler = logging.handlers.TimedRotatingFileHandler(
'%s/logging' % logdir, 'M', 20, 360)
handler.suffix = '%Y%m%d%H%M%S.log'
handler.extMatch = re.compile(r'^\d{4}\d{2}\d{2}\d{2}\d{2}\d{2}')
handler.setFormatter(formatter)
logger = logging.getLogger('web')
logger.addHandler(handler)
if config.Mode == 'DEBUG':
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.ERROR)
init()
if '-P' in sys.argv:
http_server = tornado.httpserver.HTTPServer(application)
http_server.bind(8080, '0.0.0.0')
        http_server.start()  # TODO: base the process count on the CPU core count
        print 'Server is running, listening on port 8080....'
tornado.ioloop.IOLoop.instance().start()
    else:
        application.listen(8080)
        print 'Server is running, listening on port 8080....'
tornado.ioloop.IOLoop.instance().start()
| 31.230769 | 76 | 0.698276 | 978 | 0.240887 | 0 | 0 | 0 | 0 | 0 | 0 | 1,442 | 0.355172 |
fa372cb86b56c891becb57810223ec547518e2ca | 8,739 | py | Python | fluiddb/data/user.py | fluidinfo/fluiddb | b5a8c8349f3eaf3364cc4efba4736c3e33b30d96 | [
"Apache-2.0"
] | 3 | 2021-05-10T14:41:30.000Z | 2021-12-16T05:53:30.000Z | fluiddb/data/user.py | fluidinfo/fluiddb | b5a8c8349f3eaf3364cc4efba4736c3e33b30d96 | [
"Apache-2.0"
] | null | null | null | fluiddb/data/user.py | fluidinfo/fluiddb | b5a8c8349f3eaf3364cc4efba4736c3e33b30d96 | [
"Apache-2.0"
] | 2 | 2018-01-24T09:03:21.000Z | 2021-06-25T08:34:54.000Z | import crypt
import random
import re
from string import ascii_letters, digits
from uuid import uuid4
from storm.locals import (
Storm, DateTime, Int, Unicode, UUID, Reference, AutoReload, RawStr)
from fluiddb.data.exceptions import DuplicateUserError, MalformedUsernameError
from fluiddb.data.store import getMainStore
from fluiddb.util.constant import Constant, ConstantEnum, EnumBase
class Role(EnumBase):
"""User roles.
@cvar ANONYMOUS: A user with the anonymous role only has read-only access
to data in Fluidinfo, unless a permission specifically grants write
access to a particular entity.
@cvar SUPERUSER: A user with the superuser role has read-write access to
all data in Fluidinfo and is not subject to permission checks.
@cvar USER: A user with the user role has read-write access to some data
in Fluidinfo, based on the rules defined by the permission system.
@cvar USER_MANAGER: A user with the user manager role is the same as a
C{USER}, except they can create, update and delete L{User}s.
"""
ANONYMOUS = Constant(1, 'ANONYMOUS')
SUPERUSER = Constant(2, 'SUPERUSER')
USER = Constant(3, 'USER')
USER_MANAGER = Constant(4, 'USER_MANAGER')
DOT_ATOM = r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
QUOTED_STRING = (r"|^\"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011"
r"\013\014\016-\177])*\"")
DOMAIN_STRING = r")@(?:[A-Z0-9-]+\.)+[A-Z]{2,6}$"
EMAIL_REGEXP = re.compile(DOT_ATOM + QUOTED_STRING + DOMAIN_STRING,
re.IGNORECASE)
def validateEmail(obj, attribute, value):
"""Validate a L{User.email} value before storing it in the database.
@param obj: The L{User} instance being updated.
@param attribute: The name of the attribute being set.
@param value: The email address being stored.
@raise ValueError: Raised if the value isn't a valid email address.
@return: The value to store.
"""
if value is not None and not EMAIL_REGEXP.match(value):
raise ValueError('%r is not a valid email address.' % value)
return value
class User(Storm):
"""A user of Fluidinfo.
@param username: The username of the user.
@param passwordHash: The hashed password of the user.
@param fullname: The name of the user.
@param email: The email address for the user.
@param role: The L{Role} for the user.
"""
__storm_table__ = 'users'
id = Int('id', primary=True, allow_none=False, default=AutoReload)
objectID = UUID('object_id', allow_none=False)
role = ConstantEnum('role', enum_class=Role, allow_none=False)
username = Unicode('username', allow_none=False)
passwordHash = RawStr('password_hash', allow_none=False)
fullname = Unicode('fullname', allow_none=False)
email = Unicode('email', validator=validateEmail)
namespaceID = Int('namespace_id')
creationTime = DateTime('creation_time', default=AutoReload)
namespace = Reference(namespaceID, 'Namespace.id')
def __init__(self, username, passwordHash, fullname, email, role):
self.objectID = uuid4()
self.username = username
self.passwordHash = passwordHash
self.fullname = fullname
self.email = email
self.role = role
def isAnonymous(self):
"""Returns C{True} if this user has the anonymous role."""
return self.role == Role.ANONYMOUS
def isSuperuser(self):
"""Returns C{True} if this user has the super user role."""
return self.role == Role.SUPERUSER
def isUser(self):
"""Returns C{True} if this user has the regular user role."""
return self.role == Role.USER
def createUser(username, password, fullname, email=None, role=None):
"""Create a L{User} called C{name} with C{role}.
@param username: A C{unicode} username for the user.
@param password: A C{unicode} password in plain text for the user. The
password will be hashed before being stored. The password will be
disabled if C{None} is provided.
@param email: Optionally, an email address for the user.
@param role: Optionally, a role for the user, defaults to L{Role.USER}.
@raise MalformedUsernameError: Raised if C{username} is not valid.
@raise DuplicateUserError: Raised if a user with the given C{username}
already exists.
@return: A new L{User} instance persisted in the main store.
"""
if not isValidUsername(username):
raise MalformedUsernameError(username)
store = getMainStore()
if store.find(User.id, User.username == username).any():
raise DuplicateUserError([username])
passwordHash = '!' if password is None else hashPassword(password)
role = role if role is not None else Role.USER
return store.add(User(username, passwordHash, fullname, email, role))
def getUsers(usernames=None, ids=None, objectIDs=None):
"""Get L{User}s.
@param usernames: Optionally, a sequence of L{User.username}s to filter
the results with.
@param ids: Optionally, a sequence of L{User.id}s to filter the results
with.
@param objectIDs: Optionally, a sequence of L{User.objectID}s to filter the
result with.
@return: A C{ResultSet} with matching L{User}s.
"""
store = getMainStore()
where = []
if ids:
where.append(User.id.is_in(ids))
if usernames:
where.append(User.username.is_in(usernames))
if objectIDs:
where.append(User.objectID.is_in(objectIDs))
return store.find(User, *where)
# Password hashing code used by the low-level functions for creating users
ALPHABET = ascii_letters + digits
SALT_LENGTH = 8
def hashPassword(password, salt=None):
"""Convert a password string into a secure hash.
This function generates an MD5-hashed password, which consists of three
fields separated by a C{$} symbol:
1. The status of the password. If this field is empty, the user is
enabled, otherwise it's disabled. The C{!} character should be used
when specifying that a user is disabled.
2. The mechanism (1 for MD5, 2a for Blowfish, 5 for SHA-256 and 6 for
SHA-512).
3. The salt.
4. The hashed password.
@param password: The C{unicode} password to be hashed.
@param salt: Optionally, a key to be passed to the L{crypt.crypt} function
to secure against brute-force attacks. Defaults to a random string
and the MD5 hashing algorithm.
@return: A C{str} hash of C{password} generated with C{crypt} algorithm.
"""
# crypt.crypt needs the password to be encoded in ASCII
password = password.encode('utf-8')
if salt is None:
salt = '$1$' + ''.join(random.choice(ALPHABET)
for _ in xrange(SALT_LENGTH))
return crypt.crypt(password, salt)
USERNAME_REGEXP = re.compile(r'^[\:\.\-\w]{1,128}$', re.UNICODE)
def isValidUsername(username):
"""Determine if C{username} is valid.
A username may only contain letters, numbers, and colon, dash, dot and
underscore characters. It can't contain more than 128 characters.
@param path: A C{unicode} username to validate.
@return: C{True} if C{username} is valid, otherwise C{False}.
"""
return (USERNAME_REGEXP.match(username) is not None)
class TwitterUser(Storm):
"""The Twitter UID for a Fluidinfo user.
@param userID: The L{User.id} to link to Twitter.
@param uid: The Twitter UID to link to the L{Fluidinfo} user.
"""
__storm_table__ = 'twitter_users'
userID = Int('user_id', primary=True, allow_none=False)
uid = Int('uid', allow_none=False)
creationTime = DateTime('creation_time', default=AutoReload)
user = Reference(userID, User.id)
def __init__(self, userID, uid):
self.userID = userID
self.uid = uid
def createTwitterUser(user, uid):
"""Create a L{TwitterUser}.
@param user: The L{User} to link to a Twitter account.
@param uid: The Twitter UID for the user.
@return: A new L{TwitterUser} instance persisted in the main store.
"""
store = getMainStore()
return store.add(TwitterUser(user.id, uid))
def getTwitterUsers(uids=None):
"""Get C{(User, TwitterUser)} 2-tuples matching specified Twitter UIDs.
@param uids: Optionally, a sequence of L{TwitterUser.uid}s to filter the
results with.
@return: A C{ResultSet} with matching C{(User, TwitterUser)} 2-tuples.
"""
store = getMainStore()
where = []
if uids:
where.append(TwitterUser.uid.is_in(uids))
return store.find((User, TwitterUser),
User.id == TwitterUser.userID,
*where)
| 36.11157 | 79 | 0.669642 | 2,947 | 0.337224 | 0 | 0 | 0 | 0 | 0 | 0 | 4,876 | 0.557959 |
fa37e3137008518d11130aa95cb3494298511577 | 2,049 | py | Python | marqeta/response_models/business_proprietor_response_model.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 21 | 2019-04-12T09:02:17.000Z | 2022-02-18T11:39:06.000Z | marqeta/response_models/business_proprietor_response_model.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 1 | 2020-07-22T21:27:40.000Z | 2020-07-23T17:38:43.000Z | marqeta/response_models/business_proprietor_response_model.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 10 | 2019-05-08T14:20:37.000Z | 2021-09-20T18:09:26.000Z | from datetime import datetime, date
from marqeta.response_models.address_response_model import AddressResponseModel
from marqeta.response_models.identification_response_model import IdentificationResponseModel
from marqeta.response_models import datetime_object
import json
import re
class BusinessProprietorResponseModel(object):
def __init__(self, json_response):
self.json_response = json_response
def __str__(self):
return json.dumps(self.json_response, default=self.json_serial)
@staticmethod
def json_serial(o):
if isinstance(o, datetime) or isinstance(o, date):
return o.__str__()
@property
def first_name(self):
return self.json_response.get('first_name', None)
@property
def middle_name(self):
return self.json_response.get('middle_name', None)
@property
def last_name(self):
return self.json_response.get('last_name', None)
@property
def alternative_names(self):
return self.json_response.get('alternative_names', None)
@property
def title(self):
return self.json_response.get('title', None)
@property
def home(self):
if 'home' in self.json_response:
return AddressResponseModel(self.json_response['home'])
@property
def ssn(self):
return self.json_response.get('ssn', None)
@property
def dob(self):
if 'dob' in self.json_response:
return datetime_object('dob', self.json_response)
@property
def phone(self):
return self.json_response.get('phone', None)
@property
def email(self):
return self.json_response.get('email', None)
@property
def identifications(self):
if 'identifications' in self.json_response:
return [IdentificationResponseModel(val) for val in self.json_response['identifications']]
def __repr__(self):
return '<Marqeta.response_models.business_proprietor_response_model.BusinessProprietorResponseModel>' + self.__str__()
| 25.936709 | 127 | 0.697413 | 1,763 | 0.86042 | 0 | 0 | 1,304 | 0.636408 | 0 | 0 | 231 | 0.112738 |
fa3a63f302b46c02ff39a3b03b267bd4f406883d | 241 | py | Python | Books/Book/urls.py | qq292/Books | d3b85829592bcbeb87eeccc568e22c510a289487 | [
"MIT"
] | null | null | null | Books/Book/urls.py | qq292/Books | d3b85829592bcbeb87eeccc568e22c510a289487 | [
"MIT"
] | null | null | null | Books/Book/urls.py | qq292/Books | d3b85829592bcbeb87eeccc568e22c510a289487 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path
from django.views.generic import TemplateView
from .views import MainPage
urlpatterns = [
path('admin/', admin.site.urls),
path('', MainPage.as_view(), name='books'),
]
| 21.909091 | 47 | 0.73029 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.070539 |
fa3a73830e5fd5eb0ccec1657d21eb8cfd404d56 | 7,304 | py | Python | tests/test_util.py | markusrobertjonsson/learning_simulator | 91d0c37f51f4af6bfe23de1bc9eca25c6bb6f262 | [
"MIT"
] | 1 | 2021-06-11T08:41:17.000Z | 2021-06-11T08:41:17.000Z | tests/test_util.py | markusrobertjonsson/learning_simulator | 91d0c37f51f4af6bfe23de1bc9eca25c6bb6f262 | [
"MIT"
] | 1 | 2020-12-05T19:24:50.000Z | 2021-09-29T14:11:29.000Z | tests/test_util.py | markusrobertjonsson/learning_simulator | 91d0c37f51f4af6bfe23de1bc9eca25c6bb6f262 | [
"MIT"
] | 2 | 2018-09-21T01:07:09.000Z | 2019-03-18T09:43:05.000Z | import unittest
import LsUtil
class TestLsUtil(unittest.TestCase):
def setUp(self):
pass
def iseq(self, d1, d2):
for _, val in d1.items():
val.sort()
for _, val in d2.items():
val.sort()
self.assertEqual(d1, d2)
def test_dict_inv(self):
d = {'a': ['x', 'y'], 'b': ['x', 'y', 'z'], 'c': 'w'}
dinv = LsUtil.dict_inv(d)
expected = {'w': ['c'], 'x': ['b', 'a'], 'y': ['b', 'a'], 'z': ['b']}
self.iseq(dinv, expected)
d = {}
dinv = LsUtil.dict_inv(d)
expected = {}
self.iseq(dinv, expected)
d = {2: '2', '3': '3'}
with self.assertRaises(Exception):
            LsUtil.dict_inv(d)
d = {2: [2, 3, 'j'], '3': ['a', 'b', 'c']}
with self.assertRaises(Exception):
            LsUtil.dict_inv(d)
d = {'A': ''}
with self.assertRaises(Exception):
            LsUtil.dict_inv(d)
d = {'': 'A'}
with self.assertRaises(Exception):
            LsUtil.dict_inv(d)
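    # The test below exercises LsUtil.find_and_cumsum with both exact matching
    # (third argument True) and subset matching (False) over a sequence that
    # mixes plain strings and tuples.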
def test_find_and_cumsum(self):
seq = ['a', 'b', ('a', 'b', 'c'), 'a', ('a',), ('a', 'b'), 'b', ('a', 'b'),
('a', 'b', 'c', 'd'), 'aa', 'bb', ('aa', 'bb', 'cc'), 'cc']
self._test_find_and_cumsum_seq(seq)
# string
findind, cumsum = LsUtil.find_and_cumsum(seq, 'a', True)
self.assertEqual(findind, [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, 'a', False)
self.assertEqual(findind, [1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0])
# tuple, length 1
findind, cumsum = LsUtil.find_and_cumsum(seq, ('a',), True)
self.assertEqual(findind, [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, ('a',), False)
self.assertEqual(findind, [0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0])
# tuple, length 2
findind, cumsum = LsUtil.find_and_cumsum(seq, ('a', 'b'), True)
self.assertEqual(findind, [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, ('a', 'b'), False)
self.assertEqual(findind, [0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0])
# tuple, length 3
findind, cumsum = LsUtil.find_and_cumsum(seq, ('c', 'a', 'b'), True)
self.assertEqual(findind, [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, ('c', 'a', 'b'), False)
self.assertEqual(findind, [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
# list, length 1
findind, cumsum = LsUtil.find_and_cumsum(seq, ['a'], True)
self.assertEqual(findind, [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, ['a'], False)
self.assertEqual(findind, [1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('a',)], True)
self.assertEqual(findind, [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('a',)], False)
self.assertEqual(findind, [0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('a', 'b')], True)
self.assertEqual(findind, [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('a', 'b')], False)
self.assertEqual(findind, [0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('c', 'a', 'b')], True)
self.assertEqual(findind, [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('c', 'a', 'b')], False)
self.assertEqual(findind, [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
# list, length 2
findind, cumsum = LsUtil.find_and_cumsum(seq, ['a', 'b'], True)
self.assertEqual(findind, [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, ['a', 'b'], False)
self.assertEqual(findind, [1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, ['b', ('a', 'b')], True)
self.assertEqual(findind, [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, ['b', ('a', 'b')], False)
self.assertEqual(findind, [0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, ['b', ('b', 'a')], True)
self.assertEqual(findind, [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, ['b', ('b', 'a')], False)
self.assertEqual(findind, [0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('a', 'b'), 'a'], True)
self.assertEqual(findind, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('a', 'b'), 'a'], False)
self.assertEqual(findind, [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('a',), ('b', 'a')], True)
self.assertEqual(findind, [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('a',), ('b', 'a')], False)
self.assertEqual(findind, [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('a',), ('b', 'a'), 'q'], True)
self.assertEqual(findind, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, [('a',), ('b', 'a'), 'q'], False)
self.assertEqual(findind, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, ['bb', ('a', 'b')], True)
self.assertEqual(findind, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
findind, cumsum = LsUtil.find_and_cumsum(seq, ['bb', ('a', 'b')], False)
self.assertEqual(findind, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
seq = ['new_trail', 'response', 'context', 'no_response', 'context', 'response',
'context', 'no_response', ('us', 'context'), 'no_response', 'new_trail', 'response',
('cs', 'context'), 'no_response', ('us', 'context'), 'no_response', 'context',
'no_response']
self._test_find_and_cumsum_seq(seq)
def _test_find_and_cumsum_seq(self, seq):
for patternlen in range(1, len(seq) + 1):
for i in range(0, len(seq) + 1 - patternlen):
pattern = seq[i: (i + patternlen)]
findind, cumsum = LsUtil.find_and_cumsum(seq, pattern, True)
self.assertEqual(findind[i], 1)
if patternlen == 1:
findind, cumsum = LsUtil.find_and_cumsum(seq, pattern[0], True)
self.assertEqual(findind[i], 1)
if type(pattern[0]) is tuple:
for t in pattern[0]:
findind, cumsum = LsUtil.find_and_cumsum(seq, t, False)
self.assertEqual(findind[i], 1)
if len(pattern[0]) > 1:
findind, cumsum = LsUtil.find_and_cumsum(seq, t, True)
self.assertTrue(findind[i] != 1)
| 49.020134 | 99 | 0.502464 | 7,270 | 0.995345 | 0 | 0 | 0 | 0 | 0 | 0 | 671 | 0.091867 |
fa3b56e4a1b754b6421ee0737d4108a2bb149d0a | 3,856 | py | Python | code/vrf-chain-sim/find_pattern.py | filecoin-project/consensus | 8824ad5fb8948706995805692d594f6ccf199176 | [
"Apache-2.0",
"MIT"
] | 43 | 2019-02-14T21:02:53.000Z | 2021-12-10T22:53:02.000Z | code/vrf-chain-sim/find_pattern.py | filecoin-project/consensus | 8824ad5fb8948706995805692d594f6ccf199176 | [
"Apache-2.0",
"MIT"
] | 41 | 2019-02-23T02:16:42.000Z | 2020-06-18T20:17:52.000Z | code/vrf-chain-sim/find_pattern.py | filecoin-project/consensus | 8824ad5fb8948706995805692d594f6ccf199176 | [
"Apache-2.0",
"MIT"
] | 4 | 2019-03-27T09:15:53.000Z | 2022-03-25T07:54:18.000Z | import numpy as np
import time
from math import floor
import multiprocessing as mp
import scipy.special
#Initialize parameters
Num_of_sim_per_proc = 1
start_time = time.time()
e = 5.
alpha = 0.33
ntot = 100
na = int(ntot*alpha)
nh = ntot - na
height = 5 #height of the attack
p=float(e)/float(1*ntot)
unrealistic = 0 #do we want to compute the worst case or just the synchronous case?
def multinomial(lst):
res, i = 1, sum(lst)
i0 = lst.index(max(lst))
for a in lst[:i0] + lst[i0+1:]:
for j in range(1,a+1):
res *= i
res //= j
i -= 1
return res
## use multinomial coefficient
def new_node(slot,weight):
return {
'slot': slot,
'weight':weight
}
def print_weight(vec):#given a vector of number of election won at each slot, how many
# "situations" gives a chain weight (i.e. sum of blocks) higher than some number
list_of_nodes = [[new_node(-1,0,)]]
for ind,v in enumerate(vec):
list_of_nodes_at_slot_ind = []
for i in range(v+1):
for node in list_of_nodes[ind]: #take all the nodes from slot before i.e. ind-1
weight = node['weight'] + i
nnode = new_node(ind,weight)
list_of_nodes_at_slot_ind.append(nnode)
list_of_nodes.append(list_of_nodes_at_slot_ind)
dict_of_weight = {i: 0 for i in range(sum(vec)+1)}
for elt in list_of_nodes[-1]:
w = elt['weight']
dict_of_weight[w]+=1
return dict_of_weight
def count_n2(ca):
num = len([x for x in ca if x > 1]) #count number of slot with more than 2 slots
n1 = len([x for x in ca if x != 0])
if n1>1:
num += scipy.special.binom(n1, 2)
return num
def count_n3(ca):
num = 0
n3 = len([x for x in ca if x > 2]) #count number of slot 3 blocks
n2 = len([x for x in ca if x > 1])
num += n3
n1 = len([x for x in ca if x != 0])
if n1>2:
num += scipy.special.binom(n1, 3) # 1 1 1
# 2 1
if n1>0:
#num += scipy.special.binom(n2, 3)
num +=n2*(n1-1)
return num
def count_n5(ca):
num = 0
n5 = len([x for x in ca if x > 4])
n4 = len([x for x in ca if x > 3])
n3 = len([x for x in ca if x > 2]) #count number of slot 3 blocks
n2 = len([x for x in ca if x > 1])
n1 = len([x for x in ca if x != 0])
num += n5 # 5
if n1>4:
num += scipy.special.binom(n1, 5) # 1 1 1 1 1
# 2 3
if n2>1:
num += n3*(n2-1)
# 4 1
if n1>0:
#num += scipy.special.binom(n2, 3)
num +=n4*(n1-1)
# 2 1 1 1
num+=n2*(scipy.special.binom(n1-1, 3))
# 2 2 1
if n1>1 and n2>1:
num += (n1-2)*scipy.special.binom(n2, 2)
#1 1 3
if n3>0:
num+=n3*scipy.special.binom(n1-1,2)
return num
def count_k(k,ca):
#n_i = len([x for x in ca if x >= k])
n = lambda i: len([x for x in ca if x >= i])
n1 = n(1)
#sqr_fun = lambda x: x * x
#n(i)
num = n(k)+scipy.special.binom(n1,k)
for j in range(1,k-1):
num+= n(k-j)*scipy.special.binom(n1-1,j)
return num
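# count_k appears to generalize the hand-written count_n2/count_n3/count_n4/
# count_n5 counters: it counts the ways to accumulate k blocks either in one
# slot or spread over distinct non-empty slots.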
def count_n4(ca):
num = 0
n4 = len([x for x in ca if x > 3])
n3 = len([x for x in ca if x > 2]) #count number of slot 3 blocks
n2 = len([x for x in ca if x > 1])
n1 = len([x for x in ca if x != 0])
num += n4 # 4
if n1>3:
num += scipy.special.binom(n1, 4) # 1 1 1 1
# 2 2
if n2>1:
num += scipy.special.binom(n2, 2)
# 3 1
if n1>0:
#num += scipy.special.binom(n2, 3)
num +=n3*(n1-1)
# 2 1 1
num+=n2*(scipy.special.binom(n1-1, 2))
return num
def count_n1(ca):
return len([x for x in ca if x != 0])
def simu(sim):
np.random.seed()#initialise random seed for different processors
wa = []
for i in range(sim):
ca = np.random.binomial(na, p, height)
winners = print_weight(ca)
wa.append(winners)
#ca = np.array(ca)+1
#tot = np.prod(ca)
return wa, count_n2(ca), count_k(2,ca), count_n3(ca), count_k(3,ca),count_n4(ca), count_k(4,ca)
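# Note: the count_* values returned above are computed from the last sampled
# `ca` only, while `wa` accumulates the weight distributions of every run.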
pool = mp.Pool(1)
#print(mp.cpu_count())
results = pool.map(simu, [Num_of_sim_per_proc])
pool.close()
print(results)
print("--- %s seconds ---" % (time.time() - start_time)) | 25.202614 | 97 | 0.616701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 881 | 0.228475 |
fa3bda603647c931c3d1f236ad29db629be3ac37 | 1,478 | py | Python | lib/dblatex-0.3.2/lib/dbtexmf/dblatex/grubber/util.py | jonathanmorley/HR-XSL | 799b1075cbec4cda3d686d588eea92a62d59963f | [
"Apache-2.0"
] | 1 | 2017-12-29T23:23:14.000Z | 2017-12-29T23:23:14.000Z | lib/dblatex-0.3.2/lib/dbtexmf/dblatex/grubber/util.py | jonathanmorley/HR-XSL | 799b1075cbec4cda3d686d588eea92a62d59963f | [
"Apache-2.0"
] | null | null | null | lib/dblatex-0.3.2/lib/dbtexmf/dblatex/grubber/util.py | jonathanmorley/HR-XSL | 799b1075cbec4cda3d686d588eea92a62d59963f | [
"Apache-2.0"
] | null | null | null | # This file is part of Rubber and thus covered by the GPL
# (c) Emmanuel Beffara, 2002--2006
"""
This module contains utility functions and classes used by the main system and
by the modules for various tasks.
"""
try:
import hashlib
except ImportError:
# Fallback for python 2.4:
import md5 as hashlib
import os
from msg import _, msg
def md5_file(fname):
"""
Compute the MD5 sum of a given file.
"""
m = hashlib.md5()
file = open(fname)
for line in file.readlines():
m.update(line)
file.close()
return m.digest()
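# Hypothetical usage sketch (file name is a placeholder):
#
#     before = md5_file("paper.tex")
#     ...  # something rewrites paper.tex
#     changed = md5_file("paper.tex") != before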
class Watcher:
"""
Watch for any changes of the files to survey, by checking the file MD5 sums.
"""
def __init__(self):
self.files = {}
def watch(self, file):
if os.path.exists(file):
self.files[file] = md5_file(file)
else:
self.files[file] = None
def update(self):
"""
        Update the MD5 sums of all files watched, and return the list of
        files that changed (empty if none of them did).
"""
changed = []
for file in self.files.keys():
if os.path.exists(file):
new = md5_file(file)
if self.files[file] != new:
msg.debug(_("%s MD5 checksum changed") % \
os.path.basename(file))
changed.append(file)
self.files[file] = new
return changed
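# Illustrative Watcher usage (file name is a placeholder):
#
#     w = Watcher()
#     w.watch("main.aux")
#     ...  # run the build step that may touch main.aux
#     if w.update():
#         msg.debug(_("main.aux changed"))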
| 25.482759 | 80 | 0.566982 | 904 | 0.611637 | 0 | 0 | 0 | 0 | 0 | 0 | 564 | 0.381597 |