hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
616100bd2e732fe916b837123e49949956a2ff99 | 3,241 | py | Python | elasticdl/python/tests/elasticdl_job_service_test.py | DLPerf/elasticdl | b9c03ea0e81861ae8d349c3d8ffd1f7b588b910b | [
"MIT"
] | null | null | null | elasticdl/python/tests/elasticdl_job_service_test.py | DLPerf/elasticdl | b9c03ea0e81861ae8d349c3d8ffd1f7b588b910b | [
"MIT"
] | null | null | null | elasticdl/python/tests/elasticdl_job_service_test.py | DLPerf/elasticdl | b9c03ea0e81861ae8d349c3d8ffd1f7b588b910b | [
"MIT"
] | null | null | null | # Copyright 2020 The ElasticDL Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from elasticai_api.proto import elasticai_api_pb2
from elasticdl.python.common.args import parse_master_args
from elasticdl.python.master.elasticdl_job_service import ElasticdlJobService
from elasticdl.python.tests.test_utils import (
DatasetName,
TaskManager,
create_recordio_file,
)
from elasticdl_client.common.constants import DistributionStrategy
class ElasticdlJobServiceTest(unittest.TestCase):
def setUp(self):
self._model_zoo_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../../model_zoo"
)
self.arguments = {
"num_ps_pods": "1",
"num_workers": "2",
"job_type": str(elasticai_api_pb2.TRAINING),
"minibatch_size": "32",
"model_zoo": self._model_zoo_path,
"model_def": "mnist.mnist_functional_api.custom_model",
"job_name": "test",
"worker_image": "ubuntu:18.04",
}
self._num_records = 128
def _get_args(self):
args = []
for key, value in self.arguments.items():
args.append("--" + key)
args.append(value)
return args
def test_create_master_for_allreduce(self):
self.arguments[
"distribution_strategy"
] = DistributionStrategy.ALLREDUCE
with tempfile.TemporaryDirectory() as temp_dir_name:
create_recordio_file(
self._num_records,
DatasetName.TEST_MODULE,
1,
temp_dir=temp_dir_name,
)
self.arguments["training_data"] = temp_dir_name
args = self._get_args()
args = parse_master_args(args)
master = ElasticdlJobService(args, TaskManager(args))
self.assertIsNotNone(master)
def test_create_master_without_eval(self):
self.arguments[
"distribution_strategy"
] = DistributionStrategy.ALLREDUCE
self.arguments["model_def"] = "mnist.mnist_functional_api.custom_model"
with tempfile.TemporaryDirectory() as temp_dir_name:
create_recordio_file(
self._num_records,
DatasetName.TEST_MODULE,
1,
temp_dir=temp_dir_name,
)
self.arguments["training_data"] = temp_dir_name
args = self._get_args()
args = parse_master_args(args)
master = ElasticdlJobService(args, TaskManager(args))
self.assertIsNone(master.evaluation_service)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| 35.615385 | 79 | 0.648874 | 2,180 | 0.672632 | 0 | 0 | 0 | 0 | 0 | 0 | 920 | 0.283863 |
61613f61f3fc11a9f27623175f5c0fb4554e699d | 24,590 | py | Python | train/train.py | miramirakim227/SwapNeRF_GT | 84444660a7fc8b5f796503d90f3a055889c44389 | [
"BSD-2-Clause"
] | null | null | null | train/train.py | miramirakim227/SwapNeRF_GT | 84444660a7fc8b5f796503d90f3a055889c44389 | [
"BSD-2-Clause"
] | null | null | null | train/train.py | miramirakim227/SwapNeRF_GT | 84444660a7fc8b5f796503d90f3a055889c44389 | [
"BSD-2-Clause"
] | null | null | null | # Training to a set of multiple objects (e.g. ShapeNet or DTU)
# tensorboard logs available in logs/<expname>
import sys
import os
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src"))
)
import warnings
import trainlib
from model import make_model, loss
from render import NeRFRenderer
from data import get_split_dataset
import util
import numpy as np
import torch.nn.functional as F
import torch
from model import NeuralRenderer
import torchvision.transforms as transforms
from dotmap import DotMap
from PIL import Image
import pdb
from torchvision.utils import save_image, make_grid
warnings.filterwarnings(action='ignore')
def extra_args(parser):
    """Register training-specific CLI options on the given parser.

    Passed as a callback to ``util.args.parse_args`` so the shared parser
    also accepts the options used by this training script.

    :param parser: argparse.ArgumentParser to extend
    :returns: the same parser, with the extra arguments added
    """
    parser.add_argument(
        "--batch_size", "-B", type=int, default=32, help="Object batch size ('SB')"
    )
    parser.add_argument(
        "--nviews",
        "-V",
        type=str,
        default="1",
        help="Number of source views (multiview); put multiple (space delim) to pick randomly per batch ('NV')",
    )
    parser.add_argument(
        "--freeze_enc",
        action="store_true",
        default=None,
        help="Freeze encoder weights and only train MLP",
    )
    parser.add_argument(
        "--recon",
        type=float,
        default=1.,
        help="Loss of reconstruction error",
    )
    parser.add_argument(
        "--swap",
        type=float,
        default=1.,
        help="Weights of swap loss error",
    )
    # NOTE: hyphenated on the CLI; argparse exposes it as args.epoch_period.
    parser.add_argument(
        "--epoch-period",
        type=float,
        default=1.,
        help="period of using discriminator loss",
    )
    parser.add_argument(
        "--disc_lr",
        type=float,
        default=1.,
        help="Discriminator learning rate ratio",
    )
    parser.add_argument(
        "--cam",
        type=float,
        default=1.,
        help="Loss of camera prediction error",
    )
    parser.add_argument(
        "--no_bbox_step",
        type=int,
        default=100000,
        help="Step to stop using bbox sampling",
    )
    # Fixed help text: the original repeated --freeze_enc's description here.
    parser.add_argument(
        "--fixed_test",
        action="store_true",
        default=None,
        help="Use a fixed set of test views instead of random sampling",
    )
    return parser
# ---------------------------------------------------------------------------
# Module-level setup: parse CLI args/config, build dataset, model, renderer.
# These globals (args, conf, device, dset, net, renderer, render_par, nviews)
# are used directly by PixelNeRFTrainer below.
# ---------------------------------------------------------------------------
args, conf = util.args.parse_args(extra_args, training=True, default_ray_batch_size=128)
device = util.get_cuda(args.gpu_id[0])
train_vis_path = os.path.join(args.visual_path, args.name, 'train')
dset, val_dset, _ = get_split_dataset(args.dataset_format, args.datadir)
print(
    "dset z_near {}, z_far {}, lindisp {}".format(dset.z_near, dset.z_far, dset.lindisp)
)
# make_model builds the network from the "model" section of the config.
net = make_model(conf["model"]).to(device=device) # PixelNeRFNet
# Example of conf['renderer'] contents:
# renderer {
#     n_coarse = 64
#     n_fine = 32
#     # Try using expected depth sample
#     n_fine_depth = 16
#     # Noise to add to depth sample
#     depth_std = 0.01
#     # Decay schedule, not used
#     sched = []
#     # White background color (false : black)
#     white_bkgd = True
# }
# from_conf instantiates the volume renderer from the "renderer" config section.
renderer = NeRFRenderer.from_conf(conf["renderer"], lindisp=dset.lindisp,).to(
    device=device # NeRFRenderer -> renderer setting
)
# Parallelize: bind_parallel wraps net + renderer in a _RenderWrapper whose
# forward runs the NeRF rendering (optionally across the given GPUs).
render_par = renderer.bind_parallel(net, args.gpu_id).eval() # _RenderWrapper's forward executes NeRFRenderer
# renderer.bind_parallel -> _RenderWrapper(net, self, simple_output=simple_output)
nviews = list(map(int, args.nviews.split())) # e.g. "1" -> [1]
class PixelNeRFTrainer(trainlib.Trainer):
    """GAN-style trainer for a NeRF-based generator.

    The generator pipeline is: image encoder (``net.encode``) -> volume
    renderer (``render_par``) producing a low-resolution feature map ->
    ``net.neural_renderer`` upsampling features to RGB. Training combines a
    reconstruction loss on images rendered from the ground-truth poses with
    an adversarial loss on images rendered from *swapped* poses (poses of
    other batch elements), judged by ``self.discriminator``.

    Relies on the module-level globals ``net``, ``renderer``, ``render_par``,
    ``dset``, ``val_dset``, ``args``, ``conf``, ``device``, ``nviews`` and
    ``train_vis_path`` defined above. ``self.discriminator``, ``self.optim``,
    ``self.optim_d`` and ``self.vis_interval`` are presumably set up by the
    ``trainlib.Trainer`` base class — not visible here.
    """

    def __init__(self):
        # Base Trainer wires up data loaders, optimizers, checkpointing, etc.
        super().__init__(net, dset, val_dset, args, conf["train"], device=device) # base-class init
        self.renderer_state_path = "%s/%s/_renderer" % (
            self.args.checkpoints_path,
            self.args.name,
        )
        self.lambda_coarse = conf.get_float("loss.lambda_coarse")
        self.lambda_fine = conf.get_float("loss.lambda_fine", 1.0)
        print(
            "lambda coarse {} and fine {}".format(self.lambda_coarse, self.lambda_fine)
        )
        fine_loss_conf = conf["loss.rgb"]
        if "rgb_fine" in conf["loss"]:
            print("using fine loss")
            fine_loss_conf = conf["loss.rgb_fine"]
        self.rgb_fine_crit = loss.get_rgb_loss(fine_loss_conf, False)
        if args.resume:
            # Restore renderer weights if a previous checkpoint exists.
            if os.path.exists(self.renderer_state_path):
                renderer.load_state_dict(
                    torch.load(self.renderer_state_path, map_location=device), strict=False
                )
        self.z_near = dset.z_near # near/far bounds taken from the dataset
        self.z_far = dset.z_far
        # Hard-coded camera intrinsics (focal length and principal point)
        # used for every sample instead of the per-sample dataset values.
        self.focal = torch.tensor([2.187719,]) * 10
        self.c = torch.tensor([8.000000, 8.000000])
        self.use_bbox = args.no_bbox_step > 0
        self.recon_loss = torch.nn.MSELoss()
        self.cam_loss = torch.nn.MSELoss()

    def compute_bce(self, d_out, target):
        """Binary cross-entropy (with logits) of discriminator output
        against a constant real/fake ``target`` label (1 or 0)."""
        targets = d_out.new_full(size=d_out.size(), fill_value=target)
        loss = F.binary_cross_entropy_with_logits(d_out, targets)
        return loss

    def post_batch(self, epoch, batch):
        # Advance the renderer's sampling schedule after each batch.
        renderer.sched_step(args.batch_size)

    def extra_save_state(self):
        # Persist renderer weights alongside the base Trainer checkpoint.
        torch.save(renderer.state_dict(), self.renderer_state_path)

    def calc_losses_eval(self, data, epoch=None, batch=None, global_step=0):
        """Evaluation loss: reconstruct ``val_num`` random views per object
        from a single randomly chosen source view and measure MSE.

        Returns a dict with "rc" (weighted reconstruction loss) and
        "t" (total loss), or {} if the batch carries no images.
        """
        # ----------------------------------------------------------------
        # Build full-image camera rays per object (no pixel subsampling —
        # rays cover the whole low-res feature grid).
        # ----------------------------------------------------------------
        # SB: number of objects in the batch, NV: views per object
        if "images" not in data:
            return {}
        all_images = data["images"].to(device=device) # (SB, NV, 3, H, W)
        all_poses = data["poses"].to(device=device)
        SB, NV, _, H, W = all_images.shape # e.g. 4, 50, 3, 128, 128
        all_focals = data["focal"] # (SB) per-sample focal length (unused; fixed self.focal is used instead)
        all_c = data.get("c") # (SB)
        if self.use_bbox and global_step >= args.no_bbox_step:
            self.use_bbox = False
            print(">>> Stopped using bbox sampling @ iter", global_step)
        all_rgb_gt = []
        all_rays = []
        curr_nviews = nviews[torch.randint(0, len(nviews), ()).item()]
        if curr_nviews == 1: # one random source view index per object
            image_ord = torch.randint(0, NV, (SB, 1)) # single-view setting
        else: # multi-view: filled per object below
            image_ord = torch.empty((SB, curr_nviews), dtype=torch.long)
        val_num = 4
        # Per-object processing: sample val_num target views and build their
        # rays over the feature-map resolution (no RGB pixel sampling here).
        for obj_idx in range(SB):
            indices = torch.randint(0, NV, (val_num,)) # pick val_num of the NV views
            images = all_images[obj_idx][indices] # (val_num, 3, H, W)
            poses = all_poses[obj_idx][indices] # (val_num, 4, 4) multi-view extrinsics
            focal = self.focal
            c = self.c
            if curr_nviews > 1: # multi-view source selection
                # Somewhat inefficient, don't know better way
                image_ord[obj_idx] = torch.from_numpy(
                    np.random.choice(indices, curr_nviews, replace=False) # which views act as source
                )
            images_0to1 = images * 0.5 + 0.5 # undo [-1,1] normalization
            feat_H, feat_W = 16, 16 # target feature-map resolution
            cam_rays = util.gen_rays( # rays at the output feature resolution
                poses, feat_W, feat_H, focal, self.z_near, self.z_far, c=c
            ) # (val_num, feat_H, feat_W, 8)
            rgb_gt_all = images_0to1 # ground truth kept at full image resolution
            rgb_gt_all = (
                rgb_gt_all.permute(0, 2, 3, 1).contiguous().reshape(-1, 3)
            ) # (val_num * H * W, 3)
            # No ray subsampling: every feature-grid ray is rendered.
            rgb_gt = rgb_gt_all
            rays = cam_rays.view(-1, cam_rays.shape[-1]).to(
                device=device
            ) # (val_num * feat_H * feat_W, 8)
            all_rgb_gt.append(rgb_gt)
            all_rays.append(rays)
        all_rgb_gt = torch.stack(all_rgb_gt) # (SB, val_num*H*W, 3)
        all_rays = torch.stack(all_rays) # (SB, val_num*feat_H*feat_W, 8)
        image_ord = image_ord.to(device) # single-view: already all the chosen indices
        src_images = util.batched_index_select_nd( # NS: number of source views
            all_images, image_ord # pick the source image(s) per object
        ) # (SB, NS, 3, H, W)
        src_poses = util.batched_index_select_nd(all_poses, image_ord) # (SB, NS, 4, 4) source extrinsics
        all_poses = all_images = None # free references
        # Encode source views; the renderer then produces feature maps for
        # all val_num target views per object.
        net.encode(
            src_images, # (SB, NS, 3, H, W)
            src_poses,
            self.focal.to(device=device),
            c=self.c.to(device=device) if all_c is not None else None,
        )
        feat_out = render_par(all_rays, val_num, want_weights=True, training=False) # see models.py forward
        # Upsample rendered features to RGB.
        test_out = net.neural_renderer(feat_out)
        loss_dict = {}
        test_out_pred = test_out.reshape(SB, -1, 3)
        rgb_loss = self.recon_loss(test_out_pred, all_rgb_gt)
        loss_dict["rc"] = rgb_loss.item() * args.recon
        loss = rgb_loss
        loss_dict["t"] = loss.item()
        return loss_dict

    def calc_losses_train_generator(self, data, epoch=None, batch=None, global_step=0):
        """Generator losses for one training batch.

        Renders each image from its own pose (reconstruction loss vs. the
        input) and from the batch-reversed ("swapped") poses, which are fed
        to the discriminator for the adversarial term.

        Returns (total generator loss, reconstruction loss, adversarial
        loss), or {} if the batch carries no images.
        """
        if "images" not in data:
            return {}
        all_images = data["images"].to(device=device) # (SB, 3, H, W)
        SB, _, H, W = all_images.shape
        all_poses = data["poses"].to(device=device) # (SB, 4, 4)
        all_focals = data["focal"] # (SB) per-sample focal (unused; fixed self.focal is used)
        all_c = data.get("c") # (SB)
        # Whole batch processed at once (no per-object loop here).
        images_0to1 = all_images * 0.5 + 0.5
        rgb_gt_all = (
            images_0to1.permute(0, 2, 3, 1).contiguous().reshape(-1, 3)
        ) # (B*H*W, 3) — computed but unused below
        # Feature-map resolution for the volume renderer output.
        feat_H = 16 # TODO: could be made configurable via args
        feat_W = 16
        net.encode( # encoder predicts pose/shape/appearance latents
            all_images,
            all_poses,
            self.focal.to(device=device),
            c=self.c.to(device=device)
        )
        ################################################
        # Render from the ground-truth poses (reconstruction path)
        cam_rays = util.gen_rays( # rays at the feature-map resolution
            all_poses, feat_W, feat_H, self.focal, self.z_near, self.z_far, self.c
        ) # (SB, feat_H, feat_W, 8)
        rays = cam_rays.view(SB, -1, cam_rays.shape[-1]).to(device=device) # (SB, feat_H*feat_W, 8)
        val_num = 1
        featmap = render_par(rays, val_num, want_weights=True, training=True,) # rendered feature map
        rgb_fake = net.neural_renderer(featmap)
        ################################################
        # Render from swapped poses (adversarial path): reverse the batch
        # so each object is rendered under another object's pose.
        swap_rot = all_poses.flip(0)
        swap_cam_rays = util.gen_rays( # rays at the feature-map resolution
            swap_rot, feat_W, feat_H, self.focal, self.z_near, self.z_far, self.c
        ) # (SB, feat_H, feat_W, 8)
        swap_rays = swap_cam_rays.view(SB, -1, swap_cam_rays.shape[-1]).to(device=device) # (SB, feat_H*feat_W, 8)
        val_num = 1
        swap_featmap = render_par(swap_rays, val_num, want_weights=True, training=True,) # rendered feature map
        rgb_swap = net.neural_renderer(swap_featmap)
        if global_step % self.vis_interval == 0:
            # Save a grid of (input, reconstruction, swap) for inspection.
            image_grid = make_grid(torch.cat((all_images, rgb_fake, rgb_swap), dim=0), nrow=len(all_images)) # images per row
            save_image(image_grid, f'{train_vis_path}/{epoch}_{batch}_out.jpg')
        # Only the swapped renderings go through the discriminator.
        d_fake = self.discriminator(rgb_swap)
        rgb_loss = self.recon_loss(rgb_fake, all_images)
        # Generator wants the discriminator to label swaps as real (1).
        gen_swap_loss = self.compute_bce(d_fake, 1)
        loss_gen = rgb_loss * args.recon + gen_swap_loss * args.swap
        return loss_gen, rgb_loss, gen_swap_loss

    def calc_losses_train_discriminator(self, data, epoch=None, batch=None, global_step=0):
        """Discriminator losses for one training batch.

        Renders swapped-pose images (detached, so no gradients reach the
        generator) and scores them as fake against the real input images.

        Returns (total discriminator loss, fake-swap loss, real loss),
        or {} if the batch carries no images.
        """
        if "images" not in data:
            return {}
        all_images = data["images"].to(device=device) # (SB, 3, H, W)
        SB, _, H, W = all_images.shape
        all_poses = data["poses"].to(device=device) # (SB, 4, 4)
        all_focals = data["focal"] # (SB) per-sample focal (unused; fixed self.focal is used)
        all_c = data.get("c") # (SB)
        # Whole batch processed at once (no per-object loop here).
        images_0to1 = all_images * 0.5 + 0.5
        rgb_gt_all = (
            images_0to1.permute(0, 2, 3, 1).contiguous().reshape(-1, 3)
        ) # (B*H*W, 3) — computed but unused below
        # Feature-map resolution for the volume renderer output.
        feat_H = 16 # TODO: could be made configurable via args
        feat_W = 16
        net.encode( # encoder predicts pose/shape/appearance latents
            all_images,
            all_poses,
            self.focal.to(device=device),
            c=self.c.to(device=device)
        )
        # (The reconstruction path is not needed here; only the swapped
        # renderings feed the discriminator.)
        ################################################
        # Render from swapped poses (batch reversed).
        swap_rot = all_poses.flip(0)
        swap_cam_rays = util.gen_rays( # rays at the feature-map resolution
            swap_rot, feat_W, feat_H, self.focal, self.z_near, self.z_far, self.c
        ) # (SB, feat_H, feat_W, 8)
        swap_rays = swap_cam_rays.view(SB, -1, swap_cam_rays.shape[-1]).to(device=device) # (SB, feat_H*feat_W, 8)
        val_num = 1
        swap_featmap = render_par(swap_rays, val_num, want_weights=True, training=True,) # rendered feature map
        rgb_swap = net.neural_renderer(swap_featmap)
        # Score real inputs and detached fakes (generator is not updated).
        d_real = self.discriminator(all_images)
        d_fake = self.discriminator(rgb_swap.detach())
        disc_swap_loss = self.compute_bce(d_fake, 0)
        disc_real_loss = self.compute_bce(d_real, 1)
        loss_disc = disc_swap_loss * args.swap + disc_real_loss * args.swap
        return loss_disc, disc_swap_loss, disc_real_loss

    def train_step(self, data, epoch, batch, global_step):
        """One optimization step: discriminator first (only on epochs that
        are multiples of args.epoch_period), then the generator.

        Returns a dict of rounded scalar losses for logging.
        """
        dict_ = {}
        if epoch % args.epoch_period == 0:
            disc_loss, disc_swap, disc_real = self.calc_losses_train_discriminator(data, epoch=epoch, batch=batch, global_step=global_step)
            self.optim_d.zero_grad()
            disc_loss.backward()
            self.optim_d.step()
            dict_['disc_loss'] = round(disc_loss.item(), 3)
            dict_['disc_swap'] = round(disc_swap.item(), 3)
            dict_['disc_real'] = round(disc_real.item(), 3)
        # Generator update follows the discriminator update.
        gen_loss, gen_rgb, gen_swap = self.calc_losses_train_generator(data, epoch=epoch, batch=batch, global_step=global_step)
        self.optim.zero_grad()
        gen_loss.backward()
        self.optim.step()
        dict_['gen_loss'] = round(gen_loss.item(), 3)
        dict_['gen_rgb'] = round(gen_rgb.item(), 3)
        dict_['gen_swap'] = round(gen_swap.item(), 3)
        return dict_

    def eval_step(self, data, global_step):
        """Run the evaluation loss with the renderer in eval mode."""
        renderer.eval()
        losses = self.calc_losses_eval(data, global_step=global_step)
        renderer.train()
        return losses

    # Uses the existing data loader unchanged.
    def vis_step(self, data, global_step, epoch, batch, idx=None):
        """Render visualization grids and compute PSNR for a few objects.

        For each of 4 random objects (or the given ``idx``), encodes one
        random source view, renders it and ``val_num`` other destination
        views, saves a comparison grid to visuals/<name>/, and averages
        PSNR over the objects. Returns (None, {"psnr": mean_psnr}).
        """
        if "images" not in data:
            return {}
        if idx is None:
            batch_indices = np.random.randint(0, data["images"].shape[0], 4) # 4 random objects from the batch
        else:
            print(idx)
            batch_indices = idx
        total_psnr = 0
        cat_list = []
        for batch_idx in batch_indices:
            # One object (all its views) from the batch.
            images = data["images"][batch_idx].to(device=device) # (NV, 3, H, W)
            poses = data["poses"][batch_idx].to(device=device) # (NV, 4, 4)
            focal = self.focal # (1)
            c = self.c
            feat_H, feat_W = 16, 16
            NV, _, H, W = images.shape
            cam_rays = util.gen_rays( # rays for every view of this object
                poses, feat_W, feat_H, focal, self.z_near, self.z_far, c=c # (NV, 16, 16, 8)
            ) # (NV, feat_H, feat_W, 8)
            images_0to1 = images * 0.5 + 0.5 # (NV, 3, H, W)
            val_num = 3
            curr_nviews = nviews[torch.randint(0, len(nviews), (1,)).item()] # typically 1
            views_src = np.sort(np.random.choice(NV, curr_nviews, replace=False)) # source view index/indices
            view_dests = np.random.randint(0, NV - curr_nviews, val_num) # destination view indices
            for vs in range(curr_nviews):
                # Shift destination indices past each source index so the
                # source views are never chosen as destinations.
                view_dests += view_dests >= views_src[vs]
            views_src = torch.from_numpy(views_src)
            # set renderer net to eval mode
            renderer.eval()
            source_views = (
                images_0to1[views_src].repeat(val_num, 1, 1, 1)
                .permute(0, 2, 3, 1)
                .cpu()
                .numpy()
                .reshape(-1, H, W, 3) # (val_num, H, W, 3) — computed but unused below
            )
            gt = images_0to1[view_dests].permute(0, 2, 3, 1).cpu().numpy().reshape(val_num, H, W, 3) # PSNR reference
            with torch.no_grad(): # cam_rays: (NV, 16, 16, 8)
                test_rays_dest = cam_rays[view_dests] # (val_num, 16, 16, 8)
                test_rays_src = cam_rays[views_src].repeat(val_num, 1, 1, 1) # source rays tiled to val_num
                test_images_src = images[views_src].repeat(val_num, 1, 1, 1) # source image tiled to val_num
                test_images_dest = images[view_dests] # (val_num, 3, H, W)
                net.encode(
                    test_images_src, # (val_num, 3, H, W)
                    poses[views_src].repeat(val_num, 1, 1), # (val_num, 4, 4)
                    self.focal.to(device=device),
                    c=self.c.to(device=device),
                )
                test_rays_dest = test_rays_dest.reshape(val_num, feat_H * feat_W, -1) # (val_num, 256, 8)
                test_rays_src = test_rays_src.reshape(val_num, feat_H * feat_W, -1) # (val_num, 256, 8)
                # Render destination and source views, then decode to RGB.
                feat_test_dest = render_par(test_rays_dest, val_num = 1, want_weights=True)
                out_dest = net.neural_renderer(feat_test_dest)
                feat_test_src = render_par(test_rays_src, val_num = 1, want_weights=True)
                out_src = net.neural_renderer(feat_test_src)
            rgb_psnr = out_dest.cpu().numpy().reshape(val_num, H, W, 3)
            # PSNR of rendered destinations vs. ground truth.
            psnr = util.psnr(rgb_psnr, gt)
            total_psnr += psnr
            # Grid row: source image, GT destinations, rendered source, rendered destinations.
            cat = torch.cat((test_images_src[[0]], test_images_dest.reshape(-1, 3, H, W), out_src[[0]].clamp_(0., 1.), out_dest.reshape(-1, 3, H, W).clamp_(0., 1.)), dim=0)
            cat_list.append(cat)
        new_cat = torch.cat(cat_list, dim=0)
        # NOTE(review): nrow uses `cat` from the final loop iteration — OK
        # because every iteration produces the same number of images.
        image_grid = make_grid(new_cat, nrow=len(cat)) # images per row
        save_image(image_grid, f'visuals/{args.name}/{epoch}_{batch}_out.jpg')
        vals = {"psnr": total_psnr / len(batch_indices)}
        print("psnr", total_psnr / len(batch_indices))
        # set the renderer network back to train mode
        renderer.train()
        return None, vals
# Instantiate the trainer and run the training loop.
trainer = PixelNeRFTrainer()
trainer.start()
| 42.98951 | 176 | 0.551037 | 23,126 | 0.862717 | 0 | 0 | 0 | 0 | 0 | 0 | 11,534 | 0.430277 |
61633ee4b11cae72781872ad72fdde6424de3acc | 240,628 | py | Python | Calculation.py | atranel/resqdb | 76b8a5089732ae63c867b734c5053908687122bc | [
"MIT"
] | null | null | null | Calculation.py | atranel/resqdb | 76b8a5089732ae63c867b734c5053908687122bc | [
"MIT"
] | null | null | null | Calculation.py | atranel/resqdb | 76b8a5089732ae63c867b734c5053908687122bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 09 13:28:05 2017
@author: Marie Jankujova
"""
import sys
import os
from datetime import datetime, time, date
import pandas as pd
import numpy as np
from numpy import inf
import pytz
import logging
import scipy.stats as st
from scipy.stats import sem, t
from scipy import mean
class FilterDataset:
    """ The class filtrating the dataframe by date or by country.

    :param df: the dataframe containing preprocessed data
    :type df: dataframe
    :param country: the country code to be included in the data
    :type country: str
    :param date1: the first date included in the filtered dataframe
    :type date1: date
    :param date2: the last date included in the filtered dataframe
    :type date2: date
    :param column: the column used as main for filtration
    :type column: str
    :param by_columns: True if data should be filtered by hospital and discharge date together
    :type by_columns: boolean
    """
    def __init__(self, df, country=None, date1=None, date2=None, column='DISCHARGE_DATE', by_columns=False):
        # Log into a per-day debug file in the current working directory.
        debug = 'debug_' + datetime.now().strftime('%d-%m-%Y') + '.log'
        log_file = os.path.join(os.getcwd(), debug)
        logging.basicConfig(filename=log_file,
                            filemode='a',
                            format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                            datefmt='%H:%M:%S',
                            level=logging.DEBUG)

        self.fdf = df.copy()
        self.country = country
        self.date1 = date1
        self.date2 = date2

        if self.country is not None:
            self.fdf = self._filter_by_country()
            logging.info('FilterDataset: Data have been filtered for country {0}!'.format(self.country))

        if self.date1 is not None and self.date2 is not None:
            if not by_columns:
                if column == 'DISCHARGE_DATE':
                    self.fdf = self._filter_by_date()
                    logging.info('FilterDataset: Data have been filtered for date {0} - {1}!'.format(self.date1, self.date2))
                elif column == 'HOSPITAL_DATE':
                    self.fdf = self._filter_by_hospital_date()
                    logging.info('FilterDataset: Data have been filtered by hospital date for dates {} - {}!'.format(self.date1, self.date2))
            else:
                self.fdf = self._filter_by_hospital_and_discharge_date()
                logging.info('FilterDataset: Data have been filtered by hospital or discharge date for dates {} - {}!'.format(self.date1, self.date2))

    def _filter_by_country(self):
        """ The function filtering dataframe by country.

        BUGFIX: the original computed ``country + "_"`` but then matched on
        the bare code, so a code that is a prefix of another (e.g. 'ES' vs
        'EST') pulled in foreign rows. Match the underscored prefix instead,
        as the original comment intended. ``na=False`` keeps the previous
        behavior of excluding rows with a missing Protocol ID.

        :returns: df -- the dataframe including only rows whose Protocol ID starts with the country code followed by '_'
        """
        prefix = self.country + "_"
        df = self.fdf[self.fdf['Protocol ID'].str.startswith(prefix, na=False)].copy()
        return df

    def _filter_by_date(self):
        """ The function filtering dataframe by discharge date.

        :returns: df -- the dataframe including only rows where discharge date is in the period (date1, date2)
        """
        mask = (self.fdf['DISCHARGE_DATE'] >= self.date1) & (self.fdf['DISCHARGE_DATE'] <= self.date2)
        df = self.fdf[mask].copy()
        return df

    def _filter_by_hospital_date(self):
        ''' The function filtering dataframe by admission date.

        :returns df: the dataframe including only rows where admission date is between these two days
        '''
        mask = (self.fdf['HOSPITAL_DATE'] >= self.date1) & (self.fdf['HOSPITAL_DATE'] <= self.date2)
        df = self.fdf[mask].copy()
        return df

    def _filter_by_hospital_and_discharge_date(self):
        ''' The function filters dataframe by admission and discharge date. Eg. include patient if hospital date or discharge date are in the range.
        '''
        hospital_in_range = (self.fdf['HOSPITAL_DATE'] >= self.date1) & (self.fdf['HOSPITAL_DATE'] <= self.date2)
        discharge_in_range = (self.fdf['DISCHARGE_DATE'] >= self.date1) & (self.fdf['DISCHARGE_DATE'] <= self.date2)
        df = self.fdf[hospital_in_range | discharge_in_range].copy()
        return df
class ComputeStats:
""" The class calculating the general statistics from the preprocessed and filtered data.
:param df: the dataframe containing preprocessed data
:type df: dataframe
:param country: the results for whole country included in the statistics
:type country: bool
:param country_code: the country code used in the names of output files
:type country_code: str
:param comparison: the value saying if it is comparative statistics
:type comparison: bool
:param patient_limit: the number of patients used as limit when evaluating angels awards (default is 30)
:type patiet_limit: int
:param period: the name of the period (default is None)
:type period: str
"""
def __init__(self, df, country = False, country_code = "", comparison=False, patient_limit=30, period=None, raw_data=None):
self.df = df.copy()
self.df.fillna(0, inplace=True)
self.patient_limit = patient_limit
self.period = period
self.raw_data = raw_data
# Rename 'RES-Q reports name' column to 'Site Name'
if 'ESO Angels name' in self.df.columns:
self.df.drop('Site Name', inplace=True, axis=1)
self.df.rename(columns={'ESO Angels name': 'Site Name'}, inplace=True)
def get_country_name(value):
""" The function returning the country name based on country code.
:returns: country_name -- name of the country
"""
if value == "UZB":
value = 'UZ'
country_name = pytz.country_names[value]
return country_name
#if comparison == False:
#self.df['Protocol ID'] = self.df.apply(lambda row: row['Protocol ID'].split()[2] if (len(row['Protocol ID'].split()) == 3) else row['Protocol ID'].split()[0], axis=1)
# uncomment if you want stats between countries and set comparison == True
# self.df['Protocol ID'] = self.df.apply(lambda x: x['Protocol ID'].split("_")[0], axis=1)
# If you want to compare, instead of Site Names will be Country names.
if comparison:
self.df['Protocol ID'] = self.df['Country']
self.df['Site Name'] = self.df['Country']
#if self.df['Protocol ID'].dtype == np.object:
#self.df['Site Name'] = self.df.apply(lambda x: get_country_name(x['Protocol ID']) if get_country_name(x['Protocol ID']) != "" else x['Protocol ID'], axis=1)
if (country):
country_df = self.df.copy()
#self.country_name = pytz.country_names[country_code]
# country['Protocol ID'] = self.country_name
#country['Site Name'] = self.country_name
country_df['Protocol ID'] = country_df['Country']
country_df['Site Name'] = country_df['Country']
self.df = pd.concat([self.df, country_df])
self._country_name = country_df['Country'].iloc[0]
else:
self._country_name = ""
self.statsDf = self.df.groupby(['Protocol ID', 'Site Name']).size().reset_index(name="Total Patients")
# self.statsDf['Site Name'] =
self.statsDf = self.statsDf[['Protocol ID', 'Site Name', 'Total Patients']]
self.statsDf['Median patient age'] = self.df.groupby(['Protocol ID']).AGE.agg(['median']).rename(columns={'median': 'Median patient age'})['Median patient age'].tolist()
        # get patients with ischemic stroke (ISch) (1)
isch = self.df[self.df['STROKE_TYPE'].isin([1])]
self.statsDf['isch_patients'] = self._count_patients(dataframe=isch)
        # get patients with ischemic stroke (IS), intracerebral hemorrhage (ICH), transient ischemic attack (TIA) or cerebral venous thrombosis (CVT) (1, 2, 3, 5)
is_ich_tia_cvt = self.df[self.df['STROKE_TYPE'].isin([1, 2, 3, 5])]
self.statsDf['is_ich_tia_cvt_patients'] = self._count_patients(dataframe=is_ich_tia_cvt)
        # get patients with ischemic stroke (IS), intracerebral hemorrhage (ICH), or cerebral venous thrombosis (CVT) (1, 2, 5)
is_ich_cvt = self.df[self.df['STROKE_TYPE'].isin([1, 2, 5])]
self.statsDf['is_ich_cvt_patients'] = self._count_patients(dataframe=is_ich_cvt)
# Get dataframe with patients who had ischemic stroke (IS) or intracerebral hemorrhage (ICH)
is_ich = self.df[self.df['STROKE_TYPE'].isin([1,2])]
self.statsDf['is_ich_patients'] = self._count_patients(dataframe=is_ich)
        # get patients with ischemic stroke (IS) and transient ischemic attack (TIA) (1, 3)
is_tia = self.df[self.df['STROKE_TYPE'].isin([1, 3])]
self.statsDf['is_tia_patients'] = self._count_patients(dataframe=is_tia)
        # get patients with ischemic stroke (IS), intracerebral hemorrhage (ICH), subarrachnoid hemorrhage (SAH) or cerebral venous thrombosis (CVT) (1, 2, 4, 5)
is_ich_sah_cvt = self.df[self.df['STROKE_TYPE'].isin([1, 2, 4, 5])]
self.statsDf['is_ich_sah_cvt_patients'] = self._count_patients(dataframe=is_ich_sah_cvt)
        # get patients with ischemic stroke (IS), transient ischemic attack (TIA) or cerebral venous thrombosis (CVT) (1, 3, 5)
is_tia_cvt = self.df[self.df['STROKE_TYPE'].isin([1, 3, 5])]
self.statsDf['is_tia_cvt_patients'] = self._count_patients(dataframe=is_tia_cvt)
# get patients with cerebral venous thrombosis (CVT) (5)
cvt = self.df[self.df['STROKE_TYPE'].isin([5])]
self.statsDf['cvt_patients'] = self._count_patients(dataframe=cvt)
        # get patients with intracerebral hemorrhage (ICH) and subarrachnoid hemorrhage (SAH) (2, 4)
ich_sah = self.df[self.df['STROKE_TYPE'].isin([2, 4])]
self.statsDf['ich_sah_patients'] = self._count_patients(dataframe=ich_sah)
        # get patients with intracerebral hemorrhage (ICH) (2)
ich = self.df[self.df['STROKE_TYPE'].isin([2])]
self.statsDf['ich_patients'] = self._count_patients(dataframe=ich)
        # get patients with subarrachnoid hemorrhage (SAH) (4)
sah = self.df[self.df['STROKE_TYPE'].isin([4])]
self.statsDf['sah_patients'] = self._count_patients(dataframe=sah)
        # create subset with no referrals (RECANALIZATION_PROCEDURES != [5,6]) AND (HEMICRANIECTOMY != 3)
discharge_subset = self.df[~self.df['RECANALIZATION_PROCEDURES'].isin([5, 6]) & ~self.df['HEMICRANIECTOMY'].isin([3])]
self.statsDf['discharge_subset_patients'] = self._count_patients(dataframe=discharge_subset)
# Create discharge subset alive
discharge_subset_alive = self.df[~self.df['DISCHARGE_DESTINATION'].isin([5])]
self.statsDf['discharge_subset_alive_patients'] = self._count_patients(dataframe=discharge_subset_alive)
##########
# GENDER #
##########
self.tmp = self.df.groupby(['Protocol ID', 'GENDER']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="GENDER", value=2, new_column_name='# patients female')
self.statsDf['% patients female'] = self.statsDf.apply(lambda x: round(((x['# patients female']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="GENDER", value=1, new_column_name='# patients male')
self.statsDf['% patients male'] = self.statsDf.apply(lambda x: round(((x['# patients male']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
# tag::prenotification[]
####################
# PRE-NOTIFICATION #
####################
pt_3_form_version = self.df.loc[self.df['crf_parent_name'] == 'F_RESQV20DEV_PT_3'].copy()
self.statsDf['pt_3_form_total_patients'] = self._count_patients(dataframe=pt_3_form_version)
if not pt_3_form_version.empty:
if country_code == 'PT':
# prenotification
column = 'PRENOTIFICATION'
if column in df.columns:
self.tmp = pt_3_form_version.groupby(['Protocol ID', column]).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name=column, value=1, new_column_name='# pre-notification - Yes')
self.statsDf['% pre-notification - Yes'] = self.statsDf.apply(lambda x: round(((x['# pre-notification - Yes']/x['pt_3_form_total_patients']) * 100), 2) if x['pt_3_form_total_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name=column, value=2, new_column_name='# pre-notification - No')
self.statsDf['% pre-notification - No'] = self.statsDf.apply(lambda x: round(((x['# pre-notification - No']/x['pt_3_form_total_patients']) * 100), 2) if x['pt_3_form_total_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name=column, value=3, new_column_name='# pre-notification - Not known')
self.statsDf['% pre-notification - Not known'] = self.statsDf.apply(lambda x: round(((x['# pre-notification - Not known']/x['pt_3_form_total_patients']) * 100), 2) if x['pt_3_form_total_patients'] > 0 else 0, axis=1)
del column
# end::prenotification[]
# tag::mrs_prior_stroke[]
####################
# MRS PRIOR STROKE #
####################
if country_code == 'PT':
# MRS prior to stroke
column = 'MRS_PRIOR_STROKE'
if column in df.columns:
# modify values to represent real values of mRS eg. 1 -> 0 etc.
pt_3_form_version.loc[:, 'ADJUSTED_MRS_PRIOR_STROKE'] = pt_3_form_version[column] - 1
# now our unknown is 7
prior_mrs_known = pt_3_form_version.loc[~pt_3_form_version[column].isin([7])].copy()
self.statsDf = self.statsDf.merge(prior_mrs_known.groupby(['Protocol ID']).ADJUSTED_MRS_PRIOR_STROKE.agg(['median']).rename(columns={'median': 'Median mRS prior to stroke'})['Median mRS prior to stroke'].reset_index(), how='outer')
del column
# end::mrs_prior_stroke[]
del pt_3_form_version
self.statsDf.drop(['pt_3_form_total_patients'], inplace=True, axis=1)
######################
# STROKE IN HOSPITAL #
######################
self.tmp = self.df.groupby(['Protocol ID', 'HOSPITAL_STROKE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="HOSPITAL_STROKE", value=1, new_column_name='# patients having stroke in the hospital - Yes')
self.statsDf['% patients having stroke in the hospital - Yes'] = self.statsDf.apply(lambda x: round(((x['# patients having stroke in the hospital - Yes']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="HOSPITAL_STROKE", value=2, new_column_name='# patients having stroke in the hospital - No')
self.statsDf['% patients having stroke in the hospital - No'] = self.statsDf.apply(lambda x: round(((x['# patients having stroke in the hospital - No']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
####################
# RECURRENT STROKE #
####################
self.tmp = self.df.groupby(['Protocol ID', 'RECURRENT_STROKE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="RECURRENT_STROKE", value=-999, new_column_name='tmp')
self.statsDf = self._get_values_for_factors(column_name="RECURRENT_STROKE", value=1, new_column_name='# recurrent stroke - Yes')
self.statsDf['% recurrent stroke - Yes'] = self.statsDf.apply(lambda x: round(((x['# recurrent stroke - Yes']/(x['Total Patients'] - x['tmp'])) * 100), 2) if (x['Total Patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECURRENT_STROKE", value=2, new_column_name='# recurrent stroke - No')
self.statsDf['% recurrent stroke - No'] = self.statsDf.apply(lambda x: round(((x['# recurrent stroke - No']/(x['Total Patients'] - x['tmp'])) * 100), 2) if (x['Total Patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf.drop(['tmp'], inplace=True, axis=1)
###################
# DEPARTMENT TYPE #
###################
self.tmp = self.df.groupby(['Protocol ID', 'DEPARTMENT_TYPE']).size().to_frame('count').reset_index()
# Get patients from old version
self.statsDf = self._get_values_for_factors(column_name="DEPARTMENT_TYPE", value=-999, new_column_name='tmp')
self.statsDf = self._get_values_for_factors(column_name="DEPARTMENT_TYPE", value=1, new_column_name='# department type - neurology')
self.statsDf['% department type - neurology'] = self.statsDf.apply(lambda x: round(((x['# department type - neurology']/(x['Total Patients'] - x['tmp'])) * 100), 2) if (x['Total Patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DEPARTMENT_TYPE", value=2, new_column_name='# department type - neurosurgery')
self.statsDf['% department type - neurosurgery'] = self.statsDf.apply(lambda x: round(((x['# department type - neurosurgery']/(x['Total Patients'] - x['tmp'])) * 100), 2) if (x['Total Patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DEPARTMENT_TYPE", value=3, new_column_name='# department type - anesthesiology/resuscitation/critical care')
self.statsDf['% department type - anesthesiology/resuscitation/critical care'] = self.statsDf.apply(lambda x: round(((x['# department type - anesthesiology/resuscitation/critical care']/(x['Total Patients'] - x['tmp'])) * 100), 2) if (x['Total Patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DEPARTMENT_TYPE", value=4, new_column_name='# department type - internal medicine')
self.statsDf['% department type - internal medicine'] = self.statsDf.apply(lambda x: round(((x['# department type - internal medicine']/(x['Total Patients'] - x['tmp'])) * 100), 2) if (x['Total Patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DEPARTMENT_TYPE", value=5, new_column_name='# department type - geriatrics')
self.statsDf['% department type - geriatrics'] = self.statsDf.apply(lambda x: round(((x['# department type - geriatrics']/(x['Total Patients'] - x['tmp'])) * 100), 2) if (x['Total Patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DEPARTMENT_TYPE", value=6, new_column_name='# department type - Other')
self.statsDf['% department type - Other'] = self.statsDf.apply(lambda x: round(((x['# department type - Other']/(x['Total Patients'] - x['tmp'])) * 100), 2) if (x['Total Patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf.drop(['tmp'], inplace=True, axis=1)
###################
# HOSPITALIZED IN #
###################
self.tmp = self.df.groupby(['Protocol ID', 'HOSPITALIZED_IN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="HOSPITALIZED_IN", value=1, new_column_name='# patients hospitalized in stroke unit / ICU')
self.statsDf['% patients hospitalized in stroke unit / ICU'] = self.statsDf.apply(lambda x: round(((x['# patients hospitalized in stroke unit / ICU']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="HOSPITALIZED_IN", value=2, new_column_name='# patients hospitalized in monitored bed with telemetry')
self.statsDf['% patients hospitalized in monitored bed with telemetry'] = self.statsDf.apply(lambda x: round(((x['# patients hospitalized in monitored bed with telemetry']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="HOSPITALIZED_IN", value=3, new_column_name='# patients hospitalized in standard bed')
self.statsDf['% patients hospitalized in standard bed'] = self.statsDf.apply(lambda x: round(((x['# patients hospitalized in standard bed']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf['# patients hospitalized in stroke unit / ICU or monitored bed'] = self.statsDf['# patients hospitalized in stroke unit / ICU'] + self.statsDf['# patients hospitalized in monitored bed with telemetry']
self.statsDf['% patients hospitalized in stroke unit / ICU or monitored bed'] = self.statsDf.apply(lambda x: round(((x['# patients hospitalized in stroke unit / ICU or monitored bed']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
###############################
# ASSESSED FOR REHABILITATION #
###############################
self.tmp = is_ich_sah_cvt.groupby(['Protocol ID', 'ASSESSED_FOR_REHAB']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ASSESSED_FOR_REHAB", value=3, new_column_name='# patients assessed for rehabilitation - Not known')
self.statsDf['% patients assessed for rehabilitation - Not known'] = self.statsDf.apply(lambda x: round(((x['# patients assessed for rehabilitation - Not known']/x['is_ich_sah_cvt_patients']) * 100), 2) if x['is_ich_sah_cvt_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ASSESSED_FOR_REHAB", value=1, new_column_name='# patients assessed for rehabilitation - Yes')
self.statsDf['% patients assessed for rehabilitation - Yes'] = self.statsDf.apply(lambda x: round(((x['# patients assessed for rehabilitation - Yes']/(x['is_ich_sah_cvt_patients'] - x['# patients assessed for rehabilitation - Not known'])) * 100), 2) if (x['is_ich_sah_cvt_patients'] - x['# patients assessed for rehabilitation - Not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ASSESSED_FOR_REHAB", value=2, new_column_name='# patients assessed for rehabilitation - No')
self.statsDf['% patients assessed for rehabilitation - No'] = self.statsDf.apply(lambda x: round(((x['# patients assessed for rehabilitation - No']/(x['is_ich_sah_cvt_patients'] - x['# patients assessed for rehabilitation - Not known'])) * 100), 2) if (x['is_ich_sah_cvt_patients'] - x['# patients assessed for rehabilitation - Not known']) > 0 else 0, axis=1)
###############
# STROKE TYPE #
###############
self.tmp = self.df.groupby(['Protocol ID', 'STROKE_TYPE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=1, new_column_name='# stroke type - ischemic stroke')
self.statsDf['% stroke type - ischemic stroke'] = self.statsDf.apply(lambda x: round(((x['# stroke type - ischemic stroke']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=2, new_column_name='# stroke type - intracerebral hemorrhage')
self.statsDf['% stroke type - intracerebral hemorrhage'] = self.statsDf.apply(lambda x: round(((x['# stroke type - intracerebral hemorrhage']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=3, new_column_name='# stroke type - transient ischemic attack')
self.statsDf['% stroke type - transient ischemic attack'] = self.statsDf.apply(lambda x: round(((x['# stroke type - transient ischemic attack']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=4, new_column_name='# stroke type - subarrachnoid hemorrhage')
self.statsDf['% stroke type - subarrachnoid hemorrhage'] = self.statsDf.apply(lambda x: round(((x['# stroke type - subarrachnoid hemorrhage']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=5, new_column_name='# stroke type - cerebral venous thrombosis')
self.statsDf['% stroke type - cerebral venous thrombosis'] = self.statsDf.apply(lambda x: round(((x['# stroke type - cerebral venous thrombosis']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STROKE_TYPE", value=6, new_column_name='# stroke type - undetermined stroke')
self.statsDf['% stroke type - undetermined stroke'] = self.statsDf.apply(lambda x: round(((x['# stroke type - undetermined stroke']/x['Total Patients']) * 100), 2) if x['Total Patients'] > 0 else 0, axis=1)
#######################
# CONSCIOUSNESS LEVEL #
#######################
self.tmp = is_ich_sah_cvt.groupby(['Protocol ID', 'CONSCIOUSNESS_LEVEL']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="CONSCIOUSNESS_LEVEL", value=5, new_column_name='# level of consciousness - not known')
self.statsDf['% level of consciousness - not known'] = self.statsDf.apply(lambda x: round(((x['# level of consciousness - not known']/x['is_ich_sah_cvt_patients']) * 100), 2) if x['is_ich_sah_cvt_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CONSCIOUSNESS_LEVEL", value=1, new_column_name='# level of consciousness - alert')
self.statsDf['% level of consciousness - alert'] = self.statsDf.apply(lambda x: round(((x['# level of consciousness - alert']/(x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known'])) * 100), 2) if (x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CONSCIOUSNESS_LEVEL", value=2, new_column_name='# level of consciousness - drowsy')
self.statsDf['% level of consciousness - drowsy'] = self.statsDf.apply(lambda x: round(((x['# level of consciousness - drowsy']/(x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known'])) * 100), 2) if (x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CONSCIOUSNESS_LEVEL", value=3, new_column_name='# level of consciousness - comatose')
self.statsDf['% level of consciousness - comatose'] = self.statsDf.apply(lambda x: round(((x['# level of consciousness - comatose']/(x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known'])) * 100), 2) if (x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CONSCIOUSNESS_LEVEL", value=4, new_column_name='# level of consciousness - GCS')
self.statsDf['% level of consciousness - GCS'] = self.statsDf.apply(lambda x: round(((x['# level of consciousness - GCS']/(x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known'])) * 100), 2) if (x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known']) > 0 else 0, axis=1)
#######
# GCS #
#######
# Get temporary dataframe with the level of consciousness - GCS
gcs = is_ich_sah_cvt[is_ich_sah_cvt['CONSCIOUSNESS_LEVEL'].isin([4])].copy()
# Calculate total number of patients with GCS level of consciousness per site
self.statsDf['gcs_patients'] = self._count_patients(dataframe=gcs)
self.tmp = gcs.groupby(['Protocol ID', 'GCS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="GCS", value=1, new_column_name='# GCS - 15-13')
self.statsDf['% GCS - 15-13'] = self.statsDf.apply(lambda x: round(((x['# GCS - 15-13']/x['gcs_patients']) * 100), 2) if x['gcs_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="GCS", value=2, new_column_name='# GCS - 12-8')
self.statsDf['% GCS - 12-8'] = self.statsDf.apply(lambda x: round(((x['# GCS - 12-8']/x['gcs_patients']) * 100), 2) if x['gcs_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="GCS", value=3, new_column_name='# GCS - <8')
self.statsDf['% GCS - <8'] = self.statsDf.apply(lambda x: round(((x['# GCS - <8']/x['gcs_patients']) * 100), 2) if x['gcs_patients'] > 0 else 0, axis=1)
self.statsDf.drop(['gcs_patients'], inplace=True, axis=1)
# GCS is mapped to the consciousness level. GCS 15-13 is mapped to alert, GCS 12-8 to drowsy and GCS < 8 to comatose
self.statsDf['alert_all'] = self.statsDf['# level of consciousness - alert'] + self.statsDf['# GCS - 15-13']
self.statsDf['alert_all_perc'] = self.statsDf.apply(lambda x: round(((x['alert_all']/(x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known'])) * 100), 2) if (x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known']) > 0 else 0, axis=1)
self.statsDf['drowsy_all'] = self.statsDf['# level of consciousness - drowsy'] + self.statsDf['# GCS - 12-8']
self.statsDf['drowsy_all_perc'] = self.statsDf.apply(lambda x: round(((x['drowsy_all']/(x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known'])) * 100), 2) if (x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known']) > 0 else 0, axis=1)
self.statsDf['comatose_all'] = self.statsDf['# level of consciousness - comatose'] + self.statsDf['# GCS - <8']
self.statsDf['comatose_all_perc'] = self.statsDf.apply(lambda x: round(((x['comatose_all']/(x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known'])) * 100), 2) if (x['is_ich_sah_cvt_patients'] - x['# level of consciousness - not known']) > 0 else 0, axis=1)
del gcs
#########
# NIHSS #
#########
        # Separate calculation for CZ
if country_code == 'CZ':
self.tmp = is_ich.groupby(['Protocol ID', 'NIHSS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="NIHSS", value=1, new_column_name='# NIHSS - Not performed')
self.statsDf['% NIHSS - Not performed'] = self.statsDf.apply(lambda x: round(((x['# NIHSS - Not performed']/x['is_ich_patients']) * 100), 2) if x['is_ich_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="NIHSS", value=2, new_column_name='# NIHSS - Performed')
self.statsDf['% NIHSS - Performed'] = self.statsDf.apply(lambda x: round(((x['# NIHSS - Performed']/x['is_ich_patients']) * 100), 2) if x['is_ich_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="NIHSS", value=3, new_column_name='# NIHSS - Not known')
self.statsDf['% NIHSS - Not known'] = self.statsDf.apply(lambda x: round(((x['# NIHSS - Not known']/x['is_ich_patients']) * 100), 2) if x['is_ich_patients'] > 0 else 0, axis=1)
# Create temporary dataframe with patient who had performed NIHSS (NIHSS = 2)
nihss = is_ich[is_ich['NIHSS'].isin([2])]
tmpDf = nihss.groupby(['Protocol ID']).NIHSS_SCORE.agg(['median']).rename(columns={'median': 'NIHSS median score'})
factorDf = self.statsDf.merge(tmpDf, how='outer', left_on='Protocol ID', right_on='Protocol ID')
factorDf.fillna(0, inplace=True)
self.statsDf['NIHSS median score'] = factorDf['NIHSS median score']
del nihss
else:
self.tmp = is_ich_cvt.groupby(['Protocol ID', 'NIHSS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="NIHSS", value=1, new_column_name='# NIHSS - Not performed')
self.statsDf['% NIHSS - Not performed'] = self.statsDf.apply(lambda x: round(((x['# NIHSS - Not performed']/x['is_ich_cvt_patients']) * 100), 2) if x['is_ich_cvt_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="NIHSS", value=2, new_column_name='# NIHSS - Performed')
self.statsDf['% NIHSS - Performed'] = self.statsDf.apply(lambda x: round(((x['# NIHSS - Performed']/x['is_ich_cvt_patients']) * 100), 2) if x['is_ich_cvt_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="NIHSS", value=3, new_column_name='# NIHSS - Not known')
self.statsDf['% NIHSS - Not known'] = self.statsDf.apply(lambda x: round(((x['# NIHSS - Not known']/x['is_ich_cvt_patients']) * 100), 2) if x['is_ich_cvt_patients'] > 0 else 0, axis=1)
# Create temporary dataframe with patient who had performed NIHSS (NIHSS = 2)
nihss = is_ich_cvt[is_ich_cvt['NIHSS'].isin([2])]
tmpDf = nihss.groupby(['Protocol ID']).NIHSS_SCORE.agg(['median']).rename(columns={'median': 'NIHSS median score'})
factorDf = self.statsDf.merge(tmpDf, how='outer', left_on='Protocol ID', right_on='Protocol ID')
factorDf.fillna(0, inplace=True)
self.statsDf['NIHSS median score'] = factorDf['NIHSS median score']
del nihss
##########
# CT/MRI #
##########
is_ich_tia_cvt_not_referred = is_ich_tia_cvt.loc[~(is_ich_tia_cvt['STROKE_TYPE'].isin([1]) & is_ich_tia_cvt['RECANALIZATION_PROCEDURES'].isin([5,6,7,8]))].copy()
self.statsDf['is_ich_tia_cvt_not_referred_patients'] = self._count_patients(dataframe=is_ich_tia_cvt_not_referred)
self.tmp = is_ich_tia_cvt_not_referred.groupby(['Protocol ID', 'CT_MRI']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="CT_MRI", value=1, new_column_name='# CT/MRI - Not performed')
self.statsDf['% CT/MRI - Not performed'] = self.statsDf.apply(lambda x: round(((x['# CT/MRI - Not performed']/x['is_ich_tia_cvt_not_referred_patients']) * 100), 2) if x['is_ich_tia_cvt_not_referred_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CT_MRI", value=2, new_column_name='# CT/MRI - performed')
self.statsDf['% CT/MRI - performed'] = self.statsDf.apply(lambda x: round(((x['# CT/MRI - performed']/x['is_ich_tia_cvt_not_referred_patients']) * 100), 2) if x['is_ich_tia_cvt_not_referred_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CT_MRI", value=3, new_column_name='# CT/MRI - Not known')
self.statsDf['% CT/MRI - Not known'] = self.statsDf.apply(lambda x: round(((x['# CT/MRI - Not known']/x['is_ich_tia_cvt_not_referred_patients']) * 100), 2) if x['is_ich_tia_cvt_not_referred_patients'] > 0 else 0, axis=1)
# Create temporary dataframe with patients who had performed CT/MRI (CT_MRI = 2)
ct_mri = is_ich_tia_cvt_not_referred[is_ich_tia_cvt_not_referred['CT_MRI'].isin([2])]
ct_mri['CT_TIME'] = pd.to_numeric(ct_mri['CT_TIME'])
self.tmp = ct_mri.groupby(['Protocol ID', 'CT_TIME']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="CT_TIME", value=1, new_column_name='# CT/MRI - Performed within 1 hour after admission')
self.statsDf['% CT/MRI - Performed within 1 hour after admission'] = self.statsDf.apply(lambda x: round(((x['# CT/MRI - Performed within 1 hour after admission']/x['# CT/MRI - performed']) * 100), 2) if x['# CT/MRI - performed'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CT_TIME", value=2, new_column_name='# CT/MRI - Performed later than 1 hour after admission')
self.statsDf['% CT/MRI - Performed later than 1 hour after admission'] = self.statsDf.apply(lambda x: round(((x['# CT/MRI - Performed later than 1 hour after admission']/x['# CT/MRI - performed']) * 100), 2) if x['# CT/MRI - performed'] > 0 else 0, axis=1)
self.statsDf.drop(['is_ich_tia_cvt_not_referred_patients'], inplace=True, axis=1)
del ct_mri, is_ich_tia_cvt_not_referred
####################
# VASCULAR IMAGING #
####################
self.tmp = ich_sah.groupby(['Protocol ID', 'CTA_MRA_DSA']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors_more_values(column_name="CTA_MRA_DSA", value={'1', '1,2', '1,3'}, new_column_name='# vascular imaging - CTA')
self.statsDf['% vascular imaging - CTA'] = self.statsDf.apply(lambda x: round(((x['# vascular imaging - CTA']/x['ich_sah_patients']) * 100), 2) if x['ich_sah_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_more_values(column_name="CTA_MRA_DSA", value={'2', '1,2', '2,3'}, new_column_name='# vascular imaging - MRA')
self.statsDf['% vascular imaging - MRA'] = self.statsDf.apply(lambda x: round(((x['# vascular imaging - MRA']/x['ich_sah_patients']) * 100), 2) if x['ich_sah_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_more_values(column_name="CTA_MRA_DSA", value={'3', '1,3', '2,3'}, new_column_name='# vascular imaging - DSA')
self.statsDf['% vascular imaging - DSA'] = self.statsDf.apply(lambda x: round(((x['# vascular imaging - DSA']/x['ich_sah_patients']) * 100), 2) if x['ich_sah_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_more_values(column_name="CTA_MRA_DSA", value={'4'}, new_column_name='# vascular imaging - None')
self.statsDf['% vascular imaging - None'] = self.statsDf.apply(lambda x: round(((x['# vascular imaging - None']/x['ich_sah_patients']) * 100), 2) if x['ich_sah_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_more_values(column_name="CTA_MRA_DSA", value={'1,2', '1,3', '2,3'}, new_column_name='# vascular imaging - two modalities')
self.statsDf['% vascular imaging - two modalities'] = self.statsDf.apply(lambda x: round(((x['# vascular imaging - two modalities']/x['ich_sah_patients']) * 100), 2) if x['ich_sah_patients'] > 0 else 0, axis=1)
        ### DATA NORMALIZATION
norm_tmp = self.statsDf[['% vascular imaging - CTA', '% vascular imaging - MRA', '% vascular imaging - DSA', '% vascular imaging - None']].copy()
norm_tmp.loc[:,'rowsums'] = norm_tmp.sum(axis=1)
self.statsDf['vascular_imaging_cta_norm'] = ((norm_tmp['% vascular imaging - CTA']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['vascular_imaging_mra_norm'] = ((norm_tmp['% vascular imaging - MRA']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['vascular_imaging_dsa_norm'] = ((norm_tmp['% vascular imaging - DSA']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['vascular_imaging_none_norm'] = ((norm_tmp['% vascular imaging - None']/norm_tmp['rowsums']) * 100).round(decimals=2)
del norm_tmp
##############
# VENTILATOR #
##############
        # Separate calculation for CZ (difference in the stroke types)
if country_code == 'CZ':
self.tmp = is_ich.groupby(['Protocol ID', 'VENTILATOR']).size().to_frame('count').reset_index()
# Get number of patients from the old version
self.statsDf = self._get_values_for_factors(column_name="VENTILATOR", value=-999, new_column_name='tmp')
self.statsDf = self._get_values_for_factors(column_name="VENTILATOR", value=3, new_column_name='# patients put on ventilator - Not known')
self.statsDf['% patients put on ventilator - Not known'] = self.statsDf.apply(lambda x: round(((x['# patients put on ventilator - Not known']/(x['is_ich_patients'] - x['tmp'])) * 100), 2) if (x['is_ich_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="VENTILATOR", value=1, new_column_name='# patients put on ventilator - Yes')
self.statsDf['% patients put on ventilator - Yes'] = self.statsDf.apply(lambda x: round(((x['# patients put on ventilator - Yes']/(x['is_ich_patients'] - x['tmp'] - x['# patients put on ventilator - Not known'])) * 100), 2) if (x['is_ich_patients'] - x['tmp'] - x['# patients put on ventilator - Not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="VENTILATOR", value=2, new_column_name='# patients put on ventilator - No')
self.statsDf['% patients put on ventilator - No'] = self.statsDf.apply(lambda x: round(((x['# patients put on ventilator - No']/(x['is_ich_patients'] - x['tmp'] - x['# patients put on ventilator - Not known'])) * 100), 2) if (x['is_ich_patients'] - x['tmp'] - x['# patients put on ventilator - Not known']) > 0 else 0, axis=1)
self.statsDf.drop(['tmp'], inplace=True, axis=1)
else:
self.tmp = is_ich_cvt.groupby(['Protocol ID', 'VENTILATOR']).size().to_frame('count').reset_index()
# Get number of patients from the old version
self.statsDf = self._get_values_for_factors(column_name="VENTILATOR", value=-999, new_column_name='tmp')
self.statsDf = self._get_values_for_factors(column_name="VENTILATOR", value=3, new_column_name='# patients put on ventilator - Not known')
self.statsDf['% patients put on ventilator - Not known'] = self.statsDf.apply(lambda x: round(((x['# patients put on ventilator - Not known']/(x['is_ich_cvt_patients'] - x['tmp'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="VENTILATOR", value=1, new_column_name='# patients put on ventilator - Yes')
self.statsDf['% patients put on ventilator - Yes'] = self.statsDf.apply(lambda x: round(((x['# patients put on ventilator - Yes']/(x['is_ich_cvt_patients'] - x['tmp'] - x['# patients put on ventilator - Not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['tmp'] - x['# patients put on ventilator - Not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="VENTILATOR", value=2, new_column_name='# patients put on ventilator - No')
self.statsDf['% patients put on ventilator - No'] = self.statsDf.apply(lambda x: round(((x['# patients put on ventilator - No']/(x['is_ich_cvt_patients'] - x['tmp'] - x['# patients put on ventilator - Not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['tmp'] - x['# patients put on ventilator - Not known']) > 0 else 0, axis=1)
self.statsDf.drop(['tmp'], inplace=True, axis=1)
#############################
# RECANALIZATION PROCEDURES #
#############################
self.tmp = isch.groupby(['Protocol ID', 'RECANALIZATION_PROCEDURES']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=1, new_column_name='# recanalization procedures - Not done')
self.statsDf['% recanalization procedures - Not done'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Not done']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=2, new_column_name='# recanalization procedures - IV tPa')
self.statsDf['% recanalization procedures - IV tPa'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - IV tPa']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=3, new_column_name='# recanalization procedures - IV tPa + endovascular treatment')
self.statsDf['% recanalization procedures - IV tPa + endovascular treatment'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - IV tPa + endovascular treatment']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=4, new_column_name='# recanalization procedures - Endovascular treatment alone')
self.statsDf['% recanalization procedures - Endovascular treatment alone'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Endovascular treatment alone']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=5, new_column_name='# recanalization procedures - IV tPa + referred to another centre for endovascular treatment')
self.statsDf['% recanalization procedures - IV tPa + referred to another centre for endovascular treatment'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=6, new_column_name='# recanalization procedures - Referred to another centre for endovascular treatment')
self.statsDf['% recanalization procedures - Referred to another centre for endovascular treatment'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Referred to another centre for endovascular treatment']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=7, new_column_name='# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre')
self.statsDf['% recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=8, new_column_name='# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre')
self.statsDf['% recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="RECANALIZATION_PROCEDURES", value=9, new_column_name='# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre')
self.statsDf['% recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'] = self.statsDf.apply(lambda x: round(((x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
# tag::recanalized_patients[]
recanalized_df = isch.loc[isch['IVT_DONE'].isin([1]) | isch['TBY_DONE'].isin([1])]
self.statsDf['# patients recanalized'] = self._count_patients(dataframe=recanalized_df)
recanalized_denominator_df = isch.loc[isch['IVT_DONE'].isin([1]) | isch['TBY_DONE'].isin([1]) | isch['RECANALIZATION_PROCEDURES'].isin([1])]
self.statsDf['denominator'] =self._count_patients(dataframe=recanalized_denominator_df)
self.statsDf['% patients recanalized'] = self.statsDf.apply(lambda x: round(((x['# patients recanalized']/x['denominator']) * 100), 2) if x['denominator'] > 0 else 0, axis=1)
self.statsDf.drop(['denominator'], inplace=True, axis=1)
del recanalized_df
# end::recanalized_patients[]
"""
# Get recanalization procedure differently for CZ, they are taking the possible values differently
if country_code == 'CZ':
# self.statsDf['# patients recanalized'] = self.statsDf.apply(lambda x: x['# recanalization procedures - IV tPa'] + x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'] + x['# recanalization procedures - Endovascular treatment alone'] + x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] + x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'], axis=1)
recanalized_df = isch.loc[isch['IVT_DONE'].isin([1]) | isch['TBY_DONE'].isin([1])]
self.statsDf['# patients recanalized'] = self._count_patients(dataframe=recanalized_df)
recanalized_denominator_df = isch.loc[isch['IVT_DONE'].isin([1]) | isch['TBY_DONE'].isin([1]) | isch['RECANALIZATION_PROCEDURES'].isin([1])]
self.statsDf['denominator'] =self._count_patients(dataframe=recanalized_denominator_df)
#self.statsDf['# patients recanalized'] = self.statsDf.apply(lambda x: x['# recanalization procedures - IV tPa'] + x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'] + x['# recanalization procedures - Endovascular treatment alone'], axis=1)
#self.statsDf['% patients recanalized'] = self.statsDf.apply(lambda x: round(((x['# patients recanalized']/(x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'])) * 100), 2) if (x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre']) > 0 else 0, axis=1)
#self.statsDf['% patients recanalized'] = self.statsDf.apply(lambda x: round(((x['# patients recanalized']/(x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] - x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'])) * 100), 2) if (x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] - x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre']) > 0 else 0, axis=1)
self.statsDf['% patients recanalized'] = self.statsDf.apply(lambda x: round(((x['# patients recanalized']/x['denominator']) * 100), 2) if x['denominator'] > 0 else 0, axis=1)
self.statsDf.drop(['denominator'], inplace=True, axis=1)
else:
self.statsDf['# patients recanalized'] = self.statsDf.apply(lambda x: x['# recanalization procedures - IV tPa'] + x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'] + x['# recanalization procedures - Endovascular treatment alone'], axis=1)
self.statsDf['% patients recanalized'] = self.statsDf.apply(lambda x: round(((x['# patients recanalized']/(x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] - x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'])) * 100), 2) if (x['isch_patients'] - x['# recanalization procedures - Referred to another centre for endovascular treatment'] - x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] - x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre']) > 0 else 0, axis=1)
"""
##############
# MEDIAN DTN #
##############
def _median_confidence_interval(data, confidence=0.95):
""" The function calculating median confidence interval.
:param confidence: the value of confidence interval
:type confidence: int/float
:returns: rv.median(), rv.interval(confidence)
"""
a = np.array(data)
w = a + 1
# create custom discrete random variable from data set
rv = st.rv_discrete(values=(data, w/w.sum()))
return rv.median(), rv.interval(confidence)
def _mean_confidence_interval(data, confidence=0.95):
""" The function calculating mean confidence interval.
:param confidence: the value of confidence interval
:type confidence: int/float
:returns: m, m-h, m+h
"""
n = len(data)
m = mean(data)
std_err = sem(data)
h = std_err * t.ppf((1 + confidence) / 2, n - 1)
return m, m-h, m+h
# tag::median_dtn[]
# Calculate number of patients who underwent IVT, excluding in-hospital
# strokes (HOSPITAL_STROKE_IVT_TIMESTAMPS == 1) from the counts.
self.tmp = isch.loc[~isch['HOSPITAL_STROKE_IVT_TIMESTAMPS'].isin([1])].groupby(['Protocol ID', 'IVT_DONE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="IVT_DONE", value=1, new_column_name='# IV tPa')
self.statsDf['% IV tPa'] = self.statsDf.apply(lambda x: round(((x['# IV tPa']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
# Create temporary dataframe with the patients who have been treated with
# thrombolysis (again excluding in-hospital strokes).
recanalization_procedure_iv_tpa = isch.loc[(isch['IVT_DONE'].isin([1])) & (~isch['HOSPITAL_STROKE_IVT_TIMESTAMPS'].isin([1]))].copy()
# recanalization_procedure_iv_tpa = isch.loc[isch['IVT_DONE'].isin([1])].copy()
recanalization_procedure_iv_tpa.fillna(0, inplace=True)
# Keep only plausible door-to-needle times: 0 < IVTPA <= 400 minutes.
thrombolysis = recanalization_procedure_iv_tpa[(recanalization_procedure_iv_tpa['IVTPA'] > 0) & (recanalization_procedure_iv_tpa['IVTPA'] <= 400)].copy()
# Per-site median DTN; outer-merged so sites without IVT patients stay in
# statsDf, with the resulting NaNs filled with 0.
tmp = thrombolysis.groupby(['Protocol ID']).IVTPA.agg(['median']).rename(columns={'median': 'Median DTN (minutes)'}).reset_index()
self.statsDf = self.statsDf.merge(tmp, how='outer')
self.statsDf.fillna(0, inplace=True)
del thrombolysis
# end::median_dtn[]
"""
if country_code == 'CZ':
self.tmp = isch.groupby(['Protocol ID', 'IVT_DONE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="IVT_DONE", value=1, new_column_name='# IV tPa')
self.statsDf['% IV tPa'] = self.statsDf.apply(lambda x: round(((x['# IV tPa']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
# Create temporary dataframe with the patients who has been treated with thrombolysis
recanalization_procedure_iv_tpa = isch[isch['IVT_DONE'].isin([1])].copy()
recanalization_procedure_iv_tpa.fillna(0, inplace=True)
# Create one column with times of door to thrombolysis
thrombolysis = recanalization_procedure_iv_tpa[(recanalization_procedure_iv_tpa['IVTPA'] > 0) & (recanalization_procedure_iv_tpa['IVTPA'] <= 400)].copy()
tmp = thrombolysis.groupby(['Protocol ID']).IVTPA.agg(['median']).rename(columns={'median': 'Median DTN (minutes)'}).reset_index()
self.statsDf = self.statsDf.merge(tmp, how='outer')
self.statsDf.fillna(0, inplace=True)
else:
self.statsDf.loc[:, '# IV tPa'] = self.statsDf.apply(lambda x: x['# recanalization procedures - IV tPa'] + x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'], axis=1)
self.statsDf['% IV tPa'] = self.statsDf.apply(lambda x: round(((x['# IV tPa']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
# Create temporary dataframe with the patients who has been treated with thrombolysis
recanalization_procedure_iv_tpa = isch[isch['RECANALIZATION_PROCEDURES'].isin([2, 3, 5])].copy()
recanalization_procedure_iv_tpa.fillna(0, inplace=True)
# Create one column with times of door to thrombolysis
recanalization_procedure_iv_tpa['IVTPA'] = recanalization_procedure_iv_tpa['IVT_ONLY_NEEDLE_TIME'] + recanalization_procedure_iv_tpa['IVT_ONLY_NEEDLE_TIME_MIN'] + recanalization_procedure_iv_tpa['IVT_TBY_NEEDLE_TIME'] + recanalization_procedure_iv_tpa['IVT_TBY_NEEDLE_TIME_MIN'] + recanalization_procedure_iv_tpa['IVT_TBY_REFER_NEEDLE_TIME'] + recanalization_procedure_iv_tpa['IVT_TBY_REFER_NEEDLE_TIME_MIN']
# sites_ids = recanalization_procedure_iv_tpa['Protocol ID'].tolist()
# sites_ids = set(sites_ids)
# interval_vals = {}
# for idx, val in enumerate(sites_ids):
# meanv, lbound, ubound = _mean_confidence_interval(recanalization_procedure_iv_tpa[recanalization_procedure_iv_tpa['Protocol ID'] == val]['IVTPA'].tolist())
# medianv, interval_median = _median_confidence_interval(recanalization_procedure_iv_tpa[recanalization_procedure_iv_tpa['Protocol ID'] == val]['IVTPA'].tolist())
# interval_vals[str(idx)] = [val, "({0:.2f},{1:.2f})".format(lbound, ubound), "{0}".format(interval_median)]
# #interval_vals.append("{0}: ({1}-{2})".format(i, lowb, upb))
# #print(interval_vals)
# interval_vals_df = pd.DataFrame.from_dict(interval_vals, orient='index', columns=['Protocol ID', 'Confidence interval DTN (Mean)', 'Confidence interval DTN (Median)'])
tmp = recanalization_procedure_iv_tpa.groupby(['Protocol ID']).IVTPA.agg(['median']).rename(columns={'median': 'Median DTN (minutes)'}).reset_index()
self.statsDf = self.statsDf.merge(tmp, how='outer')
self.statsDf.fillna(0, inplace=True)
# self.statsDf = self.statsDf.merge(interval_vals_df, how='outer')
"""
##############
# MEDIAN DTG #
##############
# tag::median_dtg[]
# Count patients who underwent thrombectomy (TBY), excluding in-hospital
# strokes (HOSPITAL_STROKE_TBY_TIMESTAMPS == 1) from the counts.
self.tmp = isch.loc[~isch['HOSPITAL_STROKE_TBY_TIMESTAMPS'].isin([1])].groupby(['Protocol ID', 'TBY_DONE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="TBY_DONE", value=1, new_column_name='# TBY')
self.statsDf['% TBY'] = self.statsDf.apply(lambda x: round(((x['# TBY']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
# Create temporary dataframe with the patients who have been treated with
# thrombectomy (again excluding in-hospital strokes).
recanalization_procedure_tby_dtg = isch.loc[(isch['TBY_DONE'].isin([1])) & (~isch['HOSPITAL_STROKE_TBY_TIMESTAMPS'].isin([1]))].copy()
# recanalization_procedure_tby_dtg = isch.loc[isch['TBY_DONE'].isin([1])].copy()
recanalization_procedure_tby_dtg.fillna(0, inplace=True)
# Keep only plausible door-to-groin times: 0 < TBY <= 700 minutes.
thrombectomy = recanalization_procedure_tby_dtg[(recanalization_procedure_tby_dtg['TBY'] > 0) & (recanalization_procedure_tby_dtg['TBY'] <= 700)].copy()
# Per-site median DTG; outer-merged so sites without TBY patients stay in
# statsDf, NaNs filled with 0.
tmp = thrombectomy.groupby(['Protocol ID']).TBY.agg(['median']).rename(columns={'median': 'Median DTG (minutes)'}).reset_index()
self.statsDf = self.statsDf.merge(tmp, how='outer')
self.statsDf.fillna(0, inplace=True)
del thrombectomy
# end::median_dtg[]
"""
# Seperate calculation of TBY for CZ
if country_code == 'CZ':
self.tmp = isch.groupby(['Protocol ID', 'TBY_DONE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="TBY_DONE", value=1, new_column_name='# TBY')
self.statsDf['% TBY'] = self.statsDf.apply(lambda x: round(((x['# TBY']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
# Create temporary dataframe with the patients who has been treated with thrombolysis
recanalization_procedure_tby_dtg = isch[isch['TBY_DONE'].isin([1])].copy()
recanalization_procedure_tby_dtg.fillna(0, inplace=True)
# Create one column with times of door to thrombolysis
thrombectomy = recanalization_procedure_tby_dtg[(recanalization_procedure_tby_dtg['TBY'] > 0) & (recanalization_procedure_tby_dtg['TBY'] <= 700)].copy()
tmp = thrombectomy.groupby(['Protocol ID']).TBY.agg(['median']).rename(columns={'median': 'Median DTG (minutes)'}).reset_index()
self.statsDf = self.statsDf.merge(tmp, how='outer')
self.statsDf.fillna(0, inplace=True)
"""
# self.statsDf.loc[:, '# TBY'] = self.statsDf.apply(lambda x: x['# recanalization procedures - Endovascular treatment alone'] + x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] + x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'], axis=1)
"""
self.statsDf.loc[:, '# TBY'] = self.statsDf.apply(lambda x: x['# recanalization procedures - Endovascular treatment alone'] + x['# recanalization procedures - IV tPa + endovascular treatment'], axis=1)
self.statsDf['% TBY'] = self.statsDf.apply(lambda x: round(((x['# TBY']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
# Create temporary dataframe with the patients who has been treated with thrombectomy
# recanalization_procedure_tby_dtg = isch[isch['RECANALIZATION_PROCEDURES'].isin([4, 3, 6, 7, 8])].copy()
recanalization_procedure_tby_dtg = isch[isch['RECANALIZATION_PROCEDURES'].isin([4, 3])].copy()
recanalization_procedure_tby_dtg.fillna(0, inplace=True)
# Get IVTPA in minutes
# recanalization_procedure_tby_dtg['TBY'] = recanalization_procedure_tby_dtg['TBY_ONLY_GROIN_PUNCTURE_TIME'] + recanalization_procedure_tby_dtg['TBY_ONLY_GROIN_TIME_MIN'] + recanalization_procedure_tby_dtg['IVT_TBY_GROIN_TIME'] + recanalization_procedure_tby_dtg['IVT_TBY_GROIN_TIME_MIN'] + recanalization_procedure_tby_dtg['TBY_REFER_ALL_GROIN_PUNCTURE_TIME'] + recanalization_procedure_tby_dtg['TBY_REFER_LIM_GROIN_PUNCTURE_TIME'] + recanalization_procedure_tby_dtg['TBY_REFER_ALL_GROIN_PUNCTURE_TIME_MIN'] + recanalization_procedure_tby_dtg['TBY_REFER_LIM_GROIN_PUNCTURE_TIME_MIN']
recanalization_procedure_tby_dtg['TBY'] = recanalization_procedure_tby_dtg['TBY_ONLY_GROIN_PUNCTURE_TIME'] + recanalization_procedure_tby_dtg['TBY_ONLY_GROIN_TIME_MIN'] + recanalization_procedure_tby_dtg['IVT_TBY_GROIN_TIME'] + recanalization_procedure_tby_dtg['IVT_TBY_GROIN_TIME_MIN']
"""
# sites_ids = recanalization_procedure_tby_dtg['Protocol ID'].tolist()
# sites_ids = set(sites_ids)
# interval_vals = {}
# for idx, val in enumerate(sites_ids):
# meanv, lbound, ubound = _mean_confidence_interval(recanalization_procedure_tby_dtg[recanalization_procedure_tby_dtg['Protocol ID'] == val]['TBY'].tolist())
# medianv, interval_median = _median_confidence_interval(recanalization_procedure_tby_dtg[recanalization_procedure_tby_dtg['Protocol ID'] == val]['TBY'].tolist())
# interval_vals[str(idx)] = [val, "({0:.2f}-{1:.2f})".format(lbound, ubound), "{0}".format(interval_median)]
# interval_vals_df = pd.DataFrame.from_dict(interval_vals, orient='index', columns=['Protocol ID', 'Confidence interval DTG (Mean)', 'Confidence interval DTG (Median)'])
# recanalization_procedure_tby['TBY'] = recanalization_procedure_tby.loc[:, ['TBY_ONLY_GROIN_PUNCTURE_TIME', 'TBY_ONLY_GROIN_PUNCTURE_TIME_MIN', 'IVT_TBY_GROIN_TIME', 'IVT_TBY_GROIN_TIME_MIN']].sum(1).reset_index()[0].tolist()
"""
else:
self.statsDf.loc[:, '# TBY'] = self.statsDf.apply(lambda x: x['# recanalization procedures - Endovascular treatment alone'] + x['# recanalization procedures - IV tPa + endovascular treatment'], axis=1)
self.statsDf['% TBY'] = self.statsDf.apply(lambda x: round(((x['# TBY']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
# Create temporary dataframe with the patients who has been treated with thrombectomy
recanalization_procedure_tby_dtg = isch[isch['RECANALIZATION_PROCEDURES'].isin([4, 3])].copy()
recanalization_procedure_tby_dtg.fillna(0, inplace=True)
# Create one column with times of door to thrombectomy
recanalization_procedure_tby_dtg['TBY'] = recanalization_procedure_tby_dtg['TBY_ONLY_GROIN_PUNCTURE_TIME'] + recanalization_procedure_tby_dtg['TBY_ONLY_GROIN_TIME_MIN'] + recanalization_procedure_tby_dtg['IVT_TBY_GROIN_TIME'] + recanalization_procedure_tby_dtg['IVT_TBY_GROIN_TIME_MIN']
# sites_ids = recanalization_procedure_tby_dtg['Protocol ID'].tolist()
# sites_ids = set(sites_ids)
# interval_vals = {}
# for idx, val in enumerate(sites_ids):
# meanv, lbound, ubound = _mean_confidence_interval(recanalization_procedure_tby_dtg[recanalization_procedure_tby_dtg['Protocol ID'] == val]['IVTPA'].tolist())
# medianv, interval_median = _median_confidence_interval(recanalization_procedure_tby_dtg[recanalization_procedure_tby_dtg['Protocol ID'] == val]['IVTPA'].tolist())
# interval_vals[str(idx)] = [val, "({0:.2f}-{1:.2f})".format(lbound, ubound), "{0}".format(interval_median)]
# interval_vals_df = pd.DataFrame.from_dict(interval_vals, orient='index', columns=['Protocol ID', 'Confidence interval DTG (Mean)', 'Confidence interval DTG (Median)'])
# recanalization_procedure_tby['TBY'] = recanalization_procedure_tby.loc[:, ['TBY_ONLY_GROIN_PUNCTURE_TIME', 'TBY_ONLY_GROIN_PUNCTURE_TIME_MIN', 'IVT_TBY_GROIN_TIME', 'IVT_TBY_GROIN_TIME_MIN']].sum(1).reset_index()[0].tolist()
tmp = recanalization_procedure_tby_dtg.groupby(['Protocol ID']).TBY.agg(['median']).rename(columns={'median': 'Median DTG (minutes)'}).reset_index()
self.statsDf = self.statsDf.merge(tmp, how='outer')
self.statsDf.fillna(0, inplace=True)
# self.statsDf = self.statsDf.merge(interval_vals_df, how='outer')
"""
###############
# MEDIAN DIDO #
###############
# tag::median_dido[]
# Count patients referred to another centre (REFERRED_DONE == 1); their
# door-in-door-out (DIDO) time is summarized per site below.
self.tmp = isch.groupby(['Protocol ID', 'REFERRED_DONE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="REFERRED_DONE", value=1, new_column_name='# DIDO TBY')
self.statsDf['% DIDO TBY'] = self.statsDf.apply(lambda x: round(((x['# DIDO TBY']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
# Create temporary dataframe with the patients who were referred.
recanalization_procedure_tby_dido = isch[isch['REFERRED_DONE'].isin([1])].copy()
recanalization_procedure_tby_dido.fillna(0, inplace=True)
# Keep only positive DIDO times (0 means missing after the fillna above).
dido = recanalization_procedure_tby_dido[(recanalization_procedure_tby_dido['DIDO'] > 0)].copy()
# Per-site median DIDO; outer-merged so sites without referred patients
# stay in statsDf, NaNs filled with 0.
tmp = dido.groupby(['Protocol ID']).DIDO.agg(['median']).rename(columns={'median': 'Median TBY DIDO (minutes)'}).reset_index()
self.statsDf = self.statsDf.merge(tmp, how='outer')
self.statsDf.fillna(0, inplace=True)
del recanalization_procedure_tby_dido, dido
# end::median_dido[]
"""
if country_code == 'CZ':
# self.statsDf.loc[:, '# DIDO TBY'] = self.statsDf.apply(lambda x: x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'] + x['# recanalization procedures - Referred to another centre for endovascular treatment'], axis=1)
self.statsDf.loc[:, '# DIDO TBY'] = self.statsDf.apply(lambda x: x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'] + x['# recanalization procedures - Referred to another centre for endovascular treatment'] + x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] + x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'], axis=1)
# self.statsDf['% DIDO TBY'] = self.statsDf.apply(lambda x: round(((x['# DIDO TBY']/(x['isch_patients'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'] - x['# recanalization procedures - Not done'])) * 100), 2) if (x['isch_patients'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'] - x['# recanalization procedures - Not done']) > 0 else 0, axis=1)
# Get only patients recanalized TBY
# recanalization_procedure_tby_dido = isch[isch['RECANALIZATION_PROCEDURES'].isin([5, 6, 7, 8])].copy()
# For CZ remove referred for endovascular treatment from DIDO time because they are taking it as the patient was referred to them for TBY
# recanalization_procedure_tby_dido = isch[isch['RECANALIZATION_PROCEDURES'].isin([5, 6])].copy()
# Create temporary dataframe with the patients who has been transferred for recanalization procedures
recanalization_procedure_tby_dido = isch[isch['RECANALIZATION_PROCEDURES'].isin([5, 6, 7, 8])].copy()
recanalization_procedure_tby_dido.fillna(0, inplace=True)
# Get DIDO in minutes
# recanalization_procedure_tby_dido['DIDO'] = recanalization_procedure_tby_dido['IVT_TBY_REFER_DIDO_TIME'] + recanalization_procedure_tby_dido['IVT_TBY_REFER_DIDO_TIME_MIN'] + recanalization_procedure_tby_dido['TBY_REFER_DIDO_TIME'] + recanalization_procedure_tby_dido['TBY_REFER_DIDO_TIME_MIN'] + recanalization_procedure_tby_dido['TBY_REFER_ALL_DIDO_TIME'] + recanalization_procedure_tby_dido['TBY_REFER_ALL_DIDO_TIME_MIN'] + recanalization_procedure_tby_dido['TBY_REFER_LIM_DIDO_TIME'] + recanalization_procedure_tby_dido['TBY_REFER_LIM_DIDO_TIME_MIN']
# recanalization_procedure_tby_dido['DIDO'] = recanalization_procedure_tby_dido['IVT_TBY_REFER_DIDO_TIME'] + recanalization_procedure_tby_dido['IVT_TBY_REFER_DIDO_TIME_MIN'] + recanalization_procedure_tby_dido['TBY_REFER_DIDO_TIME'] + recanalization_procedure_tby_dido['TBY_REFER_DIDO_TIME_MIN']
# Create one column with times of door-in door-out time
recanalization_procedure_tby_dido['DIDO'] = recanalization_procedure_tby_dido['IVT_TBY_REFER_DIDO_TIME'] + recanalization_procedure_tby_dido['IVT_TBY_REFER_DIDO_TIME_MIN'] + recanalization_procedure_tby_dido['TBY_REFER_DIDO_TIME'] + recanalization_procedure_tby_dido['TBY_REFER_DIDO_TIME_MIN'] + recanalization_procedure_tby_dido['TBY_REFER_ALL_DIDO_TIME'] + recanalization_procedure_tby_dido['TBY_REFER_ALL_DIDO_TIME_MIN'] + recanalization_procedure_tby_dido['TBY_REFER_LIM_DIDO_TIME'] + recanalization_procedure_tby_dido['TBY_REFER_LIM_DIDO_TIME_MIN']
tmp = recanalization_procedure_tby_dido.groupby(['Protocol ID']).DIDO.agg(['median']).rename(columns={'median': 'Median TBY DIDO (minutes)'}).reset_index()
self.statsDf = self.statsDf.merge(tmp, how='outer')
self.statsDf.fillna(0, inplace=True)
else:
self.statsDf.loc[:, '# DIDO TBY'] = self.statsDf.apply(lambda x: x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'] + x['# recanalization procedures - Referred to another centre for endovascular treatment'] + x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] + x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre'], axis=1)
# self.statsDf['% DIDO TBY'] = self.statsDf.apply(lambda x: round(((x['# DIDO TBY']/(x['isch_patients'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'] - x['# recanalization procedures - Not done'])) * 100), 2) if (x['isch_patients'] - x['# recanalization procedures - Returned to the initial centre after recanalization procedures were performed at another centre'] - x['# recanalization procedures - Not done']) > 0 else 0, axis=1)
# Create temporary dataframe with the patients who has been transferred for recanalization procedures
recanalization_procedure_tby_dido = isch[isch['RECANALIZATION_PROCEDURES'].isin([5, 6, 7, 8])].copy()
recanalization_procedure_tby_dido.fillna(0, inplace=True)
# Create one column with times of door-in door-out time
recanalization_procedure_tby_dido['DIDO'] = recanalization_procedure_tby_dido['IVT_TBY_REFER_DIDO_TIME'] + recanalization_procedure_tby_dido['IVT_TBY_REFER_DIDO_TIME_MIN'] + recanalization_procedure_tby_dido['TBY_REFER_DIDO_TIME'] + recanalization_procedure_tby_dido['TBY_REFER_DIDO_TIME_MIN'] + recanalization_procedure_tby_dido['TBY_REFER_ALL_DIDO_TIME'] + recanalization_procedure_tby_dido['TBY_REFER_ALL_DIDO_TIME_MIN'] + recanalization_procedure_tby_dido['TBY_REFER_LIM_DIDO_TIME'] + recanalization_procedure_tby_dido['TBY_REFER_LIM_DIDO_TIME_MIN']
tmp = recanalization_procedure_tby_dido.groupby(['Protocol ID']).DIDO.agg(['median']).rename(columns={'median': 'Median TBY DIDO (minutes)'}).reset_index()
self.statsDf = self.statsDf.merge(tmp, how='outer')
self.statsDf.fillna(0, inplace=True)
"""
#######################
# DYSPHAGIA SCREENING #
#######################
# For CZ exclude CVT from the calculation
# tag::dysphagia_screening[]
if country_code == 'CZ':
    # Exclude patients from the CZ IVT/TBY form who were referred to another
    # centre (RECANALIZATION_PROCEDURES 5 or 6): screening happens elsewhere.
    is_ich_not_referred = is_ich.loc[~(is_ich['crf_parent_name'].isin(['F_RESQ_IVT_TBY_CZ_4']) & is_ich['RECANALIZATION_PROCEDURES'].isin([5,6]))].copy()
    self.statsDf['is_ich_not_referred_patients'] = self._count_patients(dataframe=is_ich_not_referred)
    # Per-site counts of each DYSPHAGIA_SCREENING answer.
    self.tmp = is_ich_not_referred.groupby(['Protocol ID', 'DYSPHAGIA_SCREENING']).size().to_frame('count').reset_index()
    self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=6, new_column_name='# dysphagia screening - not known')
    # '% not known' uses all non-referred IS/ICH patients as denominator.
    self.statsDf['% dysphagia screening - not known'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - not known']/x['is_ich_not_referred_patients']) * 100), 2) if x['is_ich_not_referred_patients'] > 0 else 0, axis=1)
    # The remaining percentages exclude 'not known' answers from the denominator.
    self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=1, new_column_name='# dysphagia screening - Guss test')
    self.statsDf['% dysphagia screening - Guss test'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Guss test']/(x['is_ich_not_referred_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_not_referred_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
    self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=2, new_column_name='# dysphagia screening - Other test')
    self.statsDf['% dysphagia screening - Other test'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Other test']/(x['is_ich_not_referred_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_not_referred_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
    self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=3, new_column_name='# dysphagia screening - Another centre')
    self.statsDf['% dysphagia screening - Another centre'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Another centre']/(x['is_ich_not_referred_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_not_referred_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=4, new_column_name='# dysphagia screening - Not done')
self.statsDf['% dysphagia screening - Not done'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Not done']/(x['is_ich_not_referred_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_not_referred_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=5, new_column_name='# dysphagia screening - Unable to test')
self.statsDf['% dysphagia screening - Unable to test'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Unable to test']/(x['is_ich_not_referred_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_not_referred_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
# self.statsDf['# dysphagia screening done'] = self.statsDf['# dysphagia screening - Guss test'] + self.statsDf['# dysphagia screening - Other test'] + self.statsDf['# dysphagia screening - Another centre']
self.statsDf['# dysphagia screening done'] = self.statsDf['# dysphagia screening - Guss test'] + self.statsDf['# dysphagia screening - Other test']
# self.statsDf['% dysphagia screening done'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening done']/(x['is_ich_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf['% dysphagia screening done'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening done']/(x['# dysphagia screening done'] + x['# dysphagia screening - Not done'])) * 100), 2) if (x['# dysphagia screening done'] + x['# dysphagia screening - Not done']) > 0 else 0, axis=1)
else:
self.tmp = is_ich_cvt.groupby(['Protocol ID', 'DYSPHAGIA_SCREENING']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=6, new_column_name='# dysphagia screening - not known')
self.statsDf['% dysphagia screening - not known'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - not known']/x['is_ich_cvt_patients']) * 100), 2) if x['is_ich_cvt_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=1, new_column_name='# dysphagia screening - Guss test')
self.statsDf['% dysphagia screening - Guss test'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Guss test']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=2, new_column_name='# dysphagia screening - Other test')
self.statsDf['% dysphagia screening - Other test'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Other test']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=3, new_column_name='# dysphagia screening - Another centre')
self.statsDf['% dysphagia screening - Another centre'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Another centre']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=4, new_column_name='# dysphagia screening - Not done')
self.statsDf['% dysphagia screening - Not done'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Not done']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING", value=5, new_column_name='# dysphagia screening - Unable to test')
self.statsDf['% dysphagia screening - Unable to test'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening - Unable to test']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
self.statsDf['# dysphagia screening done'] = self.statsDf['# dysphagia screening - Guss test'] + self.statsDf['# dysphagia screening - Other test'] + self.statsDf['# dysphagia screening - Another centre']
self.statsDf['% dysphagia screening done'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening done']/(x['is_ich_cvt_patients'] - x['# dysphagia screening - not known'])) * 100), 2) if (x['is_ich_cvt_patients'] - x['# dysphagia screening - not known']) > 0 else 0, axis=1)
# end::dysphagia_screening[]
############################
# DYPSHAGIA SCREENING TIME #
############################
self.tmp = self.df.groupby(['Protocol ID', 'DYSPHAGIA_SCREENING_TIME']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING_TIME", value=1, new_column_name='# dysphagia screening time - Within first 24 hours')
self.statsDf = self._get_values_for_factors(column_name="DYSPHAGIA_SCREENING_TIME", value=2, new_column_name='# dysphagia screening time - After first 24 hours')
self.statsDf['% dysphagia screening time - Within first 24 hours'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening time - Within first 24 hours']/(x['# dysphagia screening time - Within first 24 hours'] + x['# dysphagia screening time - After first 24 hours'])) * 100), 2) if (x['# dysphagia screening time - Within first 24 hours'] + x['# dysphagia screening time - After first 24 hours']) > 0 else 0, axis=1)
self.statsDf['% dysphagia screening time - After first 24 hours'] = self.statsDf.apply(lambda x: round(((x['# dysphagia screening time - After first 24 hours']/(x['# dysphagia screening time - Within first 24 hours'] + x['# dysphagia screening time - After first 24 hours'])) * 100), 2) if (x['# dysphagia screening time - Within first 24 hours'] + x['# dysphagia screening time - After first 24 hours']) > 0 else 0, axis=1)
###################
# HEMICRANIECTOMY #
###################
self.tmp = isch.groupby(['Protocol ID', 'HEMICRANIECTOMY']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="HEMICRANIECTOMY", value=1, new_column_name='# hemicraniectomy - Yes')
self.statsDf['% hemicraniectomy - Yes'] = self.statsDf.apply(lambda x: round(((x['# hemicraniectomy - Yes']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="HEMICRANIECTOMY", value=2, new_column_name='# hemicraniectomy - No')
self.statsDf['% hemicraniectomy - No'] = self.statsDf.apply(lambda x: round(((x['# hemicraniectomy - No']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="HEMICRANIECTOMY", value=3, new_column_name='# hemicraniectomy - Referred to another centre')
self.statsDf['% hemicraniectomy - Referred to another centre'] = self.statsDf.apply(lambda x: round(((x['# hemicraniectomy - Referred to another centre']/x['isch_patients']) * 100), 2) if x['isch_patients'] > 0 else 0, axis=1)
################
# NEUROSURGERY #
################
self.tmp = ich.groupby(['Protocol ID', 'NEUROSURGERY']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="NEUROSURGERY", value=3, new_column_name='# neurosurgery - Not known')
self.statsDf['% neurosurgery - Not known'] = self.statsDf.apply(lambda x: round(((x['# neurosurgery - Not known']/x['ich_patients']) * 100), 2) if x['ich_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="NEUROSURGERY", value=1, new_column_name='# neurosurgery - Yes')
self.statsDf['% neurosurgery - Yes'] = self.statsDf.apply(lambda x: round(((x['# neurosurgery - Yes']/(x['ich_patients'] - x['# neurosurgery - Not known'])) * 100), 2) if (x['ich_patients'] - x['# neurosurgery - Not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="NEUROSURGERY", value=2, new_column_name='# neurosurgery - No')
self.statsDf['% neurosurgery - No'] = self.statsDf.apply(lambda x: round(((x['# neurosurgery - No']/(x['ich_patients'] - x['# neurosurgery - Not known'])) * 100), 2) if (x['ich_patients'] - x['# neurosurgery - Not known']) > 0 else 0, axis=1)
#####################
# NEUROSURGERY TYPE #
#####################
# Create temporary dataframe of patients who have undergone neurosurgery
neurosurgery = ich[ich['NEUROSURGERY'].isin([1])].copy()
if neurosurgery.empty:
# If no data available set 0 to all variables
self.statsDf['neurosurgery_patients'] = 0
self.statsDf['# neurosurgery type - intracranial hematoma evacuation'] = 0
self.statsDf['% neurosurgery type - intracranial hematoma evacuation'] = 0
self.statsDf['# neurosurgery type - external ventricular drainage'] = 0
self.statsDf['% neurosurgery type - external ventricular drainage'] = 0
self.statsDf['# neurosurgery type - decompressive craniectomy'] = 0
self.statsDf['% neurosurgery type - decompressive craniectomy'] = 0
self.statsDf['# neurosurgery type - Referred to another centre'] = 0
self.statsDf['% neurosurgery type - Referred to another centre'] = 0
else:
self.tmp = neurosurgery.groupby(['Protocol ID', 'NEUROSURGERY_TYPE']).size().to_frame('count').reset_index()
self.statsDf['neurosurgery_patients'] = self._count_patients(dataframe=neurosurgery)
self.statsDf = self._get_values_for_factors(column_name="NEUROSURGERY_TYPE", value=1, new_column_name='# neurosurgery type - intracranial hematoma evacuation')
self.statsDf['% neurosurgery type - intracranial hematoma evacuation'] = self.statsDf.apply(lambda x: round(((x['# neurosurgery type - intracranial hematoma evacuation']/x['neurosurgery_patients']) * 100), 2) if x['neurosurgery_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="NEUROSURGERY_TYPE", value=2, new_column_name='# neurosurgery type - external ventricular drainage')
self.statsDf['% neurosurgery type - external ventricular drainage'] = self.statsDf.apply(lambda x: round(((x['# neurosurgery type - external ventricular drainage']/x['neurosurgery_patients']) * 100), 2) if x['neurosurgery_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="NEUROSURGERY_TYPE", value=3, new_column_name='# neurosurgery type - decompressive craniectomy')
self.statsDf['% neurosurgery type - decompressive craniectomy'] = self.statsDf.apply(lambda x: round(((x['# neurosurgery type - decompressive craniectomy']/x['neurosurgery_patients']) * 100), 2) if x['neurosurgery_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="NEUROSURGERY_TYPE", value=4, new_column_name='# neurosurgery type - Referred to another centre')
self.statsDf['% neurosurgery type - Referred to another centre'] = self.statsDf.apply(lambda x: round(((x['# neurosurgery type - Referred to another centre']/x['neurosurgery_patients']) * 100), 2) if x['neurosurgery_patients'] > 0 else 0, axis=1)
del neurosurgery
###################
# BLEEDING REASON #
###################
self.tmp = ich.groupby(['Protocol ID', 'BLEEDING_REASON']).size().to_frame('count').reset_index()
self.tmp['BLEEDING_REASON'] = self.tmp['BLEEDING_REASON'].astype(str)
# Get number of patients entered in older form
self.statsDf = self._get_values_for_factors(column_name="BLEEDING_REASON", value='-999', new_column_name='tmp')
self.statsDf = self._get_values_for_factors_containing(column_name="BLEEDING_REASON", value='1', new_column_name='# bleeding reason - arterial hypertension')
self.statsDf['% bleeding reason - arterial hypertension'] = self.statsDf.apply(lambda x: round(((x['# bleeding reason - arterial hypertension']/(x['ich_patients'] - x['tmp'])) * 100), 2) if (x['ich_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="BLEEDING_REASON", value="2", new_column_name='# bleeding reason - aneurysm')
self.statsDf['% bleeding reason - aneurysm'] = self.statsDf.apply(lambda x: round(((x['# bleeding reason - aneurysm']/(x['ich_patients'] - x['tmp'])) * 100), 2) if (x['ich_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="BLEEDING_REASON", value="3", new_column_name='# bleeding reason - arterio-venous malformation')
self.statsDf['% bleeding reason - arterio-venous malformation'] = self.statsDf.apply(lambda x: round(((x['# bleeding reason - arterio-venous malformation']/(x['ich_patients'] - x['tmp'])) * 100), 2) if (x['ich_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="BLEEDING_REASON", value="4", new_column_name='# bleeding reason - anticoagulation therapy')
self.statsDf['% bleeding reason - anticoagulation therapy'] = self.statsDf.apply(lambda x: round(((x['# bleeding reason - anticoagulation therapy']/(x['ich_patients'] - x['tmp'])) * 100), 2) if (x['ich_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="BLEEDING_REASON", value="5", new_column_name='# bleeding reason - amyloid angiopathy')
self.statsDf['% bleeding reason - amyloid angiopathy'] = self.statsDf.apply(lambda x: round(((x['# bleeding reason - amyloid angiopathy']/(x['ich_patients'] - x['tmp'])) * 100), 2) if (x['ich_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="BLEEDING_REASON", value="6", new_column_name='# bleeding reason - Other')
self.statsDf['% bleeding reason - Other'] = self.statsDf.apply(lambda x: round(((x['# bleeding reason - Other']/(x['ich_patients'] - x['tmp'])) * 100), 2) if (x['ich_patients'] - x['tmp']) > 0 else 0, axis=1)
### DATA NORMALIZATION
norm_tmp = self.statsDf[['% bleeding reason - arterial hypertension', '% bleeding reason - aneurysm', '% bleeding reason - arterio-venous malformation', '% bleeding reason - anticoagulation therapy', '% bleeding reason - amyloid angiopathy', '% bleeding reason - Other']].copy()
norm_tmp.loc[:, 'rowsums'] = norm_tmp.sum(axis=1)
self.statsDf['bleeding_arterial_hypertension_perc_norm'] = ((norm_tmp['% bleeding reason - arterial hypertension']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['bleeding_aneurysm_perc_norm'] = ((norm_tmp['% bleeding reason - aneurysm']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['bleeding_arterio_venous_malformation_perc_norm'] = ((norm_tmp['% bleeding reason - arterio-venous malformation']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['bleeding_anticoagulation_therapy_perc_norm'] = ((norm_tmp['% bleeding reason - anticoagulation therapy']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['bleeding_amyloid_angiopathy_perc_norm'] = ((norm_tmp['% bleeding reason - amyloid angiopathy']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['bleeding_other_perc_norm'] = ((norm_tmp['% bleeding reason - Other']/norm_tmp['rowsums']) * 100).round(decimals=2)
del norm_tmp
# MORE THAN ONE POSIBILITY
self.statsDf = self._get_values_for_factors_containing(column_name="BLEEDING_REASON", value=",", new_column_name='# bleeding reason - more than one')
self.statsDf['% bleeding reason - more than one'] = self.statsDf.apply(lambda x: round(((x['# bleeding reason - more than one']/(x['ich_patients'] - x['tmp'])) * 100), 2) if (x['ich_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf.drop(['tmp'], inplace=True, axis=1)
###################
# BLEEDING SOURCE #
###################
self.tmp = sah.groupby(['Protocol ID', 'BLEEDING_SOURCE']).size().to_frame('count').reset_index()
self.tmp['BLEEDING_SOURCE'] = self.tmp['BLEEDING_SOURCE'].astype(str)
# Get number of patients entered in older form
# self.statsDf = self._get_values_for_factors(column_name="BLEEDING_SOURCE", value='-999', new_column_name='tmp')
self.statsDf = self._get_values_for_factors_containing(column_name="BLEEDING_SOURCE", value='-999', new_column_name='tmp')
# self.statsDf = self._get_values_for_factors(column_name="BLEEDING_SOURCE", value='1', new_column_name='# bleeding source - Known')
self.statsDf = self._get_values_for_factors_containing(column_name="BLEEDING_SOURCE", value='1', new_column_name='# bleeding source - Known')
self.statsDf['% bleeding source - Known'] = self.statsDf.apply(lambda x: round(((x['# bleeding source - Known']/(x['sah_patients'] - x['tmp'])) * 100), 2) if (x['sah_patients'] - x['tmp']) > 0 else 0, axis=1)
# self.statsDf = self._get_values_for_factors(column_name="BLEEDING_SOURCE", value='2', new_column_name='# bleeding source - Not known')
self.statsDf = self._get_values_for_factors_containing(column_name="BLEEDING_SOURCE", value='2', new_column_name='# bleeding source - Not known')
self.statsDf['% bleeding source - Not known'] = self.statsDf.apply(lambda x: round(((x['# bleeding source - Not known']/(x['sah_patients'] - x['tmp'])) * 100), 2) if (x['sah_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf.drop(['tmp'], inplace=True, axis=1)
################
# INTERVENTION #
################
self.tmp = sah.groupby(['Protocol ID', 'INTERVENTION']).size().to_frame('count').reset_index()
self.tmp['INTERVENTION'] = self.tmp['INTERVENTION'].astype(str)
# Get number of patients entered in older form
self.statsDf = self._get_values_for_factors(column_name="INTERVENTION", value=-999, new_column_name='tmp')
self.statsDf = self._get_values_for_factors_containing(column_name="INTERVENTION", value="1", new_column_name='# intervention - endovascular (coiling)')
self.statsDf['% intervention - endovascular (coiling)'] = self.statsDf.apply(lambda x: round(((x['# intervention - endovascular (coiling)']/(x['sah_patients'] - x['tmp'])) * 100), 2) if (x['sah_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="INTERVENTION", value="2", new_column_name='# intervention - neurosurgical (clipping)')
self.statsDf['% intervention - neurosurgical (clipping)'] = self.statsDf.apply(lambda x: round(((x['# intervention - neurosurgical (clipping)']/(x['sah_patients'] - x['tmp'])) * 100), 2) if (x['sah_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="INTERVENTION", value="3", new_column_name='# intervention - Other neurosurgical treatment (decompression, drainage)')
self.statsDf['% intervention - Other neurosurgical treatment (decompression, drainage)'] = self.statsDf.apply(lambda x: round(((x['# intervention - Other neurosurgical treatment (decompression, drainage)']/(x['sah_patients'] - x['tmp'])) * 100), 2) if (x['sah_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="INTERVENTION", value="4", new_column_name='# intervention - Referred to another hospital for intervention')
self.statsDf['% intervention - Referred to another hospital for intervention'] = self.statsDf.apply(lambda x: round(((x['# intervention - Referred to another hospital for intervention']/(x['sah_patients'] - x['tmp'])) * 100), 2) if (x['sah_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="INTERVENTION", value="5|6", new_column_name='# intervention - None / no intervention')
self.statsDf['% intervention - None / no intervention'] = self.statsDf.apply(lambda x: round(((x['# intervention - None / no intervention']/(x['sah_patients'] - x['tmp'])) * 100), 2) if (x['sah_patients'] - x['tmp']) > 0 else 0, axis=1)
### DATA NORMALIZATION
norm_tmp = self.statsDf[['% intervention - endovascular (coiling)', '% intervention - neurosurgical (clipping)', '% intervention - Other neurosurgical treatment (decompression, drainage)', '% intervention - Referred to another hospital for intervention', '% intervention - None / no intervention']].copy()
norm_tmp.loc[:, 'rowsums'] = norm_tmp.sum(axis=1)
self.statsDf['intervention_endovascular_perc_norm'] = ((norm_tmp['% intervention - endovascular (coiling)']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['intervention_neurosurgical_perc_norm'] = ((norm_tmp['% intervention - neurosurgical (clipping)']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['intervention_other_perc_norm'] = ((norm_tmp['% intervention - Other neurosurgical treatment (decompression, drainage)']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['intervention_referred_perc_norm'] = ((norm_tmp['% intervention - Referred to another hospital for intervention']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['intervention_none_perc_norm'] = ((norm_tmp['% intervention - None / no intervention']/norm_tmp['rowsums']) * 100).round(decimals=2)
del norm_tmp
self.statsDf = self._get_values_for_factors_containing(column_name="INTERVENTION", value=",", new_column_name='# intervention - more than one')
self.statsDf['% intervention - more than one'] = self.statsDf.apply(lambda x: round(((x['# intervention - more than one']/(x['sah_patients'] - x['tmp'])) * 100), 2) if (x['sah_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf.drop(['tmp'], inplace=True, axis=1)
################
# VT TREATMENT #
################
if ('VT_TREATMENT' not in cvt.columns):
cvt['VT_TREATMENT'] = np.nan
self.tmp = cvt.groupby(['Protocol ID', 'VT_TREATMENT']).size().to_frame('count').reset_index()
self.tmp[['VT_TREATMENT']] = self.tmp[['VT_TREATMENT']].astype(str)
self.statsDf = self._get_values_for_factors_containing(column_name="VT_TREATMENT", value="1", new_column_name='# VT treatment - anticoagulation')
self.statsDf['% VT treatment - anticoagulation'] = self.statsDf.apply(lambda x: round(((x['# VT treatment - anticoagulation']/x['cvt_patients']) * 100), 2) if x['cvt_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="VT_TREATMENT", value="2", new_column_name='# VT treatment - thrombectomy')
self.statsDf['% VT treatment - thrombectomy'] = self.statsDf.apply(lambda x: round(((x['# VT treatment - thrombectomy']/x['cvt_patients']) * 100), 2) if x['cvt_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="VT_TREATMENT", value="3", new_column_name='# VT treatment - local thrombolysis')
self.statsDf['% VT treatment - local thrombolysis'] = self.statsDf.apply(lambda x: round(((x['# VT treatment - local thrombolysis']/x['cvt_patients']) * 100), 2) if x['cvt_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="VT_TREATMENT", value="4", new_column_name='# VT treatment - local neurological treatment')
self.statsDf['% VT treatment - local neurological treatment'] = self.statsDf.apply(lambda x: round(((x['# VT treatment - local neurological treatment']/x['cvt_patients']) * 100), 2) if x['cvt_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors_containing(column_name="VT_TREATMENT", value=",", new_column_name='# VT treatment - more than one treatment')
self.statsDf['% VT treatment - more than one treatment'] = self.statsDf.apply(lambda x: round(((x['# VT treatment - more than one treatment']/x['cvt_patients']) * 100), 2) if x['cvt_patients'] > 0 else 0, axis=1)
### DATA NORMALIZATION
norm_tmp = self.statsDf[['% VT treatment - anticoagulation', '% VT treatment - thrombectomy', '% VT treatment - local thrombolysis', '% VT treatment - local neurological treatment']].copy()
norm_tmp.loc[:, 'rowsums'] = norm_tmp.sum(axis=1)
self.statsDf['vt_treatment_anticoagulation_perc_norm'] = ((norm_tmp['% VT treatment - anticoagulation']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['vt_treatment_thrombectomy_perc_norm'] = ((norm_tmp['% VT treatment - thrombectomy']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['vt_treatment_local_thrombolysis_perc_norm'] = ((norm_tmp['% VT treatment - local thrombolysis']/norm_tmp['rowsums']) * 100).round(decimals=2)
self.statsDf['vt_treatment_local_neurological_treatment_perc_norm'] = ((norm_tmp['% VT treatment - local neurological treatment']/norm_tmp['rowsums']) * 100).round(decimals=2)
del norm_tmp
########
# AFIB #
########
# tag::afib[]
if country_code == 'CZ':
not_reffered = is_tia.loc[~(is_tia['crf_parent_name'].isin(['F_RESQ_IVT_TBY_CZ_4']) & is_tia['RECANALIZATION_PROCEDURES'].isin([5,6,8]))].copy()
self.statsDf['not_reffered_patients'] = self._count_patients(dataframe=not_reffered)
# Create dataframe with the patients referred to another hospital
reffered = is_tia[is_tia['RECANALIZATION_PROCEDURES'].isin([5,6,8])].copy()
self.statsDf['reffered_patients'] = self._count_patients(dataframe=reffered)
self.tmp = not_reffered.groupby(['Protocol ID', 'AFIB_FLUTTER']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=1, new_column_name='# afib/flutter - Known')
self.statsDf['% afib/flutter - Known'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Known']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=2, new_column_name='# afib/flutter - Newly-detected at admission')
self.statsDf['% afib/flutter - Newly-detected at admission'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Newly-detected at admission']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=3, new_column_name='# afib/flutter - Detected during hospitalization')
self.statsDf['% afib/flutter - Detected during hospitalization'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Detected during hospitalization']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=4, new_column_name='# afib/flutter - Not detected')
self.statsDf['% afib/flutter - Not detected'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Not detected']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=5, new_column_name='# afib/flutter - Not known')
self.statsDf['% afib/flutter - Not known'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Not known']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
self.statsDf['afib_flutter_detected_only'] = self.statsDf['# afib/flutter - Newly-detected at admission'] + self.statsDf['# afib/flutter - Detected during hospitalization']
self.statsDf['% patients detected for aFib'] = self.statsDf.apply(lambda x: round(((x['afib_flutter_detected_only']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
else:
not_reffered = is_tia[~is_tia['RECANALIZATION_PROCEDURES'].isin([7])].copy()
self.statsDf['not_reffered_patients'] = self._count_patients(dataframe=not_reffered)
# Create dataframe with the patients referred to another hospital
reffered = is_tia[is_tia['RECANALIZATION_PROCEDURES'].isin([7])].copy()
self.statsDf['reffered_patients'] = self._count_patients(dataframe=reffered)
# --- AFIB / FLUTTER ----------------------------------------------------
# Per-site counts and percentages for each AFIB_FLUTTER category (codes
# 1-5) among non-referred patients.  The percentage denominator is
# (is_tia_patients - reffered_patients); a non-positive denominator
# yields 0 to avoid division by zero.
# NOTE(review): 'reffered' is a misspelling of 'referred' that is kept
# here because it matches the existing variable/column names.
self.tmp = not_reffered.groupby(['Protocol ID', 'AFIB_FLUTTER']).size().to_frame('count').reset_index()
# Code 1: known afib/flutter
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=1, new_column_name='# afib/flutter - Known')
self.statsDf['% afib/flutter - Known'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Known']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
# Code 2: newly detected at admission
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=2, new_column_name='# afib/flutter - Newly-detected at admission')
self.statsDf['% afib/flutter - Newly-detected at admission'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Newly-detected at admission']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
# Code 3: detected during hospitalization
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=3, new_column_name='# afib/flutter - Detected during hospitalization')
self.statsDf['% afib/flutter - Detected during hospitalization'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Detected during hospitalization']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
# Code 4: not detected
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=4, new_column_name='# afib/flutter - Not detected')
self.statsDf['% afib/flutter - Not detected'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Not detected']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
# Code 5: not known
self.statsDf = self._get_values_for_factors(column_name="AFIB_FLUTTER", value=5, new_column_name='# afib/flutter - Not known')
self.statsDf['% afib/flutter - Not known'] = self.statsDf.apply(lambda x: round(((x['# afib/flutter - Not known']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
# Detected-only total = newly detected at admission (2) + detected
# during hospitalization (3); same denominator as the categories above.
self.statsDf['afib_flutter_detected_only'] = self.statsDf['# afib/flutter - Newly-detected at admission'] + self.statsDf['# afib/flutter - Detected during hospitalization']
self.statsDf['% patients detected for aFib'] = self.statsDf.apply(lambda x: round(((x['afib_flutter_detected_only']/(x['is_tia_patients'] - x['reffered_patients'])) * 100), 2) if (x['is_tia_patients'] - x['reffered_patients']) > 0 else 0, axis=1)
# end::afib[]
#########################
# AFIB DETECTION METHOD #
#########################
# Detection-method breakdown for patients whose afib/flutter was
# detected during hospitalization (AFIB_FLUTTER == 3).  Denominator for
# all percentages is afib_detected_during_hospitalization_patients
# (0 guarded -> 0%).
if country_code == 'CZ':
    # CZ form stores AFIB_DETECTION_METHOD as a (possibly multi-valued)
    # string, so values are matched by substring via
    # _get_values_for_factors_containing with string codes "1".."5".
    afib_detected_during_hospitalization = not_reffered[not_reffered['AFIB_FLUTTER'].isin([3])].copy()
    self.statsDf['afib_detected_during_hospitalization_patients'] = self._count_patients(dataframe=afib_detected_during_hospitalization)
    afib_detected_during_hospitalization['AFIB_DETECTION_METHOD'] = afib_detected_during_hospitalization['AFIB_DETECTION_METHOD'].astype(str) # Convert values to string
    self.tmp = afib_detected_during_hospitalization.groupby(['Protocol ID', 'AFIB_DETECTION_METHOD']).size().to_frame('count').reset_index()
    self.statsDf = self._get_values_for_factors_containing(column_name="AFIB_DETECTION_METHOD", value="1", new_column_name='# afib detection method - Telemetry with monitor allowing automatic detection of aFib')
    self.statsDf['% afib detection method - Telemetry with monitor allowing automatic detection of aFib'] = self.statsDf.apply(lambda x: round(((x['# afib detection method - Telemetry with monitor allowing automatic detection of aFib']/x['afib_detected_during_hospitalization_patients']) * 100), 2) if x['afib_detected_during_hospitalization_patients'] > 0 else 0, axis=1)
    self.statsDf = self._get_values_for_factors_containing(column_name="AFIB_DETECTION_METHOD", value="2", new_column_name='# afib detection method - Telemetry without monitor allowing automatic detection of aFib')
    self.statsDf['% afib detection method - Telemetry without monitor allowing automatic detection of aFib'] = self.statsDf.apply(lambda x: round(((x['# afib detection method - Telemetry without monitor allowing automatic detection of aFib']/x['afib_detected_during_hospitalization_patients']) * 100), 2) if x['afib_detected_during_hospitalization_patients'] > 0 else 0, axis=1)
    self.statsDf = self._get_values_for_factors_containing(column_name="AFIB_DETECTION_METHOD", value="3", new_column_name='# afib detection method - Holter-type monitoring')
    self.statsDf['% afib detection method - Holter-type monitoring'] = self.statsDf.apply(lambda x: round(((x['# afib detection method - Holter-type monitoring']/x['afib_detected_during_hospitalization_patients']) * 100), 2) if x['afib_detected_during_hospitalization_patients'] > 0 else 0, axis=1)
    self.statsDf = self._get_values_for_factors_containing(column_name="AFIB_DETECTION_METHOD", value="4", new_column_name='# afib detection method - EKG monitoring in an ICU bed with automatic detection of aFib')
    self.statsDf['% afib detection method - EKG monitoring in an ICU bed with automatic detection of aFib'] = self.statsDf.apply(lambda x: round(((x['# afib detection method - EKG monitoring in an ICU bed with automatic detection of aFib']/x['afib_detected_during_hospitalization_patients']) * 100), 2) if x['afib_detected_during_hospitalization_patients'] > 0 else 0, axis=1)
    self.statsDf = self._get_values_for_factors_containing(column_name="AFIB_DETECTION_METHOD", value="5", new_column_name='# afib detection method - EKG monitoring in an ICU bed without automatic detection of aFib')
    self.statsDf['% afib detection method - EKG monitoring in an ICU bed without automatic detection of aFib'] = self.statsDf.apply(lambda x: round(((x['# afib detection method - EKG monitoring in an ICU bed without automatic detection of aFib']/x['afib_detected_during_hospitalization_patients']) * 100), 2) if x['afib_detected_during_hospitalization_patients'] > 0 else 0, axis=1)
else:
    # Non-CZ path: same breakdown but matched with exact integer codes
    # via _get_values_for_factors.
    # NOTE(review): AFIB_DETECTION_METHOD is cast to str here but then
    # looked up with integer values (value=1..5); whether that matches
    # depends on _get_values_for_factors' comparison semantics — verify
    # against that helper (possible type-mismatch bug).
    afib_detected_during_hospitalization = not_reffered[not_reffered['AFIB_FLUTTER'].isin([3])].copy()
    self.statsDf['afib_detected_during_hospitalization_patients'] = self._count_patients(dataframe=afib_detected_during_hospitalization)
    afib_detected_during_hospitalization['AFIB_DETECTION_METHOD'] = afib_detected_during_hospitalization['AFIB_DETECTION_METHOD'].astype(str)
    self.tmp = afib_detected_during_hospitalization.groupby(['Protocol ID', 'AFIB_DETECTION_METHOD']).size().to_frame('count').reset_index()
    self.statsDf = self._get_values_for_factors(column_name="AFIB_DETECTION_METHOD", value=1, new_column_name='# afib detection method - Telemetry with monitor allowing automatic detection of aFib')
    self.statsDf['% afib detection method - Telemetry with monitor allowing automatic detection of aFib'] = self.statsDf.apply(lambda x: round(((x['# afib detection method - Telemetry with monitor allowing automatic detection of aFib']/x['afib_detected_during_hospitalization_patients']) * 100), 2) if x['afib_detected_during_hospitalization_patients'] > 0 else 0, axis=1)
    self.statsDf = self._get_values_for_factors(column_name="AFIB_DETECTION_METHOD", value=2, new_column_name='# afib detection method - Telemetry without monitor allowing automatic detection of aFib')
    self.statsDf['% afib detection method - Telemetry without monitor allowing automatic detection of aFib'] = self.statsDf.apply(lambda x: round(((x['# afib detection method - Telemetry without monitor allowing automatic detection of aFib']/x['afib_detected_during_hospitalization_patients']) * 100), 2) if x['afib_detected_during_hospitalization_patients'] > 0 else 0, axis=1)
    self.statsDf = self._get_values_for_factors(column_name="AFIB_DETECTION_METHOD", value=3, new_column_name='# afib detection method - Holter-type monitoring')
    self.statsDf['% afib detection method - Holter-type monitoring'] = self.statsDf.apply(lambda x: round(((x['# afib detection method - Holter-type monitoring']/x['afib_detected_during_hospitalization_patients']) * 100), 2) if x['afib_detected_during_hospitalization_patients'] > 0 else 0, axis=1)
    self.statsDf = self._get_values_for_factors(column_name="AFIB_DETECTION_METHOD", value=4, new_column_name='# afib detection method - EKG monitoring in an ICU bed with automatic detection of aFib')
    self.statsDf['% afib detection method - EKG monitoring in an ICU bed with automatic detection of aFib'] = self.statsDf.apply(lambda x: round(((x['# afib detection method - EKG monitoring in an ICU bed with automatic detection of aFib']/x['afib_detected_during_hospitalization_patients']) * 100), 2) if x['afib_detected_during_hospitalization_patients'] > 0 else 0, axis=1)
    self.statsDf = self._get_values_for_factors(column_name="AFIB_DETECTION_METHOD", value=5, new_column_name='# afib detection method - EKG monitoring in an ICU bed without automatic detection of aFib')
    self.statsDf['% afib detection method - EKG monitoring in an ICU bed without automatic detection of aFib'] = self.statsDf.apply(lambda x: round(((x['# afib detection method - EKG monitoring in an ICU bed without automatic detection of aFib']/x['afib_detected_during_hospitalization_patients']) * 100), 2) if x['afib_detected_during_hospitalization_patients'] > 0 else 0, axis=1)
###############################
# AFIB OTHER DETECTION METHOD #
###############################
# AFIB_OTHER_RECS breakdown for patients whose afib was not detected or
# not known (AFIB_FLUTTER in {4, 5}).  Percentages use
# afib_not_detected_or_not_known_patients as denominator (0 guarded).
afib_not_detected_or_not_known = not_reffered[not_reffered['AFIB_FLUTTER'].isin([4, 5])].copy()
self.statsDf['afib_not_detected_or_not_known_patients'] = self._count_patients(dataframe=afib_not_detected_or_not_known)
self.tmp = afib_not_detected_or_not_known.groupby(['Protocol ID', 'AFIB_OTHER_RECS']).size().to_frame('count').reset_index()
# Code 1: other detection method recommended/used ("Yes")
self.statsDf = self._get_values_for_factors(column_name="AFIB_OTHER_RECS", value=1, new_column_name='# other afib detection method - Yes')
self.statsDf['% other afib detection method - Yes'] = self.statsDf.apply(lambda x: round(((x['# other afib detection method - Yes']/x['afib_not_detected_or_not_known_patients']) * 100), 2) if x['afib_not_detected_or_not_known_patients'] > 0 else 0, axis=1)
# Code 2: not detected or not known
self.statsDf = self._get_values_for_factors(column_name="AFIB_OTHER_RECS", value=2, new_column_name='# other afib detection method - Not detected or not known')
self.statsDf['% other afib detection method - Not detected or not known'] = self.statsDf.apply(lambda x: round(((x['# other afib detection method - Not detected or not known']/x['afib_not_detected_or_not_known_patients']) * 100), 2) if x['afib_not_detected_or_not_known_patients'] > 0 else 0, axis=1)
############################
# CAROTID ARTERIES IMAGING #
############################
# CAROTID_ARTERIES_IMAGING stats.  For CZ, the field was introduced
# mid-2019, so period-specific handling applies: Q1 2019 gets 'N/A',
# and the other 2019 periods recompute on a date-filtered subset of
# raw_data (FilterDataset).  "Not known" (code 3) is excluded from the
# Yes/No percentage denominators.
# NOTE(review): the three elif branches below are near-identical except
# for the date1/date2 range — a candidate for extraction into a helper
# once the enclosing method can be refactored as a whole.
if country_code == 'CZ':
    # NOTE(review): leftover debug print; also uses local 'period'
    # while the conditions below use self.period — confirm they refer
    # to the same value, then remove.
    print(period)
    if (not comparison and self.period.startswith('Q1') and self.period.endswith('2019')):
        # Field not collected yet in Q1 2019 — report N/A across sites.
        self.statsDf.loc[:, '# carotid arteries imaging - Not known'] = 'N/A'
        self.statsDf.loc[:, '% carotid arteries imaging - Not known'] = 'N/A'
        self.statsDf.loc[:, '# carotid arteries imaging - Yes'] = 'N/A'
        self.statsDf.loc[:, '% carotid arteries imaging - Yes'] = 'N/A'
        self.statsDf.loc[:, '# carotid arteries imaging - No'] = 'N/A'
        self.statsDf.loc[:, '% carotid arteries imaging - No'] = 'N/A'
    elif (not comparison and (self.period.startswith('March_Oct') and self.period.endswith('2019'))):
        # March–Oct 2019: restrict to October 2019 records only.
        date1 = date(2019, 10, 1)
        date2 = date(2019, 10, 31)
        obj = FilterDataset(df=self.raw_data, country='CZ', date1=date1, date2=date2)
        cz_df = obj.fdf.copy()
        # Keep only sites already present in statsDf.
        site_ids = self.statsDf['Protocol ID'].tolist()
        cz_df = cz_df.loc[cz_df['Protocol ID'].isin(site_ids)].copy()
        if (country):
            # Append a country-level aggregate row set by relabelling a
            # copy of the data with the country name as Protocol ID /
            # Site Name.
            country_df = cz_df.copy()
            #self.country_name = pytz.country_names[country_code]
            # country['Protocol ID'] = self.country_name
            #country['Site Name'] = self.country_name
            country_df['Protocol ID'] = country_df['Country']
            country_df['Site Name'] = country_df['Country']
            cz_df = pd.concat([cz_df, country_df])
            del country_df
        # Restrict to stroke types 1 and 3 (per the is_tia naming,
        # presumably ischemic stroke / TIA — confirm against codebook).
        cz_df_is_tia = cz_df.loc[cz_df['STROKE_TYPE'].isin([1,3])].copy()
        self.statsDf['cz_df_is_tia_pts'] = self._count_patients(dataframe=cz_df_is_tia)
        self.tmp = cz_df_is_tia.groupby(['Protocol ID', 'CAROTID_ARTERIES_IMAGING']).size().to_frame('count').reset_index()
        # Code 3: Not known — % over all cz_df_is_tia_pts.
        self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=3, new_column_name='# carotid arteries imaging - Not known')
        self.statsDf['% carotid arteries imaging - Not known'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Not known']/x['cz_df_is_tia_pts']) * 100), 2) if x['cz_df_is_tia_pts'] > 0 else 0, axis=1)
        # Codes 1/2: Yes/No — % over patients with a known answer.
        self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=1, new_column_name='# carotid arteries imaging - Yes')
        self.statsDf['% carotid arteries imaging - Yes'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Yes']/(x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
        self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=2, new_column_name='# carotid arteries imaging - No')
        self.statsDf['% carotid arteries imaging - No'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - No']/(x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
        del cz_df_is_tia, cz_df
    elif (not comparison and (self.period.startswith('Q2') or self.period.startswith('H1')) and self.period.endswith('2019')):
        # Q2 / H1 2019: restrict to 2019-07-19 .. 2019-08-31.
        date1 = date(2019, 7, 19)
        date2 = date(2019, 8, 31)
        obj = FilterDataset(df=self.raw_data, country='CZ', date1=date1, date2=date2)
        cz_df = obj.fdf.copy()
        site_ids = self.statsDf['Protocol ID'].tolist()
        cz_df = cz_df.loc[cz_df['Protocol ID'].isin(site_ids)].copy()
        if (country):
            # Country-level aggregate row set (same pattern as above).
            country_df = cz_df.copy()
            #self.country_name = pytz.country_names[country_code]
            # country['Protocol ID'] = self.country_name
            #country['Site Name'] = self.country_name
            country_df['Protocol ID'] = country_df['Country']
            country_df['Site Name'] = country_df['Country']
            cz_df = pd.concat([cz_df, country_df])
            del country_df
        cz_df_is_tia = cz_df.loc[cz_df['STROKE_TYPE'].isin([1,3])].copy()
        self.statsDf['cz_df_is_tia_pts'] = self._count_patients(dataframe=cz_df_is_tia)
        self.tmp = cz_df_is_tia.groupby(['Protocol ID', 'CAROTID_ARTERIES_IMAGING']).size().to_frame('count').reset_index()
        self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=3, new_column_name='# carotid arteries imaging - Not known')
        self.statsDf['% carotid arteries imaging - Not known'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Not known']/x['cz_df_is_tia_pts']) * 100), 2) if x['cz_df_is_tia_pts'] > 0 else 0, axis=1)
        self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=1, new_column_name='# carotid arteries imaging - Yes')
        self.statsDf['% carotid arteries imaging - Yes'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Yes']/(x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
        self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=2, new_column_name='# carotid arteries imaging - No')
        self.statsDf['% carotid arteries imaging - No'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - No']/(x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
        del cz_df_is_tia, cz_df
    elif (not comparison and self.period == '2019'):
        # Full year 2019: restrict to 2019-07-19 .. 2019-12-31 (field
        # only collected from mid-July 2019 onward).
        date1 = date(2019, 7, 19)
        date2 = date(2019, 12, 31)
        obj = FilterDataset(df=self.raw_data, country='CZ', date1=date1, date2=date2)
        cz_df = obj.fdf.copy()
        site_ids = self.statsDf['Protocol ID'].tolist()
        cz_df = cz_df.loc[cz_df['Protocol ID'].isin(site_ids)].copy()
        if (country):
            # Country-level aggregate row set (same pattern as above).
            country_df = cz_df.copy()
            #self.country_name = pytz.country_names[country_code]
            # country['Protocol ID'] = self.country_name
            #country['Site Name'] = self.country_name
            country_df['Protocol ID'] = country_df['Country']
            country_df['Site Name'] = country_df['Country']
            cz_df = pd.concat([cz_df, country_df])
            del country_df
        cz_df_is_tia = cz_df.loc[cz_df['STROKE_TYPE'].isin([1,3])].copy()
        self.statsDf['cz_df_is_tia_pts'] = self._count_patients(dataframe=cz_df_is_tia)
        self.tmp = cz_df_is_tia.groupby(['Protocol ID', 'CAROTID_ARTERIES_IMAGING']).size().to_frame('count').reset_index()
        self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=3, new_column_name='# carotid arteries imaging - Not known')
        self.statsDf['% carotid arteries imaging - Not known'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Not known']/x['cz_df_is_tia_pts']) * 100), 2) if x['cz_df_is_tia_pts'] > 0 else 0, axis=1)
        self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=1, new_column_name='# carotid arteries imaging - Yes')
        self.statsDf['% carotid arteries imaging - Yes'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Yes']/(x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
        self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=2, new_column_name='# carotid arteries imaging - No')
        self.statsDf['% carotid arteries imaging - No'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - No']/(x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['cz_df_is_tia_pts'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
        del cz_df_is_tia, cz_df
    else:
        # Any other CZ period: compute directly from is_tia (no date
        # re-filtering of raw data).
        self.tmp = is_tia.groupby(['Protocol ID', 'CAROTID_ARTERIES_IMAGING']).size().to_frame('count').reset_index()
        self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=3, new_column_name='# carotid arteries imaging - Not known')
self.statsDf['% carotid arteries imaging - Not known'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Not known']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=1, new_column_name='# carotid arteries imaging - Yes')
self.statsDf['% carotid arteries imaging - Yes'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Yes']/(x['is_tia_patients'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['is_tia_patients'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=2, new_column_name='# carotid arteries imaging - No')
self.statsDf['% carotid arteries imaging - No'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - No']/(x['is_tia_patients'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['is_tia_patients'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
if 'cz_df_is_tia_pts' in self.statsDf.columns:
self.statsDf.drop(['cz_df_is_tia_pts'], inplace=True, axis=1)
else:
self.tmp = is_tia.groupby(['Protocol ID', 'CAROTID_ARTERIES_IMAGING']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=3, new_column_name='# carotid arteries imaging - Not known')
self.statsDf['% carotid arteries imaging - Not known'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Not known']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=1, new_column_name='# carotid arteries imaging - Yes')
self.statsDf['% carotid arteries imaging - Yes'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - Yes']/(x['is_tia_patients'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['is_tia_patients'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_ARTERIES_IMAGING", value=2, new_column_name='# carotid arteries imaging - No')
self.statsDf['% carotid arteries imaging - No'] = self.statsDf.apply(lambda x: round(((x['# carotid arteries imaging - No']/(x['is_tia_patients'] - x['# carotid arteries imaging - Not known'])) * 100), 2) if (x['is_tia_patients'] - x['# carotid arteries imaging - Not known']) > 0 else 0, axis=1)
############################
# ANTITHROMBOTICS WITH CVT #
############################
# Discharge antithrombotic medication among IS/TIA/CVT patients.  Patients who
# died in hospital (DISCHARGE_DESTINATION code 5) are excluded from the
# denominators.
alive_cvt = is_tia_cvt[~is_tia_cvt['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['antithrombotics_patients_with_cvt'] = self._count_patients(dataframe=alive_cvt)
dead_cvt = is_tia_cvt[is_tia_cvt['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['ischemic_transient_cerebral_dead_patients'] = self._count_patients(dataframe=dead_cvt)
# Per-site counts of each ANTITHROMBOTICS answer; read by _get_values_for_factors.
self.tmp = alive_cvt.groupby(['Protocol ID', 'ANTITHROMBOTICS']).size().to_frame('count').reset_index()
del alive_cvt, dead_cvt
# (code, statsDf column suffix) for every ANTITHROMBOTICS category; codes 1-8
# are actual prescriptions, 9 = recommended only, 10 = neither.
medication_columns = [
    (1, 'patients receiving antiplatelets with CVT'),
    (2, 'patients receiving Vit. K antagonist with CVT'),
    (3, 'patients receiving dabigatran with CVT'),
    (4, 'patients receiving rivaroxaban with CVT'),
    (5, 'patients receiving apixaban with CVT'),
    (6, 'patients receiving edoxaban with CVT'),
    (7, 'patients receiving LMWH or heparin in prophylactic dose with CVT'),
    (8, 'patients receiving LMWH or heparin in full anticoagulant dose with CVT'),
    (9, 'patients not prescribed antithrombotics, but recommended with CVT'),
    (10, 'patients neither receiving antithrombotics nor recommended with CVT'),
]
for code, suffix in medication_columns:
    num_col = '# ' + suffix
    pct_col = '% ' + suffix
    self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=code, new_column_name=num_col)
    # Percentage over surviving IS/TIA/CVT patients (col bound via default arg).
    self.statsDf[pct_col] = self.statsDf.apply(
        lambda row, col=num_col: round(row[col] / (row['is_tia_cvt_patients'] - row['ischemic_transient_cerebral_dead_patients']) * 100, 2)
        if row['is_tia_cvt_patients'] > row['ischemic_transient_cerebral_dead_patients'] else 0, axis=1)
## ANTITHROMBOTICS - PATIENTS PRESCRIBED + RECOMMENDED
# Codes 1-8 add up to "prescribed"; adding code 9 gives "prescribed or recommended".
self.statsDf.loc[:, '# patients prescribed antithrombotics with CVT'] = sum(self.statsDf['# ' + suffix] for code, suffix in medication_columns[:8])
self.statsDf['% patients prescribed antithrombotics with CVT'] = self.statsDf.apply(
    lambda row: round(row['# patients prescribed antithrombotics with CVT'] / (row['is_tia_cvt_patients'] - row['ischemic_transient_cerebral_dead_patients']) * 100, 2)
    if row['is_tia_cvt_patients'] > row['ischemic_transient_cerebral_dead_patients'] else 0, axis=1)
self.statsDf.loc[:, '# patients prescribed or recommended antithrombotics with CVT'] = self.statsDf['# patients prescribed antithrombotics with CVT'] + self.statsDf['# patients not prescribed antithrombotics, but recommended with CVT']
# NOTE(review): the numerator subtracts the deceased while the denominator also
# removes the "recommended only" patients — kept exactly as the original formula.
self.statsDf['% patients prescribed or recommended antithrombotics with CVT'] = self.statsDf.apply(
    lambda row: round(
        (row['# patients prescribed or recommended antithrombotics with CVT'] - row['ischemic_transient_cerebral_dead_patients'])
        / (row['is_tia_cvt_patients'] - row['ischemic_transient_cerebral_dead_patients'] - row['# patients not prescribed antithrombotics, but recommended with CVT'])
        * 100, 2)
    if (row['is_tia_cvt_patients'] - row['ischemic_transient_cerebral_dead_patients'] - row['# patients not prescribed antithrombotics, but recommended with CVT']) > 0
    else 0, axis=1)
self.statsDf.fillna(0, inplace=True)
###########################################
# ANTIPLATELETS - PRESCRIBED WITHOUT AFIB #
###########################################
# IS/TIA/CVT patients in whom aFib was not detected or is not known
# (AFIB_FLUTTER codes 4 and 5), plus the deceased among them
# (DISCHARGE_DESTINATION 5) so the percentage is taken over survivors only.
no_afib_cvt = is_tia_cvt[is_tia_cvt['AFIB_FLUTTER'].isin([4, 5])].copy()
no_afib_cvt_dead = no_afib_cvt[no_afib_cvt['DISCHARGE_DESTINATION'].isin([5])].copy()
# Those of them discharged on antiplatelets (ANTITHROMBOTICS code 1).
plt_no_afib_cvt = no_afib_cvt[no_afib_cvt['ANTITHROMBOTICS'].isin([1])].copy()
plt_no_afib_cvt_dead = plt_no_afib_cvt[plt_no_afib_cvt['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['afib_flutter_not_detected_or_not_known_patients_with_cvt'] = self._count_patients(dataframe=no_afib_cvt)
self.statsDf['afib_flutter_not_detected_or_not_known_dead_patients_with_cvt'] = self._count_patients(dataframe=no_afib_cvt_dead)
self.statsDf['prescribed_antiplatelets_no_afib_patients_with_cvt'] = self._count_patients(dataframe=plt_no_afib_cvt)
self.statsDf['prescribed_antiplatelets_no_afib_dead_patients_with_cvt'] = self._count_patients(dataframe=plt_no_afib_cvt_dead)
# Per-site ANTITHROMBOTICS counts for _get_values_for_factors.
self.tmp = no_afib_cvt.groupby(['Protocol ID', 'ANTITHROMBOTICS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=1, new_column_name='# patients prescribed antiplatelets without aFib with CVT')
# % = surviving antiplatelet patients / surviving no-aFib patients.
self.statsDf['% patients prescribed antiplatelets without aFib with CVT'] = self.statsDf.apply(
    lambda row: round(
        (row['# patients prescribed antiplatelets without aFib with CVT'] - row['prescribed_antiplatelets_no_afib_dead_patients_with_cvt'])
        / (row['afib_flutter_not_detected_or_not_known_patients_with_cvt'] - row['afib_flutter_not_detected_or_not_known_dead_patients_with_cvt'])
        * 100, 2)
    if row['afib_flutter_not_detected_or_not_known_patients_with_cvt'] > row['afib_flutter_not_detected_or_not_known_dead_patients_with_cvt']
    else 0, axis=1)
del no_afib_cvt, no_afib_cvt_dead, plt_no_afib_cvt, plt_no_afib_cvt_dead
#########################################
# ANTICOAGULANTS - PRESCRIBED WITH AFIB #
#########################################
# IS/TIA/CVT patients in whom atrial fibrillation/flutter was detected
# (AFIB_FLUTTER codes 1-3 — presumably detected before/during hospitalization;
# TODO confirm against the form codebook).  The temporary frames created here
# are reused by the next section and deleted there.
afib_flutter_detected_with_cvt = is_tia_cvt[is_tia_cvt['AFIB_FLUTTER'].isin([1, 2, 3])].copy()
self.statsDf['afib_flutter_detected_patients_with_cvt'] = self._count_patients(dataframe=afib_flutter_detected_with_cvt)
# Anticoagulated = any ANTITHROMBOTICS answer except antiplatelets (1),
# recommended only (9) or none (10); the deceased (DISCHARGE_DESTINATION 5)
# are excluded from the numerator.
anticoagulants_prescribed_with_cvt = afib_flutter_detected_with_cvt[~afib_flutter_detected_with_cvt['ANTITHROMBOTICS'].isin([1, 10, 9]) & ~afib_flutter_detected_with_cvt['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['# patients prescribed anticoagulants with aFib with CVT'] = self._count_patients(dataframe=anticoagulants_prescribed_with_cvt)
# Patients for whom anticoagulation was only recommended (code 9).
anticoagulants_recommended_with_cvt = afib_flutter_detected_with_cvt[afib_flutter_detected_with_cvt['ANTITHROMBOTICS'].isin([9])].copy()
self.statsDf['anticoagulants_recommended_patients_with_cvt'] = self._count_patients(dataframe=anticoagulants_recommended_with_cvt)
# Deceased among the detected-aFib subgroup; removed from the denominator.
afib_flutter_detected_dead_with = afib_flutter_detected_with_cvt[afib_flutter_detected_with_cvt['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['afib_flutter_detected_dead_patients_with_cvt'] = self._count_patients(dataframe=afib_flutter_detected_dead_with)
# % anticoagulated = prescribed / surviving detected-aFib patients.
self.statsDf['% patients prescribed anticoagulants with aFib with CVT'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed anticoagulants with aFib with CVT']/(x['afib_flutter_detected_patients_with_cvt'] - x['afib_flutter_detected_dead_patients_with_cvt'])) * 100), 2) if (x['afib_flutter_detected_patients_with_cvt'] - x['afib_flutter_detected_dead_patients_with_cvt']) > 0 else 0, axis=1)
##########################################
# ANTITHROMBOTICS - PRESCRIBED WITH AFIB #
##########################################
# Detected-aFib IS/TIA/CVT patients discharged on any antithrombotic
# (ANTITHROMBOTICS 9 = recommended only, 10 = neither) and alive
# (DISCHARGE_DESTINATION 5 = deceased).
antithrombotics_prescribed_with_cvt = afib_flutter_detected_with_cvt[~afib_flutter_detected_with_cvt['ANTITHROMBOTICS'].isin([9, 10]) & ~afib_flutter_detected_with_cvt['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['# patients prescribed antithrombotics with aFib with CVT'] = self._count_patients(dataframe=antithrombotics_prescribed_with_cvt)
# Survivors for whom antithrombotics were recommended but not prescribed;
# they are excluded from the denominator below.
recommended_antithrombotics_with_afib_alive_with_cvt = afib_flutter_detected_with_cvt[afib_flutter_detected_with_cvt['ANTITHROMBOTICS'].isin([9]) & ~afib_flutter_detected_with_cvt['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['recommended_antithrombotics_with_afib_alive_patients_with_cvt'] = self._count_patients(dataframe=recommended_antithrombotics_with_afib_alive_with_cvt)
# BUG FIX: the guard previously tested
#   afib_flutter_detected_dead_patients_with_cvt - afib_flutter_detected_dead_patients_with_cvt - recommended...
# i.e. "-recommended > 0", which can never be true for non-negative counts, so
# this percentage silently evaluated to 0 for every site.  The guard must test
# the same denominator as the ratio: detected - dead - recommended-only.
self.statsDf['% patients prescribed antithrombotics with aFib with CVT'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed antithrombotics with aFib with CVT']/(x['afib_flutter_detected_patients_with_cvt'] - x['afib_flutter_detected_dead_patients_with_cvt'] - x['recommended_antithrombotics_with_afib_alive_patients_with_cvt'])) * 100), 2) if (x['afib_flutter_detected_patients_with_cvt'] - x['afib_flutter_detected_dead_patients_with_cvt'] - x['recommended_antithrombotics_with_afib_alive_patients_with_cvt']) > 0 else 0, axis=1)
del afib_flutter_detected_with_cvt, anticoagulants_prescribed_with_cvt, anticoagulants_recommended_with_cvt, afib_flutter_detected_dead_with, antithrombotics_prescribed_with_cvt, recommended_antithrombotics_with_afib_alive_with_cvt
###############################
# ANTITHROMBOTICS WITHOUT CVT #
###############################
# Same medication breakdown as above but over the IS/TIA population only.
# Create dataframe with dead patients excluded
antithrombotics = is_tia[~is_tia['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['antithrombotics_patients'] = self._count_patients(dataframe=antithrombotics)
# Deceased IS/TIA patients (DISCHARGE_DESTINATION code 5).
ischemic_transient_dead = is_tia[is_tia['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['ischemic_transient_dead_patients'] = self._count_patients(dataframe=ischemic_transient_dead)
del ischemic_transient_dead
# Deceased patients who nevertheless had antithrombotics prescribed or
# recommended (anything but code 10); subtracted from a numerator below.
ischemic_transient_dead_prescribed = is_tia[is_tia['DISCHARGE_DESTINATION'].isin([5]) & ~is_tia['ANTITHROMBOTICS'].isin([10])].copy()
self.statsDf['ischemic_transient_dead_patients_prescribed'] = self._count_patients(dataframe=ischemic_transient_dead_prescribed)
del ischemic_transient_dead_prescribed
# Per-site ANTITHROMBOTICS counts among survivors, for _get_values_for_factors.
self.tmp = antithrombotics.groupby(['Protocol ID', 'ANTITHROMBOTICS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=1, new_column_name='# patients receiving antiplatelets')
self.statsDf['% patients receiving antiplatelets'] = self.statsDf.apply(lambda x: round(((x['# patients receiving antiplatelets']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
# The percentage lines for codes 2-8 are intentionally disabled here: these
# '%' columns are recomputed later over the detected-aFib denominator, and the
# '#' columns are dropped again at the end of this section.
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=2, new_column_name='# patients receiving Vit. K antagonist')
# self.statsDf['% patients receiving Vit. K antagonist'] = self.statsDf.apply(lambda x: round(((x['# patients receiving Vit. K antagonist']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=3, new_column_name='# patients receiving dabigatran')
# self.statsDf['% patients receiving dabigatran'] = self.statsDf.apply(lambda x: round(((x['# patients receiving dabigatran']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=4, new_column_name='# patients receiving rivaroxaban')
# self.statsDf['% patients receiving rivaroxaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving rivaroxaban']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=5, new_column_name='# patients receiving apixaban')
# self.statsDf['% patients receiving apixaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving apixaban']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=6, new_column_name='# patients receiving edoxaban')
# self.statsDf['% patients receiving edoxaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving edoxaban']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=7, new_column_name='# patients receiving LMWH or heparin in prophylactic dose')
# self.statsDf['% patients receiving LMWH or heparin in prophylactic dose'] = self.statsDf.apply(lambda x: round(((x['# patients receiving LMWH or heparin in prophylactic dose']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=8, new_column_name='# patients receiving LMWH or heparin in full anticoagulant dose')
# self.statsDf['% patients receiving LMWH or heparin in full anticoagulant dose'] = self.statsDf.apply(lambda x: round(((x['# patients receiving LMWH or heparin in full anticoagulant dose']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=9, new_column_name='# patients not prescribed antithrombotics, but recommended')
self.statsDf['% patients not prescribed antithrombotics, but recommended'] = self.statsDf.apply(lambda x: round(((x['# patients not prescribed antithrombotics, but recommended']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=10, new_column_name='# patients neither receiving antithrombotics nor recommended')
self.statsDf['% patients neither receiving antithrombotics nor recommended'] = self.statsDf.apply(lambda x: round(((x['# patients neither receiving antithrombotics nor recommended']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
## ANTITHROMBOTICS - PATIENTS PRESCRIBED + RECOMMENDED
# Sum of codes 1-8 = prescribed; adding code 9 = prescribed or recommended.
self.statsDf.loc[:, '# patients prescribed antithrombotics'] = self.statsDf.apply(lambda x: x['# patients receiving antiplatelets'] + x['# patients receiving Vit. K antagonist'] + x['# patients receiving dabigatran'] + x['# patients receiving rivaroxaban'] + x['# patients receiving apixaban'] + x['# patients receiving edoxaban'] + x['# patients receiving LMWH or heparin in prophylactic dose'] + x['# patients receiving LMWH or heparin in full anticoagulant dose'], axis=1)
# self.statsDf['% patients prescribed antithrombotics'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed antithrombotics']/(x['is_tia_cvt_patients'] - x['ischemic_transient_dead_patients'] - x['# patients not prescribed antithrombotics, but recommended'])) * 100), 2) if (x['is_tia_cvt_patients'] - x['ischemic_transient_dead_patients'] - x['# patients not prescribed antithrombotics, but recommended']) > 0 else 0, axis=1)
self.statsDf['% patients prescribed antithrombotics'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed antithrombotics']/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100), 2) if (x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0 else 0, axis=1)
self.statsDf.loc[:, '# patients prescribed or recommended antithrombotics'] = self.statsDf.apply(lambda x: x['# patients receiving antiplatelets'] + x['# patients receiving Vit. K antagonist'] + x['# patients receiving dabigatran'] + x['# patients receiving rivaroxaban'] + x['# patients receiving apixaban'] + x['# patients receiving edoxaban'] + x['# patients receiving LMWH or heparin in prophylactic dose'] + x['# patients receiving LMWH or heparin in full anticoagulant dose'] + x['# patients not prescribed antithrombotics, but recommended'], axis=1)
# From patients prescribed or recommended antithrombotics remove patient who had prescribed antithrombotics and were dead (nominator)
# self.statsDf['% patients prescribed or recommended antithrombotics'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed or recommended antithrombotics'] - x['ischemic_transient_dead_patients_prescribed'])/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'] - x['# patients not prescribed antithrombotics, but recommended'])) * 100, 2) if ((x['is_tia_patients'] - x['ischemic_transient_dead_patients'] - x['# patients not prescribed antithrombotics, but recommended']) > 0) else 0, axis=1)
self.statsDf['% patients prescribed or recommended antithrombotics'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed or recommended antithrombotics'] - x['ischemic_transient_dead_patients_prescribed'])/(x['is_tia_patients'] - x['ischemic_transient_dead_patients'])) * 100, 2) if ((x['is_tia_patients'] - x['ischemic_transient_dead_patients']) > 0) else 0, axis=1)
# Drop the redundant columns
# (the '#' columns for codes 2-8 are re-created with aFib-based percentages
# in the "prescribed with aFib" section below).
self.statsDf.drop(['# patients receiving Vit. K antagonist', '# patients receiving dabigatran', '# patients receiving rivaroxaban', '# patients receiving apixaban', '# patients receiving edoxaban', '# patients receiving LMWH or heparin in prophylactic dose','# patients receiving LMWH or heparin in full anticoagulant dose'], axis=1, inplace=True)
self.statsDf.fillna(0, inplace=True)
###########################################
# ANTIPLATELETS - PRESCRIBED WITHOUT AFIB #
###########################################
# Same computation as the CVT variant, but over the IS/TIA population:
# patients without detected/known aFib (AFIB_FLUTTER codes 4 and 5) and the
# deceased among them (DISCHARGE_DESTINATION 5), so the percentage is taken
# over survivors only.
no_afib = is_tia[is_tia['AFIB_FLUTTER'].isin([4, 5])].copy()
no_afib_dead = no_afib[no_afib['DISCHARGE_DESTINATION'].isin([5])].copy()
# Those of them discharged on antiplatelets (ANTITHROMBOTICS code 1).
plt_no_afib = no_afib[no_afib['ANTITHROMBOTICS'].isin([1])].copy()
plt_no_afib_dead = plt_no_afib[plt_no_afib['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['afib_flutter_not_detected_or_not_known_patients'] = self._count_patients(dataframe=no_afib)
self.statsDf['afib_flutter_not_detected_or_not_known_dead_patients'] = self._count_patients(dataframe=no_afib_dead)
self.statsDf['prescribed_antiplatelets_no_afib_patients'] = self._count_patients(dataframe=plt_no_afib)
self.statsDf['prescribed_antiplatelets_no_afib_dead_patients'] = self._count_patients(dataframe=plt_no_afib_dead)
# Per-site ANTITHROMBOTICS counts for _get_values_for_factors.
self.tmp = no_afib.groupby(['Protocol ID', 'ANTITHROMBOTICS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=1, new_column_name='# patients prescribed antiplatelets without aFib')
# % = surviving antiplatelet patients / surviving no-aFib patients.
self.statsDf['% patients prescribed antiplatelets without aFib'] = self.statsDf.apply(
    lambda row: round(
        (row['# patients prescribed antiplatelets without aFib'] - row['prescribed_antiplatelets_no_afib_dead_patients'])
        / (row['afib_flutter_not_detected_or_not_known_patients'] - row['afib_flutter_not_detected_or_not_known_dead_patients'])
        * 100, 2)
    if row['afib_flutter_not_detected_or_not_known_patients'] > row['afib_flutter_not_detected_or_not_known_dead_patients']
    else 0, axis=1)
del no_afib, no_afib_dead, plt_no_afib, plt_no_afib_dead
#########################################
# ANTICOAGULANTS - PRESCRIBED WITH AFIB #
#########################################
# IS/TIA patients with detected atrial fibrillation/flutter (AFIB_FLUTTER
# codes 1-3).  NOTE: `afib_flutter_detected` and `afib_flutter_detected_dead`
# are also used by the statements that follow this section.
afib_flutter_detected = is_tia[is_tia['AFIB_FLUTTER'].isin([1, 2, 3])].copy()
self.statsDf['afib_flutter_detected_patients'] = self._count_patients(dataframe=afib_flutter_detected)
# Survivors within the detected-aFib subgroup; used as the denominator for the
# per-drug percentages below.
afib_flutter_detected_not_dead = afib_flutter_detected[~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['afib_flutter_detected_patients_not_dead'] = self._count_patients(dataframe=afib_flutter_detected_not_dead)
del afib_flutter_detected_not_dead
# Anticoagulated survivors: any ANTITHROMBOTICS answer except antiplatelets
# (1), recommended only (9) or none (10).
anticoagulants_prescribed = afib_flutter_detected[
~afib_flutter_detected['ANTITHROMBOTICS'].isin([1, 10, 9]) &
~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])
].copy()
self.statsDf['# patients prescribed anticoagulants with aFib'] = self._count_patients(dataframe=anticoagulants_prescribed)
# Per-site per-drug counts among the anticoagulated; re-creates the '#'
# columns (codes 2-8) that were dropped at the end of the previous section,
# now with percentages over surviving detected-aFib patients.
self.tmp = anticoagulants_prescribed.groupby(['Protocol ID', 'ANTITHROMBOTICS']).size().to_frame('count').reset_index()
# Additional calculation
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=2, new_column_name='# patients receiving Vit. K antagonist')
# self.statsDf['% patients receiving Vit. K antagonist'] = self.statsDf.apply(lambda x: round(((x['# patients receiving Vit. K antagonist']/x['# patients prescribed anticoagulants with aFib']) * 100), 2) if x['# patients prescribed anticoagulants with aFib'] > 0 else 0, axis=1)
self.statsDf['% patients receiving Vit. K antagonist'] = self.statsDf.apply(lambda x: round(((x['# patients receiving Vit. K antagonist']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=3, new_column_name='# patients receiving dabigatran')
self.statsDf['% patients receiving dabigatran'] = self.statsDf.apply(lambda x: round(((x['# patients receiving dabigatran']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=4, new_column_name='# patients receiving rivaroxaban')
self.statsDf['% patients receiving rivaroxaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving rivaroxaban']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=5, new_column_name='# patients receiving apixaban')
self.statsDf['% patients receiving apixaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving apixaban']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=6, new_column_name='# patients receiving edoxaban')
self.statsDf['% patients receiving edoxaban'] = self.statsDf.apply(lambda x: round(((x['# patients receiving edoxaban']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=7, new_column_name='# patients receiving LMWH or heparin in prophylactic dose')
self.statsDf['% patients receiving LMWH or heparin in prophylactic dose'] = self.statsDf.apply(lambda x: round(((x['# patients receiving LMWH or heparin in prophylactic dose']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=8, new_column_name='# patients receiving LMWH or heparin in full anticoagulant dose')
self.statsDf['% patients receiving LMWH or heparin in full anticoagulant dose'] = self.statsDf.apply(lambda x: round(((x['# patients receiving LMWH or heparin in full anticoagulant dose']/x['afib_flutter_detected_patients_not_dead']) * 100), 2) if x['afib_flutter_detected_patients_not_dead'] > 0 else 0, axis=1)
# Patients for whom anticoagulation was only recommended (code 9).
anticoagulants_recommended = afib_flutter_detected[afib_flutter_detected['ANTITHROMBOTICS'].isin([9])].copy()
self.statsDf['anticoagulants_recommended_patients'] = self._count_patients(dataframe=anticoagulants_recommended)
# Deceased among detected-aFib patients; consumed by the statements that
# follow this section.
afib_flutter_detected_dead = afib_flutter_detected[afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['afib_flutter_detected_dead_patients'] = self._count_patients(dataframe=afib_flutter_detected_dead)
self.statsDf['% patients prescribed anticoagulants with aFib'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed anticoagulants with aFib']/(x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients'])) * 100), 2) if (x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients']) > 0 else 0, axis=1)
##########################################
# ANTITHROMBOTICS - PRESCRIBED WITH AFIB #
##########################################
antithrombotics_prescribed = afib_flutter_detected[~afib_flutter_detected['ANTITHROMBOTICS'].isin([9, 10]) & ~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['# patients prescribed antithrombotics with aFib'] = self._count_patients(dataframe=antithrombotics_prescribed)
del antithrombotics_prescribed
recommended_antithrombotics_with_afib_alive = afib_flutter_detected[afib_flutter_detected['ANTITHROMBOTICS'].isin([9]) & ~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])].copy()
self.statsDf['recommended_antithrombotics_with_afib_alive_patients'] = self._count_patients(dataframe=recommended_antithrombotics_with_afib_alive)
del recommended_antithrombotics_with_afib_alive
self.statsDf['% patients prescribed antithrombotics with aFib'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed antithrombotics with aFib']/(x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients'] - x['recommended_antithrombotics_with_afib_alive_patients'])) * 100), 2) if (x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients'] - x['recommended_antithrombotics_with_afib_alive_patients']) > 0 else 0, axis=1)
###########
# STATINS #
###########
# For CZ only patients discharged home included
if country_code == 'CZ':
is_tia_discharged_home = is_tia[is_tia['DISCHARGE_DESTINATION'].isin([1])].copy()
self.statsDf['is_tia_discharged_home_patients'] = self._count_patients(dataframe=is_tia_discharged_home)
self.tmp = is_tia_discharged_home.groupby(['Protocol ID', 'STATIN']).size().to_frame('count').reset_index()
del is_tia_discharged_home
self.statsDf = self._get_values_for_factors(column_name="STATIN", value=1, new_column_name='# patients prescribed statins - Yes')
self.statsDf['% patients prescribed statins - Yes'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed statins - Yes']/x['is_tia_discharged_home_patients']) * 100), 2) if x['is_tia_discharged_home_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STATIN", value=2, new_column_name='# patients prescribed statins - No')
self.statsDf['% patients prescribed statins - No'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed statins - No']/x['is_tia_discharged_home_patients']) * 100), 2) if x['is_tia_discharged_home_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STATIN", value=3, new_column_name='# patients prescribed statins - Not known')
self.statsDf['% patients prescribed statins - Not known'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed statins - Not known']/x['is_tia_discharged_home_patients']) * 100), 2) if x['is_tia_discharged_home_patients'] > 0 else 0, axis=1)
else:
self.tmp = is_tia.groupby(['Protocol ID', 'STATIN']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="STATIN", value=1, new_column_name='# patients prescribed statins - Yes')
self.statsDf['% patients prescribed statins - Yes'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed statins - Yes']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STATIN", value=2, new_column_name='# patients prescribed statins - No')
self.statsDf['% patients prescribed statins - No'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed statins - No']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="STATIN", value=3, new_column_name='# patients prescribed statins - Not known')
self.statsDf['% patients prescribed statins - Not known'] = self.statsDf.apply(lambda x: round(((x['# patients prescribed statins - Not known']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
####################
# CAROTID STENOSIS #
####################
self.tmp = is_tia.groupby(['Protocol ID', 'CAROTID_STENOSIS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="CAROTID_STENOSIS", value=1, new_column_name='# carotid stenosis - 50%-70%')
self.statsDf['% carotid stenosis - 50%-70%'] = self.statsDf.apply(lambda x: round(((x['# carotid stenosis - 50%-70%']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_STENOSIS", value=2, new_column_name='# carotid stenosis - >70%')
self.statsDf['% carotid stenosis - >70%'] = self.statsDf.apply(lambda x: round(((x['# carotid stenosis - >70%']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_STENOSIS", value=3, new_column_name='# carotid stenosis - No')
self.statsDf['% carotid stenosis - No'] = self.statsDf.apply(lambda x: round(((x['# carotid stenosis - No']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_STENOSIS", value=4, new_column_name='# carotid stenosis - Not known')
self.statsDf['% carotid stenosis - Not known'] = self.statsDf.apply(lambda x: round(((x['# carotid stenosis - Not known']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
# Create a new column for the carotid stenosis graph: previously only stenosis > 70% was shown, so combine 50%-70% and > 70% into a single "carotid stenosis > 50%" count
self.statsDf['# carotid stenosis - >50%'] = self.statsDf['# carotid stenosis - 50%-70%'] + self.statsDf['# carotid stenosis - >70%']
self.statsDf['% carotid stenosis - >50%'] = self.statsDf.apply(lambda x: round(((x['# carotid stenosis - >50%']/x['is_tia_patients']) * 100), 2) if x['is_tia_patients'] > 0 else 0, axis=1)
##############################
# CAROTID STENOSIS FOLLOW-UP #
##############################
# Create temporary dataframe if carotid stenosis was 50-70% or > 70%
carotid_stenosis = is_tia[is_tia['CAROTID_STENOSIS'].isin([1, 2])]
self.tmp = carotid_stenosis.groupby(['Protocol ID', 'CAROTID_STENOSIS_FOLLOWUP']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="CAROTID_STENOSIS_FOLLOWUP", value=1, new_column_name='# carotid stenosis followup - Yes')
self.statsDf['% carotid stenosis followup - Yes'] = self.statsDf.apply(lambda x: round(((x['# carotid stenosis followup - Yes']/x['# carotid stenosis - >50%']) * 100), 2) if x['# carotid stenosis - >50%'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_STENOSIS_FOLLOWUP", value=2, new_column_name='# carotid stenosis followup - No')
self.statsDf['% carotid stenosis followup - No'] = self.statsDf.apply(lambda x: round(((x['# carotid stenosis followup - No']/x['# carotid stenosis - >50%']) * 100), 2) if x['# carotid stenosis - >50%'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_STENOSIS_FOLLOWUP", value=3, new_column_name='# carotid stenosis followup - No, but planned later')
self.statsDf['% carotid stenosis followup - No, but planned later'] = self.statsDf.apply(lambda x: round(((x['# carotid stenosis followup - No, but planned later']/x['# carotid stenosis - >50%']) * 100), 2) if x['# carotid stenosis - >50%'] > 0 else 0, axis=1)
# Create temporary dataframe if carotid stenosis was followed up or planned to follow up later
carotid_stenosis_followup = carotid_stenosis[carotid_stenosis['CAROTID_STENOSIS_FOLLOWUP'].isin([1, 3])].copy()
self.statsDf['# carotid stenosis followup - Yes, but planned'] = self._count_patients(dataframe=carotid_stenosis_followup)
self.statsDf['% carotid stenosis followup - Yes, but planned'] = self.statsDf.apply(lambda x: round(((x['# carotid stenosis followup - Yes, but planned']/x['# carotid stenosis - >50%']) * 100), 2) if x['# carotid stenosis - >50%'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CAROTID_STENOSIS_FOLLOWUP", value=4, new_column_name='# carotid stenosis followup - Referred to another centre')
self.statsDf['% carotid stenosis followup - Referred to another centre'] = self.statsDf.apply(lambda x: round(((x['# carotid stenosis followup - Referred to another centre']/x['# carotid stenosis - >50%']) * 100), 2) if x['# carotid stenosis - >50%'] > 0 else 0, axis=1)
del carotid_stenosis, carotid_stenosis_followup
#####################
# ANTIHYPERTENSIVES #
#####################
# tag::antihypertensive[]
if country_code == 'CZ':
# filter patients with recanaliztion procedure 8 and form CZ_4 (antihypertensive not shown in the new version)
discharge_subset_alive_not_returned_back = discharge_subset_alive.loc[~(discharge_subset_alive['crf_parent_name'].isin(['F_RESQ_IVT_TBY_CZ_4']) & discharge_subset_alive['RECANALIZATION_PROCEDURES'].isin([5,6,8]))].copy()
self.statsDf['discharge_subset_alive_not_returned_back_patients'] = self._count_patients(dataframe=discharge_subset_alive_not_returned_back)
self.tmp = discharge_subset_alive_not_returned_back.groupby(['Protocol ID', 'ANTIHYPERTENSIVE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTIHYPERTENSIVE", value=3, new_column_name='# prescribed antihypertensives - Not known')
self.statsDf['% prescribed antihypertensives - Not known'] = self.statsDf.apply(lambda x: round(((x['# prescribed antihypertensives - Not known']/x['discharge_subset_alive_not_returned_back_patients']) * 100), 2) if x['discharge_subset_alive_not_returned_back_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTIHYPERTENSIVE", value=1, new_column_name='# prescribed antihypertensives - Yes')
self.statsDf['% prescribed antihypertensives - Yes'] = self.statsDf.apply(lambda x: round(((x['# prescribed antihypertensives - Yes']/(x['discharge_subset_alive_not_returned_back_patients'] - x['# prescribed antihypertensives - Not known'])) * 100), 2) if (x['discharge_subset_alive_not_returned_back_patients'] - x['# prescribed antihypertensives - Not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTIHYPERTENSIVE", value=2, new_column_name='# prescribed antihypertensives - No')
self.statsDf['% prescribed antihypertensives - No'] = self.statsDf.apply(lambda x: round(((x['# prescribed antihypertensives - No']/(x['discharge_subset_alive_not_returned_back_patients'] - x['# prescribed antihypertensives - Not known'])) * 100), 2) if (x['discharge_subset_alive_not_returned_back_patients'] - x['# prescribed antihypertensives - Not known']) > 0 else 0, axis=1)
else:
self.tmp = discharge_subset_alive.groupby(['Protocol ID', 'ANTIHYPERTENSIVE']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTIHYPERTENSIVE", value=3, new_column_name='# prescribed antihypertensives - Not known')
self.statsDf['% prescribed antihypertensives - Not known'] = self.statsDf.apply(lambda x: round(((x['# prescribed antihypertensives - Not known']/x['discharge_subset_alive_patients']) * 100), 2) if x['discharge_subset_alive_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTIHYPERTENSIVE", value=1, new_column_name='# prescribed antihypertensives - Yes')
self.statsDf['% prescribed antihypertensives - Yes'] = self.statsDf.apply(lambda x: round(((x['# prescribed antihypertensives - Yes']/(x['discharge_subset_alive_patients'] - x['# prescribed antihypertensives - Not known'])) * 100), 2) if (x['discharge_subset_alive_patients'] - x['# prescribed antihypertensives - Not known']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="ANTIHYPERTENSIVE", value=2, new_column_name='# prescribed antihypertensives - No')
self.statsDf['% prescribed antihypertensives - No'] = self.statsDf.apply(lambda x: round(((x['# prescribed antihypertensives - No']/(x['discharge_subset_alive_patients'] - x['# prescribed antihypertensives - Not known'])) * 100), 2) if (x['discharge_subset_alive_patients'] - x['# prescribed antihypertensives - Not known']) > 0 else 0, axis=1)
# end::antihypertensive[]
#####################
# SMOKING CESSATION #
#####################
# tag::smoking[]
if country_code == 'CZ':
self.tmp = discharge_subset_alive_not_returned_back.groupby(['Protocol ID', 'SMOKING_CESSATION']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="SMOKING_CESSATION", value=3, new_column_name='# recommended to a smoking cessation program - not a smoker')
self.statsDf['% recommended to a smoking cessation program - not a smoker'] = self.statsDf.apply(lambda x: round(((x['# recommended to a smoking cessation program - not a smoker']/x['discharge_subset_alive_not_returned_back_patients']) * 100), 2) if x['discharge_subset_alive_not_returned_back_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="SMOKING_CESSATION", value=1, new_column_name='# recommended to a smoking cessation program - Yes')
self.statsDf['% recommended to a smoking cessation program - Yes'] = self.statsDf.apply(lambda x: round(((x['# recommended to a smoking cessation program - Yes']/x['discharge_subset_alive_not_returned_back_patients']) * 100), 2) if x['discharge_subset_alive_not_returned_back_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="SMOKING_CESSATION", value=2, new_column_name='# recommended to a smoking cessation program - No')
self.statsDf['% recommended to a smoking cessation program - No'] = self.statsDf.apply(lambda x: round(((x['# recommended to a smoking cessation program - No']/x['discharge_subset_alive_not_returned_back_patients']) * 100), 2) if x['discharge_subset_alive_not_returned_back_patients'] > 0 else 0, axis=1)
else:
self.tmp = discharge_subset_alive.groupby(['Protocol ID', 'SMOKING_CESSATION']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="SMOKING_CESSATION", value=3, new_column_name='# recommended to a smoking cessation program - not a smoker')
self.statsDf['% recommended to a smoking cessation program - not a smoker'] = self.statsDf.apply(lambda x: round(((x['# recommended to a smoking cessation program - not a smoker']/x['discharge_subset_alive_patients']) * 100), 2) if x['discharge_subset_alive_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="SMOKING_CESSATION", value=1, new_column_name='# recommended to a smoking cessation program - Yes')
self.statsDf['% recommended to a smoking cessation program - Yes'] = self.statsDf.apply(lambda x: round(((x['# recommended to a smoking cessation program - Yes']/x['discharge_subset_alive_patients']) * 100), 2) if x['discharge_subset_alive_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="SMOKING_CESSATION", value=2, new_column_name='# recommended to a smoking cessation program - No')
self.statsDf['% recommended to a smoking cessation program - No'] = self.statsDf.apply(lambda x: round(((x['# recommended to a smoking cessation program - No']/x['discharge_subset_alive_patients']) * 100), 2) if x['discharge_subset_alive_patients'] > 0 else 0, axis=1)
# end::smoking[]
##########################
# CEREBROVASCULAR EXPERT #
##########################
# tag::cerebrovascular_expert[]
if country_code == 'CZ':
self.tmp = discharge_subset_alive_not_returned_back.groupby(['Protocol ID', 'CEREBROVASCULAR_EXPERT']).size().to_frame('count').reset_index()
# Calculate the number of patients entered into the old form
self.statsDf = self._get_values_for_factors(column_name="CEREBROVASCULAR_EXPERT", value=-999, new_column_name='tmp')
self.statsDf = self._get_values_for_factors(column_name="CEREBROVASCULAR_EXPERT", value=1, new_column_name='# recommended to a cerebrovascular expert - Recommended, and appointment was made')
self.statsDf['% recommended to a cerebrovascular expert - Recommended, and appointment was made'] = self.statsDf.apply(lambda x: round(((x['# recommended to a cerebrovascular expert - Recommended, and appointment was made']/(x['discharge_subset_alive_not_returned_back_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_alive_not_returned_back_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CEREBROVASCULAR_EXPERT", value=2, new_column_name='# recommended to a cerebrovascular expert - Recommended, but appointment was not made')
self.statsDf['% recommended to a cerebrovascular expert - Recommended, but appointment was not made'] = self.statsDf.apply(lambda x: round(((x['# recommended to a cerebrovascular expert - Recommended, but appointment was not made']/(x['discharge_subset_alive_not_returned_back_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_alive_not_returned_back_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf.loc[:, '# recommended to a cerebrovascular expert - Recommended'] = self.statsDf.apply(lambda x: x['# recommended to a cerebrovascular expert - Recommended, and appointment was made'] + x['# recommended to a cerebrovascular expert - Recommended, but appointment was not made'], axis=1)
self.statsDf['% recommended to a cerebrovascular expert - Recommended'] = self.statsDf.apply(lambda x: round(((x['# recommended to a cerebrovascular expert - Recommended']/(x['discharge_subset_alive_not_returned_back_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_alive_not_returned_back_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CEREBROVASCULAR_EXPERT", value=3, new_column_name='# recommended to a cerebrovascular expert - Not recommended')
self.statsDf['% recommended to a cerebrovascular expert - Not recommended'] = self.statsDf.apply(lambda x: round(((x['# recommended to a cerebrovascular expert - Not recommended']/(x['discharge_subset_alive_not_returned_back_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_alive_not_returned_back_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf.drop(['tmp'], inplace=True, axis=1)
else:
self.tmp = discharge_subset_alive.groupby(['Protocol ID', 'CEREBROVASCULAR_EXPERT']).size().to_frame('count').reset_index()
# Calculate the number of patients entered into the old form
self.statsDf = self._get_values_for_factors(column_name="CEREBROVASCULAR_EXPERT", value=-999, new_column_name='tmp')
self.statsDf = self._get_values_for_factors(column_name="CEREBROVASCULAR_EXPERT", value=1, new_column_name='# recommended to a cerebrovascular expert - Recommended, and appointment was made')
self.statsDf['% recommended to a cerebrovascular expert - Recommended, and appointment was made'] = self.statsDf.apply(lambda x: round(((x['# recommended to a cerebrovascular expert - Recommended, and appointment was made']/(x['discharge_subset_alive_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_alive_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CEREBROVASCULAR_EXPERT", value=2, new_column_name='# recommended to a cerebrovascular expert - Recommended, but appointment was not made')
self.statsDf['% recommended to a cerebrovascular expert - Recommended, but appointment was not made'] = self.statsDf.apply(lambda x: round(((x['# recommended to a cerebrovascular expert - Recommended, but appointment was not made']/(x['discharge_subset_alive_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_alive_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf.loc[:, '# recommended to a cerebrovascular expert - Recommended'] = self.statsDf.apply(lambda x: x['# recommended to a cerebrovascular expert - Recommended, and appointment was made'] + x['# recommended to a cerebrovascular expert - Recommended, but appointment was not made'], axis=1)
self.statsDf['% recommended to a cerebrovascular expert - Recommended'] = self.statsDf.apply(lambda x: round(((x['# recommended to a cerebrovascular expert - Recommended']/(x['discharge_subset_alive_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_alive_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="CEREBROVASCULAR_EXPERT", value=3, new_column_name='# recommended to a cerebrovascular expert - Not recommended')
self.statsDf['% recommended to a cerebrovascular expert - Not recommended'] = self.statsDf.apply(lambda x: round(((x['# recommended to a cerebrovascular expert - Not recommended']/(x['discharge_subset_alive_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_alive_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf.drop(['tmp'], inplace=True, axis=1)
# end::cerebrovascular_expert[]
#########################
# DISCHARGE DESTINATION #
#########################
self.tmp = discharge_subset.groupby(['Protocol ID', 'DISCHARGE_DESTINATION']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_DESTINATION", value=1, new_column_name='# discharge destination - Home')
self.statsDf['% discharge destination - Home'] = self.statsDf.apply(lambda x: round(((x['# discharge destination - Home']/x['discharge_subset_patients']) * 100), 2) if x['discharge_subset_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_DESTINATION", value=2, new_column_name='# discharge destination - Transferred within the same centre')
self.statsDf['% discharge destination - Transferred within the same centre'] = self.statsDf.apply(lambda x: round(((x['# discharge destination - Transferred within the same centre']/x['discharge_subset_patients']) * 100), 2) if x['discharge_subset_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_DESTINATION", value=3, new_column_name='# discharge destination - Transferred to another centre')
self.statsDf['% discharge destination - Transferred to another centre'] = self.statsDf.apply(lambda x: round(((x['# discharge destination - Transferred to another centre']/x['discharge_subset_patients']) * 100), 2) if x['discharge_subset_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_DESTINATION", value=4, new_column_name='# discharge destination - Social care facility')
self.statsDf['% discharge destination - Social care facility'] = self.statsDf.apply(lambda x: round(((x['# discharge destination - Social care facility']/x['discharge_subset_patients']) * 100), 2) if x['discharge_subset_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_DESTINATION", value=5, new_column_name='# discharge destination - Dead')
self.statsDf['% discharge destination - Dead'] = self.statsDf.apply(lambda x: round(((x['# discharge destination - Dead']/x['discharge_subset_patients']) * 100), 2) if x['discharge_subset_patients'] > 0 else 0, axis=1)
#######################################
# DISCHARGE DESTINATION - SAME CENTRE #
#######################################
discharge_subset_same_centre = discharge_subset[discharge_subset['DISCHARGE_DESTINATION'].isin([2])].copy()
self.statsDf['discharge_subset_same_centre_patients'] = self._count_patients(dataframe=discharge_subset_same_centre)
self.tmp = discharge_subset_same_centre.groupby(['Protocol ID', 'DISCHARGE_SAME_FACILITY']).size().to_frame('count').reset_index()
del discharge_subset_same_centre
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_SAME_FACILITY", value=1, new_column_name='# transferred within the same centre - Acute rehabilitation')
self.statsDf['% transferred within the same centre - Acute rehabilitation'] = self.statsDf.apply(lambda x: round(((x['# transferred within the same centre - Acute rehabilitation']/x['discharge_subset_same_centre_patients']) * 100), 2) if x['discharge_subset_same_centre_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_SAME_FACILITY", value=2, new_column_name='# transferred within the same centre - Post-care bed')
self.statsDf['% transferred within the same centre - Post-care bed'] = self.statsDf.apply(lambda x: round(((x['# transferred within the same centre - Post-care bed']/x['discharge_subset_same_centre_patients']) * 100), 2) if x['discharge_subset_same_centre_patients'] > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_SAME_FACILITY", value=3, new_column_name='# transferred within the same centre - Another department')
self.statsDf['% transferred within the same centre - Another department'] = self.statsDf.apply(lambda x: round(((x['# transferred within the same centre - Another department']/x['discharge_subset_same_centre_patients']) * 100), 2) if x['discharge_subset_same_centre_patients'] > 0 else 0, axis=1)
############################################
# DISCHARGE DESTINATION - ANOTHER FACILITY #
############################################
discharge_subset_another_centre = discharge_subset[discharge_subset['DISCHARGE_DESTINATION'].isin([3])].copy()
self.statsDf['discharge_subset_another_centre_patients'] = self._count_patients(dataframe=discharge_subset_another_centre)
self.tmp = discharge_subset_another_centre.groupby(['Protocol ID', 'DISCHARGE_OTHER_FACILITY']).size().to_frame('count').reset_index()
# Calculate number of patients entered to the old form
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_OTHER_FACILITY", value=-999, new_column_name='tmp')
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_OTHER_FACILITY", value=1, new_column_name='# transferred to another centre - Stroke centre')
self.statsDf['% transferred to another centre - Stroke centre'] = self.statsDf.apply(lambda x: round(((x['# transferred to another centre - Stroke centre']/(x['discharge_subset_another_centre_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_another_centre_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_OTHER_FACILITY", value=2, new_column_name='# transferred to another centre - Comprehensive stroke centre')
self.statsDf['% transferred to another centre - Comprehensive stroke centre'] = self.statsDf.apply(lambda x: round(((x['# transferred to another centre - Comprehensive stroke centre']/(x['discharge_subset_another_centre_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_another_centre_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf = self._get_values_for_factors(column_name="DISCHARGE_OTHER_FACILITY", value=3, new_column_name='# transferred to another centre - Another hospital')
self.statsDf['% transferred to another centre - Another hospital'] = self.statsDf.apply(lambda x: round(((x['# transferred to another centre - Another hospital']/(x['discharge_subset_another_centre_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_another_centre_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf.drop(['tmp'], inplace=True, axis=1)
#########################################################
# DISCHARGE DESTINATION - ANOTHER FACILITY - DEPARTMENT #
#########################################################
self.tmp = discharge_subset_another_centre.groupby(['Protocol ID', 'DISCHARGE_OTHER_FACILITY_O1']).size().to_frame('count').reset_index()
tmp_o2 = discharge_subset_another_centre.groupby(['Protocol ID', 'DISCHARGE_OTHER_FACILITY_O2']).size().to_frame('count').reset_index()
tmp_o3 = discharge_subset_another_centre.groupby(['Protocol ID', 'DISCHARGE_OTHER_FACILITY_O3']).size().to_frame('count').reset_index()
del discharge_subset_another_centre
# Calculate number of patients entered to the old form
self.statsDf.loc[:, 'tmp'] = 0
self.statsDf['# department transferred to within another centre - Acute rehabilitation'] = self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O1", value=1, dataframe=self.tmp) + self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O2", value=1, dataframe=tmp_o2) + self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O3", value=1, dataframe=tmp_o3)
self.statsDf['% department transferred to within another centre - Acute rehabilitation'] = self.statsDf.apply(lambda x: round(((x['# department transferred to within another centre - Acute rehabilitation']/(x['discharge_subset_another_centre_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_another_centre_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf['# department transferred to within another centre - Post-care bed'] = self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O1", value=2, dataframe=self.tmp) + self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O2", value=2, dataframe=tmp_o2) + self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O3", value=2, dataframe=tmp_o3)
self.statsDf['% department transferred to within another centre - Post-care bed'] = self.statsDf.apply(lambda x: round(((x['# department transferred to within another centre - Post-care bed']/(x['discharge_subset_another_centre_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_another_centre_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf['# department transferred to within another centre - Neurology'] = self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O1", value=3, dataframe=self.tmp) + self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O2", value=3, dataframe=tmp_o2) + self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O3", value=3, dataframe=tmp_o3)
self.statsDf['% department transferred to within another centre - Neurology'] = self.statsDf.apply(lambda x: round(((x['# department transferred to within another centre - Neurology']/(x['discharge_subset_another_centre_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_another_centre_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf['# department transferred to within another centre - Another department'] = self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O1", value=4, dataframe=self.tmp) + self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O2", value=4, dataframe=tmp_o2) + self._get_values_only_columns(column_name="DISCHARGE_OTHER_FACILITY_O3", value=4, dataframe=tmp_o3)
self.statsDf['% department transferred to within another centre - Another department'] = self.statsDf.apply(lambda x: round(((x['# department transferred to within another centre - Another department']/(x['discharge_subset_another_centre_patients'] - x['tmp'])) * 100), 2) if (x['discharge_subset_another_centre_patients'] - x['tmp']) > 0 else 0, axis=1)
self.statsDf.drop(['tmp'], inplace=True, axis=1)
############################################
# DISCHARGE DESTINATION - ANOTHER FACILITY #
############################################
discharge_subset.fillna(0, inplace=True)
discharge_subset_mrs = discharge_subset[~discharge_subset['DISCHARGE_MRS'].isin([0])].copy()
del discharge_subset
#discharge_subset_mrs['DISCHARGE_MRS'] = discharge_subset_mrs['DISCHARGE_MRS'].astype(float)
def convert_mrs_on_discharge(x):
    """ Map the discharge mRS dropdown option onto the mRS score.

    Dropdown option 1 (unknown/derivate) maps to 0; options 2-8 map to
    scores 0-6, i.e. the dropdown value minus 2.

    :param x: the mRS value from the dropdown
    :type x: int
    :returns: the dropdown value converted to a score (float)
    """
    score = float(x)
    return score - 1 if score == 1 else score - 2
if discharge_subset_mrs.empty:
self.statsDf['Median discharge mRS'] = 0
self.statsDf.fillna(0, inplace=True)
else:
discharge_subset_mrs['DISCHARGE_MRS_ADJUSTED'] = discharge_subset_mrs.apply(lambda row: convert_mrs_on_discharge(row['DISCHARGE_MRS']), axis=1)
discharge_subset_mrs['DISCHARGE_MRS_ADDED'] = discharge_subset_mrs['DISCHARGE_MRS_ADJUSTED'] + discharge_subset_mrs['D_MRS_SCORE']
discharge_subset_mrs.fillna(0, inplace=True)
self.statsDf = self.statsDf.merge(discharge_subset_mrs.groupby(['Protocol ID']).DISCHARGE_MRS_ADDED.agg(['median']).rename(columns={'median': 'Median discharge mRS'})['Median discharge mRS'].reset_index(), how='outer')
self.statsDf.fillna(0, inplace=True)
del discharge_subset_mrs
########################
# MEDIAN HOSPITAL STAY #
########################
positive_hospital_days = self.df[self.df['HOSPITAL_DAYS'] > 0]
self.statsDf = self.statsDf.merge(positive_hospital_days.groupby(['Protocol ID']).HOSPITAL_DAYS.agg(['median']).rename(columns={'median': 'Median hospital stay (days)'})['Median hospital stay (days)'].reset_index(), how='outer')
self.statsDf.fillna(0, inplace=True)
del positive_hospital_days
###########################
# MEDIAN LAST SEEN NORMAL #
###########################
self.statsDf = self.statsDf.merge(self.df[self.df['LAST_SEEN_NORMAL'] != 0].groupby(['Protocol ID']).LAST_SEEN_NORMAL.agg(['median']).rename(columns={'median': 'Median last seen normal'})['Median last seen normal'].reset_index(), how='outer')
self.statsDf.fillna(0, inplace=True)
# ELIGIBLE RECANALIZATION
wrong_ivtpa = recanalization_procedure_iv_tpa.loc[recanalization_procedure_iv_tpa['IVTPA'] <= 0]
self.statsDf['wrong_ivtpa'] = self._count_patients(dataframe=wrong_ivtpa)
# self.statsDf.loc[:, '# patients eligible thrombolysis'] = self.statsDf.apply(lambda x: (x['# recanalization procedures - IV tPa'] + x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment']) - x['wrong_ivtpa'], axis=1)
self.statsDf.loc[:, '# patients eligible thrombolysis'] = self.statsDf.apply(lambda x: x['# IV tPa'] - x['wrong_ivtpa'], axis=1)
self.statsDf.drop(['wrong_ivtpa'], inplace=True, axis=1)
del wrong_ivtpa
wrong_tby = recanalization_procedure_tby_dtg[recanalization_procedure_tby_dtg['TBY'] <= 0]
self.statsDf['wrong_tby'] = self._count_patients(dataframe=wrong_tby)
self.statsDf.loc[:, '# patients eligible thrombectomy'] = self.statsDf.apply(lambda x: (x['# TBY'] - x['wrong_tby']), axis=1)
self.statsDf.drop(['wrong_tby'], inplace=True, axis=1)
# if country_code == 'CZ':
# self.statsDf.loc[:, '# patients eligible thrombectomy'] = self.statsDf.apply(lambda x: (x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - Endovascular treatment alone'] + x['# recanalization procedures - Referred to another centre for endovascular treatment and hospitalization continues at the referred to centre'] + x['# recanalization procedures - Referred for endovascular treatment and patient is returned to the initial centre']) - x['wrong_tby'], axis=1)
#
# else:
# self.statsDf.loc[:, '# patients eligible thrombectomy'] = self.statsDf.apply(lambda x: (x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - Endovascular treatment alone']) - x['wrong_tby'], axis=1)
# self.statsDf.drop(['wrong_tby'], inplace=True, axis=1)
# self.statsDf.loc[:, 'patients_eligible_recanalization'] = self.statsDf.apply(lambda x: x['# recanalization procedures - Not done'] + x['# recanalization procedures - IV tPa'] + x['# recanalization procedures - IV tPa + endovascular treatment'] + x['# recanalization procedures - Endovascular treatment alone'] + x['# recanalization procedures - IV tPa + referred to another centre for endovascular treatment'], axis=1)
del wrong_tby
ivt_tby_mix = isch.loc[(isch['IVT_DONE'] == 1) | (isch['TBY_DONE'] == 1)].copy()
self.statsDf['patients_eligible_recanalization'] = self._count_patients(dataframe=ivt_tby_mix)
del ivt_tby_mix
################
# ANGEL AWARDS #
################
self.total_patient_column = '# total patients >= {0}'.format(self.patient_limit)
self.statsDf[self.total_patient_column] = self.statsDf['Total Patients'] >= self.patient_limit
## Calculate classic recanalization procedure
#recanalization_procedure_tby_only_dtg = recanalization_procedure_tby_dtg[recanalization_procedure_tby_dtg['RECANALIZATION_PROCEDURES'].isin([4])]
recanalization_procedure_tby_only_dtg = recanalization_procedure_tby_dtg.loc[
recanalization_procedure_tby_dtg['IVT_DONE'] == 0
]
# Create temporary dataframe only with rows where thrombolysis was performed under 60 minute
recanalization_procedure_iv_tpa_under_60 = recanalization_procedure_iv_tpa.loc[(recanalization_procedure_iv_tpa['IVTPA'] > 0) & (recanalization_procedure_iv_tpa['IVTPA'] <= 60)]
# Create temporary dataframe only with rows where thrombolysis was performed under 45 minute
recanalization_procedure_iv_tpa_under_45 = recanalization_procedure_iv_tpa.loc[(recanalization_procedure_iv_tpa['IVTPA'] > 0) & (recanalization_procedure_iv_tpa['IVTPA'] <= 45)]
del recanalization_procedure_iv_tpa
recanalization_procedure_tby_only_dtg_under_60 = recanalization_procedure_tby_only_dtg.loc[(recanalization_procedure_tby_only_dtg['TBY'] > 0) & (recanalization_procedure_tby_only_dtg['TBY'] <= 60)]
self.statsDf['# patients treated with door to recanalization therapy < 60 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_60) + self._count_patients(dataframe=recanalization_procedure_tby_only_dtg_under_60)
self.statsDf['% patients treated with door to recanalization therapy < 60 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to recanalization therapy < 60 minutes']/x['# patients recanalized']) * 100), 2) if x['# patients recanalized'] > 0 else 0, axis=1)
recanalization_procedure_tby_only_dtg_under_45 = recanalization_procedure_tby_only_dtg.loc[(recanalization_procedure_tby_only_dtg['TBY'] > 0) & (recanalization_procedure_tby_only_dtg['TBY'] <= 45)]
self.statsDf['# patients treated with door to recanalization therapy < 45 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_45) + self._count_patients(dataframe=recanalization_procedure_tby_only_dtg_under_45)
self.statsDf['% patients treated with door to recanalization therapy < 45 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to recanalization therapy < 45 minutes']/x['# patients recanalized']) * 100), 2) if x['# patients recanalized'] > 0 else 0, axis=1)
del recanalization_procedure_tby_only_dtg
#### DOOR TO THROMBOLYSIS THERAPY - MINUTES ####
# If thrombectomy done not at all, take the possible lowest award they can get
self.statsDf['# patients treated with door to thrombolysis < 60 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_60)
self.statsDf['% patients treated with door to thrombolysis < 60 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to thrombolysis < 60 minutes']/x['# patients eligible thrombolysis']) * 100), 2) if x['# patients eligible thrombolysis'] > 0 else 0, axis=1)
del recanalization_procedure_iv_tpa_under_60
self.statsDf['# patients treated with door to thrombolysis < 45 minutes'] = self._count_patients(dataframe=recanalization_procedure_iv_tpa_under_45)
self.statsDf['% patients treated with door to thrombolysis < 45 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to thrombolysis < 45 minutes']/x['# patients eligible thrombolysis']) * 100), 2) if x['# patients eligible thrombolysis'] > 0 else 0, axis=1)
del recanalization_procedure_iv_tpa_under_45
# Create temporary dataframe only with rows where trombectomy was performed under 90 minutes
recanalization_procedure_tby_only_dtg_under_120 = recanalization_procedure_tby_dtg.loc[(recanalization_procedure_tby_dtg['TBY'] > 0) & (recanalization_procedure_tby_dtg['TBY'] <= 120)]
# Create temporary dataframe only with rows where trombectomy was performed under 60 minutes
recanalization_procedure_tby_only_dtg_under_90 = recanalization_procedure_tby_dtg.loc[(recanalization_procedure_tby_dtg['TBY'] > 0) & (recanalization_procedure_tby_dtg['TBY'] <= 90)]
del recanalization_procedure_tby_dtg
self.statsDf['# patients treated with door to thrombectomy < 120 minutes'] = self._count_patients(dataframe=recanalization_procedure_tby_only_dtg_under_120)
self.statsDf['% patients treated with door to thrombectomy < 120 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to thrombectomy < 120 minutes']/x['# patients eligible thrombectomy']) * 100), 2) if x['# patients eligible thrombectomy'] > 0 else 0, axis=1)
del recanalization_procedure_tby_only_dtg_under_120
self.statsDf['# patients treated with door to thrombectomy < 90 minutes'] = self._count_patients(dataframe=recanalization_procedure_tby_only_dtg_under_90)
self.statsDf['% patients treated with door to thrombectomy < 90 minutes'] = self.statsDf.apply(lambda x: round(((x['# patients treated with door to thrombectomy < 90 minutes']/x['# patients eligible thrombectomy']) * 100), 2) if x['# patients eligible thrombectomy'] > 0 else 0, axis=1)
del recanalization_procedure_tby_only_dtg_under_90
#### RECANALIZATION RATE ####
self.statsDf['# recanalization rate out of total ischemic incidence'] = self.statsDf['# patients recanalized']
self.statsDf['% recanalization rate out of total ischemic incidence'] = self.statsDf['% patients recanalized']
#### CT/MRI ####
self.statsDf['# suspected stroke patients undergoing CT/MRI'] = self.statsDf['# CT/MRI - performed']
self.statsDf['% suspected stroke patients undergoing CT/MRI'] = self.statsDf['% CT/MRI - performed']
#### DYSPHAGIA SCREENING ####
self.statsDf['# all stroke patients undergoing dysphagia screening'] = self.statsDf['# dysphagia screening - Guss test'] + self.statsDf['# dysphagia screening - Other test']
self.statsDf['% all stroke patients undergoing dysphagia screening'] = self.statsDf.apply(lambda x: round(((x['# all stroke patients undergoing dysphagia screening']/(x['# all stroke patients undergoing dysphagia screening'] + x['# dysphagia screening - Not done'])) * 100), 2) if (x['# all stroke patients undergoing dysphagia screening'] + x['# dysphagia screening - Not done']) > 0 else 0, axis=1)
#### ISCHEMIC STROKE + NO AFIB + ANTIPLATELETS ####
# Exclude patients referred for recanalization procedure
non_transferred_antiplatelets = antithrombotics[~antithrombotics['RECANALIZATION_PROCEDURES'].isin([5,6])]
# Get temporary dataframe with patients who have prescribed antithrombotics and ischemic stroke
antiplatelets = non_transferred_antiplatelets[
non_transferred_antiplatelets['STROKE_TYPE'].isin([1])]
del non_transferred_antiplatelets
# Filter temporary dataframe and get only patients who have not been detected or not known for aFib flutter.
antiplatelets = antiplatelets[antiplatelets['AFIB_FLUTTER'].isin([4, 5])]
# Get patients who have prescribed antithrombotics
# exclude also patients with option 11 - applies to PT form
except_recommended = antiplatelets[~antiplatelets['ANTITHROMBOTICS'].isin([9, 11])]
# Get number of patients who have prescribed antithrombotics and ischemic stroke, have not been detected or not known for aFib flutter.
self.statsDf['except_recommended_patients'] = self._count_patients(dataframe=except_recommended)
# Get temporary dataframe groupby protocol ID and antithrombotics column
self.tmp = antiplatelets.groupby(['Protocol ID', 'ANTITHROMBOTICS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=1, new_column_name='# ischemic stroke patients discharged with antiplatelets')
self.statsDf['% ischemic stroke patients discharged with antiplatelets'] = self.statsDf.apply(lambda x: round(((x['# ischemic stroke patients discharged with antiplatelets']/x['except_recommended_patients']) * 100), 2) if x['except_recommended_patients'] > 0 else 0, axis=1)
# discharged home
antiplatelets_discharged_home = antiplatelets[antiplatelets['DISCHARGE_DESTINATION'].isin([1])]
if (antiplatelets_discharged_home.empty):
self.tmp = antiplatelets.groupby(['Protocol ID', 'ANTITHROMBOTICS']).size().to_frame('count').reset_index()
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=1, new_column_name='# ischemic stroke patients discharged home with antiplatelets')
self.statsDf['% ischemic stroke patients discharged home with antiplatelets'] = self.statsDf.apply(lambda x: round(((x['# ischemic stroke patients discharged home with antiplatelets']/x['except_recommended_patients']) * 100), 2) if x['except_recommended_patients'] > 0 else 0, axis=1)
self.statsDf['except_recommended_discharged_home_patients'] = self.statsDf['except_recommended_patients']
else:
self.tmp = antiplatelets_discharged_home.groupby(['Protocol ID', 'ANTITHROMBOTICS']).size().to_frame('count').reset_index()
# Get patients who have prescribed antithrombotics
except_recommended_discharged_home = except_recommended[except_recommended['DISCHARGE_DESTINATION'].isin([1])]
# Get number of patients who have prescribed antithrombotics and ischemic stroke, have not been detected or not known for aFib flutter.
self.statsDf['except_recommended_discharged_home_patients'] = self._count_patients(dataframe=except_recommended_discharged_home)
self.statsDf = self._get_values_for_factors(column_name="ANTITHROMBOTICS", value=1, new_column_name='# ischemic stroke patients discharged home with antiplatelets')
self.statsDf['% ischemic stroke patients discharged home with antiplatelets'] = self.statsDf.apply(lambda x: round(((x['# ischemic stroke patients discharged home with antiplatelets']/x['except_recommended_discharged_home_patients']) * 100), 2) if x['except_recommended_discharged_home_patients'] > 0 else 0, axis=1)
# Comapre number of ischemic stroke patients discharged with antiplatelets to the discharged home with antiplatelets and select the higher value
self.statsDf['# ischemic stroke patients discharged (home) with antiplatelets'] = self.statsDf.apply(lambda x: x['# ischemic stroke patients discharged with antiplatelets'] if x['# ischemic stroke patients discharged with antiplatelets'] > x['# ischemic stroke patients discharged home with antiplatelets'] else x['# ischemic stroke patients discharged home with antiplatelets'], axis=1)
self.statsDf['% ischemic stroke patients discharged (home) with antiplatelets'] = self.statsDf.apply(lambda x: x['% ischemic stroke patients discharged with antiplatelets'] if x['% ischemic stroke patients discharged with antiplatelets'] > x['% ischemic stroke patients discharged home with antiplatelets'] else x['% ischemic stroke patients discharged home with antiplatelets'], axis=1)
#### ISCHEMIC STROKE + AFIB + ANTICOAGULANTS ####
afib_flutter_detected = is_tia.loc[
is_tia['AFIB_FLUTTER'].isin([1, 2, 3])
].copy()
# exclude also patients with option 11 - applies to PT form
anticoagulants_prescribed = afib_flutter_detected[
~afib_flutter_detected['ANTITHROMBOTICS'].isin([1, 10, 9, 11]) &
~afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])
].copy()
not_transferred_afib_flutter_detected = afib_flutter_detected.loc[
~afib_flutter_detected['RECANALIZATION_PROCEDURES'].isin([5,6])
]
non_trasferred_anticoagulants = anticoagulants_prescribed[
~anticoagulants_prescribed['RECANALIZATION_PROCEDURES'].isin([5,6])
]
self.statsDf['# afib patients discharged with anticoagulants'] = self._count_patients(dataframe=non_trasferred_anticoagulants)
#self.statsDf['# afib patients discharged with anticoagulants'] = self._count_patients(dataframe=anticoagulants_prescribed)
# Get temporary dataframe with patients who are not dead with detected aFib flutter and with prescribed antithrombotics or with nothign (ANTITHROMBOTICS = 10)
# exclude also patients with option 11 - applies to PT form
afib_detected_discharged_home = not_transferred_afib_flutter_detected[
(~not_transferred_afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])) &
(~not_transferred_afib_flutter_detected['ANTITHROMBOTICS'].isin([1,9,11]))
]
# Get afib patients discharged and not dead
self.statsDf['afib_detected_discharged_patients'] = self._count_patients(dataframe=afib_detected_discharged_home)
# self.statsDf['% afib patients discharged with anticoagulants'] = self.statsDf.apply(lambda x: round(((x['# afib patients discharged with anticoagulants']/(x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients'])) * 100), 2) if (x['afib_flutter_detected_patients'] - x['afib_flutter_detected_dead_patients']) > 0 else 0, axis=1)
self.statsDf['% afib patients discharged with anticoagulants'] = self.statsDf.apply(
lambda x: round((
(x['# afib patients discharged with anticoagulants']/x['afib_detected_discharged_patients']) * 100
), 2) if (x['afib_detected_discharged_patients']) > 0 else 0, axis=1
)
# Get temporary dataframe with patients who have prescribed anticoagulats and were discharged home
anticoagulants_prescribed_discharged_home = non_trasferred_anticoagulants[
non_trasferred_anticoagulants['DISCHARGE_DESTINATION'].isin([1])
]
# anticoagulants_prescribed_discharged_home = anticoagulants_prescribed[anticoagulants_prescribed['DISCHARGE_DESTINATION'].isin([1])]
# Get temporary dataframe with patients who have been discharge at home with detected aFib flutter and with prescribed antithrombotics
# afib_detected_discharged_home = afib_flutter_detected[(afib_flutter_detected['DISCHARGE_DESTINATION'].isin([1])) & (~afib_flutter_detected['ANTITHROMBOTICS'].isin([9]))]
# exclude also patients with option 11 - applies to PT form
afib_detected_discharged_home = not_transferred_afib_flutter_detected[
(not_transferred_afib_flutter_detected['DISCHARGE_DESTINATION'].isin([1])) &
(~not_transferred_afib_flutter_detected['ANTITHROMBOTICS'].isin([1,9,11]))
]
# Check if temporary dataframe is empty. If yes, the value is calculated not only for discharged home, but only dead patients are excluded
if (anticoagulants_prescribed_discharged_home.empty):
# afib patients discharged home with anticoagulants
anticoagulants_prescribed_discharged_home = non_trasferred_anticoagulants.copy()
# Get temporary dataframe with patients who are not dead with detected aFib flutter and with prescribed antithrombotics
# exclude also patients with option 11 - applies to PT form
afib_detected_discharged_home = not_transferred_afib_flutter_detected[
(~not_transferred_afib_flutter_detected['DISCHARGE_DESTINATION'].isin([5])) &
(~not_transferred_afib_flutter_detected['ANTITHROMBOTICS'].isin([1,9,11]))
]
# Get # afib patients discharged home with anticoagulants
self.statsDf['# afib patients discharged home with anticoagulants'] = self._count_patients(dataframe=anticoagulants_prescribed_discharged_home)
# Get afib patients discharged and not dead
self.statsDf['afib_detected_discharged_home_patients'] = self._count_patients(dataframe=afib_detected_discharged_home)
# Get % afib patients discharge with anticoagulants and not dead
self.statsDf['% afib patients discharged home with anticoagulants'] = self.statsDf.apply(lambda x: round(((x['# afib patients discharged home with anticoagulants']/x['afib_detected_discharged_home_patients']) * 100), 2) if x['afib_detected_discharged_home_patients'] > 0 else 0, axis=1)
else:
self.statsDf['# afib patients discharged home with anticoagulants'] = self._count_patients(dataframe=anticoagulants_prescribed_discharged_home)
# Get afib patients discharged home
self.statsDf['afib_detected_discharged_home_patients'] = self._count_patients(dataframe=afib_detected_discharged_home)
self.statsDf['% afib patients discharged home with anticoagulants'] = self.statsDf.apply(lambda x: round(((x['# afib patients discharged home with anticoagulants']/x['afib_detected_discharged_home_patients']) * 100), 2) if x['afib_detected_discharged_home_patients'] > 0 else 0, axis=1)
self.statsDf['# afib patients discharged (home) with anticoagulants'] = self.statsDf.apply(lambda x: x['# afib patients discharged with anticoagulants'] if x['% afib patients discharged with anticoagulants'] > x['% afib patients discharged home with anticoagulants'] else x['# afib patients discharged home with anticoagulants'], axis=1)
self.statsDf['% afib patients discharged (home) with anticoagulants'] = self.statsDf.apply(lambda x: x['% afib patients discharged with anticoagulants'] if x['% afib patients discharged with anticoagulants'] > x['% afib patients discharged home with anticoagulants'] else x['% afib patients discharged home with anticoagulants'], axis=1)
#### STROKE UNIT ####
# stroke patients treated in a dedicated stroke unit / ICU
self.statsDf['# stroke patients treated in a dedicated stroke unit / ICU'] = self.statsDf['# patients hospitalized in stroke unit / ICU']
# % stroke patients treated in a dedicated stroke unit / ICU
self.statsDf['% stroke patients treated in a dedicated stroke unit / ICU'] = self.statsDf['% patients hospitalized in stroke unit / ICU']
# Create temporary dataframe to calculate final award
self.angels_awards_tmp = self.statsDf[[self.total_patient_column, '% patients treated with door to recanalization therapy < 60 minutes', '% patients treated with door to recanalization therapy < 45 minutes', '% patients treated with door to thrombolysis < 60 minutes', '% patients treated with door to thrombolysis < 45 minutes', '% patients treated with door to thrombectomy < 120 minutes', '% patients treated with door to thrombectomy < 90 minutes', '% recanalization rate out of total ischemic incidence', '% suspected stroke patients undergoing CT/MRI', '% all stroke patients undergoing dysphagia screening', '% ischemic stroke patients discharged (home) with antiplatelets', '% afib patients discharged (home) with anticoagulants', '% stroke patients treated in a dedicated stroke unit / ICU', '# patients eligible thrombectomy', '# patients eligible thrombolysis']]
#self.angels_awards_tmp = self.statsDf[[self.total_patient_column, '% patients treated with door to recanalization therapy < 60 minutes', '% patients treated with door to recanalization therapy < 45 minutes', '% patients treated with door to thrombolysis < 60 minutes', '% patients treated with door to thrombolysis < 45 minutes', '% patients treated with door to thrombectomy < 120 minutes', '% patients treated with door to thrombectomy < 90 minutes', '% recanalization rate out of total ischemic incidence', '% suspected stroke patients undergoing CT/MRI', '% all stroke patients undergoing dysphagia screening', '% ischemic stroke patients discharged (home) with antiplatelets', '% patients prescribed anticoagulants with aFib', '% stroke patients treated in a dedicated stroke unit / ICU', '# patients eligible thrombectomy', '# patients eligible thrombolysis']]
self.statsDf.fillna(0, inplace=True)
self.angels_awards_tmp.loc[:, 'Proposed Award (old calculation)'] = self.angels_awards_tmp.apply(lambda x: self._get_final_award(x, new_calculation=False), axis=1)
self.angels_awards_tmp.loc[:, 'Proposed Award'] = self.angels_awards_tmp.apply(lambda x: self._get_final_award(x, new_calculation=True), axis=1)
self.statsDf['Proposed Award (old calculation)'] = self.angels_awards_tmp['Proposed Award (old calculation)']
self.statsDf['Proposed Award'] = self.angels_awards_tmp['Proposed Award']
self.statsDf.rename(columns={"Protocol ID": "Site ID"}, inplace=True)
self.statsDf.drop_duplicates(inplace=True)
self.sites = self._get_sites(self.statsDf)
del isch, is_ich_tia_cvt, is_ich_cvt, is_ich, is_tia, is_ich_sah_cvt, is_tia_cvt, cvt, ich_sah, ich, sah, discharge_subset_alive
def _get_final_award(self, x, new_calculation=True):
    """ The function calculating the proposed award.

    A site below the minimum patient count (``self.total_patient_column``
    is False) is always "STROKEREADY". Otherwise an initial award is set
    from the door-to-treatment times and each subsequent metric can keep
    or lower it (ordering: DIAMOND > PLATINUM > GOLD > STROKEREADY).

    :param x: the row from temporary dataframe
    :type x: pandas series
    :param new_calculation: if True, grade door-to-thrombolysis and
        door-to-thrombectomy times separately; if False, grade the
        combined door-to-recanalization therapy times
    :type new_calculation: bool
    :returns: award -- the proposed award
    """
    # Sites that did not reach the patient-count threshold are not graded.
    if x[self.total_patient_column] == False:
        award = "STROKEREADY"
    else:
        if new_calculation:
            #### DOOR-TO-THROMBOLYSIS < 60 MINUTES ####
            thrombolysis_therapy_lt_60min = x['% patients treated with door to thrombolysis < 60 minutes']
            # Calculate award for thrombolysis, if no patients were eligible for thrombolysis and number of total patients was greater than minimum than the award is set to DIAMOND
            if (float(thrombolysis_therapy_lt_60min) >= 50 and float(thrombolysis_therapy_lt_60min) <= 74.99):
                award = "GOLD"
            elif (float(thrombolysis_therapy_lt_60min) >= 75):
                award = "DIAMOND"
            else:
                award = "STROKEREADY"
            #### DOOR-TO-THROMBOLYSIS < 45 MINUTES ####
            thrombolysis_therapy_lt_45min = x['% patients treated with door to thrombolysis < 45 minutes']
            if award != "STROKEREADY":
                if (float(thrombolysis_therapy_lt_45min) <= 49.99):
                    # NOTE(review): `award != "GOLD" or award == "DIAMOND"` is logically
                    # equivalent to just `award != "GOLD"` (the second clause is
                    # redundant) — confirm this downgrade of DIAMOND is intended.
                    if (award != "GOLD" or award == "DIAMOND"):
                        award = "PLATINUM"
                elif (float(thrombolysis_therapy_lt_45min) >= 50):
                    if (award != "GOLD"):
                        award = "DIAMOND"
            else:
                # No-op reassignment: award is already "STROKEREADY" here.
                award = "STROKEREADY"
            # Calculate award for thrombectomy, if no patients were eligible for thrombectomy and number of total patients was greater than minimum than the award is set to the possible proposed award (eg. if in thrombolysis step award was set to GOLD then the award will be GOLD)
            thrombectomy_pts = x['# patients eligible thrombectomy']
            # if thrombectomy_pts != 0:
            # Thrombectomy times are graded only when more than 3 patients were eligible.
            if thrombectomy_pts > 3:
                #### DOOR-TO-THROMBECTOMY < 120 MINUTES ####
                thrombectomy_therapy_lt_120min = x['% patients treated with door to thrombectomy < 120 minutes']
                if award != "STROKEREADY":
                    if (float(thrombectomy_therapy_lt_120min) >= 50 and float(thrombectomy_therapy_lt_120min) <= 74.99):
                        if (award == "PLATINUM" or award == "DIAMOND"):
                            award = "GOLD"
                    elif (float(thrombectomy_therapy_lt_120min) >= 75):
                        # No-op branch: keeps DIAMOND; lower awards are not upgraded.
                        if (award == "DIAMOND"):
                            award = "DIAMOND"
                else:
                    award = "STROKEREADY"
                #### DOOR-TO-THROMBECTOMY < 90 MINUTES ####
                thrombectomy_therapy_lt_90min = x['% patients treated with door to thrombectomy < 90 minutes']
                if award != "STROKEREADY":
                    if (float(thrombectomy_therapy_lt_90min) <= 49.99):
                        # NOTE(review): same redundant `or` clause as above.
                        if (award != "GOLD" or award == "DIAMOND"):
                            award = "PLATINUM"
                    elif (float(thrombectomy_therapy_lt_90min) >= 50):
                        if (award == "DIAMOND"):
                            award = "DIAMOND"
                else:
                    award = "STROKEREADY"
        else:
            #### OLD CALCULATION: COMBINED DOOR-TO-RECANALIZATION THERAPY TIMES ####
            recan_therapy_lt_60min = x['% patients treated with door to recanalization therapy < 60 minutes']
            if (float(recan_therapy_lt_60min) >= 50 and float(recan_therapy_lt_60min) <= 74.99):
                award = "GOLD"
            elif (float(recan_therapy_lt_60min) >= 75):
                award = "DIAMOND"
            else:
                award = "STROKEREADY"
            recan_therapy_lt_45min = x['% patients treated with door to recanalization therapy < 45 minutes']
            if award != "STROKEREADY":
                if (float(recan_therapy_lt_45min) <= 49.99):
                    # NOTE(review): redundant `or` clause, equivalent to `award != "GOLD"`.
                    if (award != "GOLD" or award == "DIAMOND"):
                        award = "PLATINUM"
                elif (float(recan_therapy_lt_45min) >= 50):
                    if (award != "GOLD"):
                        award = "DIAMOND"
            else:
                award = "STROKEREADY"
        #### RECANALIZATION RATE ####
        # NOTE(review): a rate below 5% leaves the award unchanged (no downgrade) — confirm intent.
        recan_rate = x['% recanalization rate out of total ischemic incidence']
        if award != "STROKEREADY":
            if (float(recan_rate) >= 5 and float(recan_rate) <= 14.99):
                if (award == "PLATINUM" or award == "DIAMOND"):
                    award = "GOLD"
            elif (float(recan_rate) >= 15 and float(recan_rate) <= 24.99):
                if (award == "DIAMOND"):
                    award = "PLATINUM"
            elif (float(recan_rate) >= 25):
                if (award == "DIAMOND"):
                    award = "DIAMOND"
        else:
            award = "STROKEREADY"
        #### CT/MRI PERFORMED ####
        ct_mri = x['% suspected stroke patients undergoing CT/MRI']
        if award != "STROKEREADY":
            if (float(ct_mri) >= 80 and float(ct_mri) <= 84.99):
                if (award == "PLATINUM" or award == "DIAMOND"):
                    award = "GOLD"
            elif (float(ct_mri) >= 85 and float(ct_mri) <= 89.99):
                if (award == "DIAMOND"):
                    award = "PLATINUM"
            elif (float(ct_mri) >= 90):
                if (award == "DIAMOND"):
                    award = "DIAMOND"
        else:
            award = "STROKEREADY"
        #### DYSPHAGIA SCREENING ####
        dysphagia_screening = x['% all stroke patients undergoing dysphagia screening']
        if award != "STROKEREADY":
            if (float(dysphagia_screening) >= 80 and float(dysphagia_screening) <= 84.99):
                if (award == "PLATINUM" or award == "DIAMOND"):
                    award = "GOLD"
            elif (float(dysphagia_screening) >= 85 and float(dysphagia_screening) <= 89.99):
                if (award == "DIAMOND"):
                    award = "PLATINUM"
            elif (float(dysphagia_screening) >= 90):
                if (award == "DIAMOND"):
                    award = "DIAMOND"
        else:
            award = "STROKEREADY"
        #### ANTIPLATELETS PRESCRIBED ####
        discharged_with_antiplatelets_final = x['% ischemic stroke patients discharged (home) with antiplatelets']
        if award != "STROKEREADY":
            if (float(discharged_with_antiplatelets_final) >= 80 and float(discharged_with_antiplatelets_final) <= 84.99):
                if (award == "PLATINUM" or award == "DIAMOND"):
                    award = "GOLD"
            elif (float(discharged_with_antiplatelets_final) >= 85 and float(discharged_with_antiplatelets_final) <= 89.99):
                if (award == "DIAMOND"):
                    award = "PLATINUM"
            elif (float(discharged_with_antiplatelets_final) >= 90):
                if (award == "DIAMOND"):
                    award = "DIAMOND"
        else:
            award = "STROKEREADY"
        #### ANTICOAGULANTS PRESCRIBED ####
        discharged_with_anticoagulants_final = x['% afib patients discharged (home) with anticoagulants']
        if award != "STROKEREADY":
            if (float(discharged_with_anticoagulants_final) >= 80 and float(discharged_with_anticoagulants_final) <= 84.99):
                if (award == "PLATINUM" or award == "DIAMOND"):
                    award = "GOLD"
            elif (float(discharged_with_anticoagulants_final) >= 85 and float(discharged_with_anticoagulants_final) <= 89.99):
                if (award == "DIAMOND"):
                    award = "PLATINUM"
            elif (float(discharged_with_anticoagulants_final) >= 90):
                if (award == "DIAMOND"):
                    award = "DIAMOND"
        else:
            award = "STROKEREADY"
        #### STROKE UNIT ####
        stroke_unit = x['% stroke patients treated in a dedicated stroke unit / ICU']
        if award != "STROKEREADY":
            if (float(stroke_unit) <= 0.99):
                if (award == "DIAMOND"):
                    award = "PLATINUM"
            elif (float(stroke_unit) >= 1):
                if (award == "DIAMOND"):
                    award = "DIAMOND"
        else:
            award = "STROKEREADY"
    return award
def _count_patients(self, dataframe):
""" The function calculating the number of patients per site.
:param dataframe: the dataframe with the raw data
:type dataframe: dataframe
:returns: the column with number of patients
"""
tmpDf = dataframe.groupby(['Protocol ID']).size().reset_index(name='count_patients')
factorDf = self.statsDf.merge(tmpDf, how='outer')
factorDf.fillna(0, inplace=True)
return factorDf['count_patients']
def _get_values_only_columns(self, column_name, value, dataframe):
""" The function calculating the numbeer of patients per site for the given value from the temporary dataframe.
:param column_name: the name of column name the number of patients should be calculated
:type column_name: str
:param value: the value for which we would like to get number of patients from the specific column
:type value: int
:param dataframe: the dataframe with the raw data
:type dataframe: pandas dataframe
:returns: the column with the number of patients
"""
tmpDf = dataframe[dataframe[column_name] == value].reset_index()[['Protocol ID', 'count']]
factorDf = self.statsDf.merge(tmpDf, how='outer')
factorDf.fillna(0, inplace=True)
return factorDf['count']
def _get_values_for_factors(self, column_name, value, new_column_name, df=None):
""" The function calculating the numbeer of patients per site for the given value from the temporary dataframe.
:param column_name: the name of column name the number of patients should be calculated
:type column_name: str
:param value: the value for which we would like to get number of patients from the specific column
:type value: int
:param new_column_name: to this value will be renamed the created column containing the number of patients
:type new_column_name: str
:param df: the dataframe with the raw data
:type df: pandas dataframe
:returns: the dataframe with calculated statistics
"""
# Check if type of column name is type of number, if not convert value into string
if (self.tmp[column_name].dtype != np.number):
value = str(value)
else:
value = value
tmpDf = self.tmp[self.tmp[column_name] == value].reset_index()[['Protocol ID', 'count']]
factorDf = self.statsDf.merge(tmpDf, how='outer')
factorDf.rename(columns={'count': new_column_name}, inplace=True)
factorDf.fillna(0, inplace=True)
return factorDf
def _get_values_for_factors_more_values(self, column_name, value, new_column_name, df=None):
    """ Calculate the number of patients per site for the given list of values from the temporary dataframe.

    :param column_name: the name of the column the number of patients should be calculated from
    :type column_name: str
    :param value: the list of values for which we would like to get number of patients from the specific column
    :type value: list
    :param new_column_name: to this value will be renamed the created column containing the number of patients
    :type new_column_name: str
    :param df: the dataframe with the raw data (defaults to the temporary dataframe self.tmp)
    :type df: pandas dataframe
    :returns: the dataframe with calculated statistics
    """
    # Both branches previously duplicated the same pipeline; pick the
    # source dataframe once and run the shared logic.
    source = self.tmp if df is None else df
    tmpDf = source[source[column_name].isin(value)].reset_index()[['Protocol ID', 'count']]
    # Several matching values can contribute rows per site — sum them.
    tmpDf = tmpDf.groupby('Protocol ID').sum().reset_index()
    # Outer-merge so every site appears; rename the count column and
    # zero-fill sites without a matching row.
    factorDf = self.statsDf.merge(tmpDf, how='outer')
    factorDf.rename(columns={'count': new_column_name}, inplace=True)
    factorDf.fillna(0, inplace=True)
    return factorDf
def _get_values_for_factors_containing(self, column_name, value, new_column_name, df=None):
    """ Calculate the number of patients per site for rows whose column contains the given substring.

    :param column_name: the name of the column the number of patients should be calculated from
    :type column_name: str
    :param value: the substring for which we would like to get number of patients from the specific column
    :type value: str
    :param new_column_name: to this value will be renamed the created column containing the number of patients
    :type new_column_name: str
    :param df: the dataframe with the raw data (defaults to the temporary dataframe self.tmp)
    :type df: pandas dataframe
    :returns: the dataframe with calculated statistics
    """
    # Both branches previously duplicated the same pipeline; pick the
    # source dataframe once and run the shared logic.
    source = self.tmp if df is None else df
    tmpDf = source[source[column_name].str.contains(value)].reset_index()[['Protocol ID', 'count']]
    # Several matching rows can contribute per site — sum them.
    tmpDf = tmpDf.groupby('Protocol ID').sum().reset_index()
    # Outer-merge so every site appears; rename the count column and
    # zero-fill sites without a matching row.
    factorDf = self.statsDf.merge(tmpDf, how='outer')
    factorDf.rename(columns={'count': new_column_name}, inplace=True)
    factorDf.fillna(0, inplace=True)
    return factorDf
def _get_ctmri_delta(self, hosp_time, ct_time):
""" The function calculating the difference between two times in minutes.
:param hosp_time: the time of hospitalization
:type hosp_time: time
:param ct_time: the time when CT/MRI was performed
:type ct_time: time
:returns: tdelta between two times in minutes
"""
timeformat = '%H:%M:%S'
# Check if both time are not None if yes, return 0 else return tdelta
if hosp_time is None or ct_time is None or pd.isnull(hosp_time) or pd.isnull(ct_time):
tdeltaMin = 0
elif hosp_time == 0 or ct_time == 0:
tdeltaMin = 0
else:
if isinstance(ct_time, time) and isinstance(hosp_time, time):
tdelta = datetime.combine(date.today(), ct_time) - datetime.combine(date.today(), hosp_time)
elif isinstance(ct_time, time):
tdelta = datetime.combine(date.today(), ct_time) - datetime.strptime(hosp_time, timeformat)
elif isinstance(hosp_time, time):
tdelta = datetime.strptime(ct_time, timeformat) - datetime.strptime(hosp_time, timeformat)
else:
tdelta = datetime.strptime(ct_time, timeformat) - datetime.strptime(hosp_time, timeformat)
tdeltaMin = tdelta.total_seconds()/60.0
if tdeltaMin > 60:
res = 2
elif tdeltaMin <= 60 and tdeltaMin > 0:
res = 1
else:
res = -2
return res
def _return_dataset(self):
    """ The function returning dataframe.

    :returns: the raw (preprocessed) dataframe backing this object
    """
    return self.df
def _return_stats(self):
    """ The function returning the dataframe with the calculated statistics.

    :returns: the per-site statistics dataframe accumulated by the
        _get_values_* helper methods
    """
    return self.statsDf
def _get_sites(self, df):
    """ Return the list of unique sites present in the preprocessed data.

    :param df: dataframe containing a 'Site ID' column
    :returns: the list of unique site IDs
    """
    # set() removes duplicates; list() restores the expected return type.
    return list(set(df['Site ID'].tolist()))
@property
def country_name(self):
    """Name of the country these statistics were generated for (read-only)."""
    return self._country_name
| 96.405449 | 1,175 | 0.683898 | 240,265 | 0.998491 | 0 | 0 | 71 | 0.000295 | 0 | 0 | 129,727 | 0.539118 |
616393b01319880a58b68d651fe6bd662c0ef5d6 | 13,451 | py | Python | api_level_2/qt/basic.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 12 | 2018-06-28T13:40:53.000Z | 2022-01-07T12:46:15.000Z | api_level_2/qt/basic.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 6 | 2019-04-29T16:55:38.000Z | 2022-03-04T17:00:15.000Z | api_level_2/qt/basic.py | olklymov/valkka-examples | 92be5f815cd3927100ccc4220c588bdd7c510797 | [
"MIT"
] | 5 | 2019-04-21T15:42:55.000Z | 2021-08-16T10:53:30.000Z | """
basic.py : Some basic classes encapsulating filter chains
* Copyright 2017-2020 Valkka Security Ltd. and Sampsa Riikonen
*
* Authors: Sampsa Riikonen <sampsa.riikonen@iki.fi>
*
* This file is part of the Valkka library.
*
* Valkka is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>
*
*/
@file basic.py
@author Sampsa Riikonen
@date 2017
@version 1.2.2
@brief Some basic classes encapsulating filter chains
"""
import sys
import time
import random
# so, everything that has .core, refers to the api1 level (i.e. swig
# wrapped cpp code)
from valkka import core
# api2 versions of the thread classes
from valkka.api2.threads import LiveThread, OpenGLThread
from valkka.api2.tools import parameterInitCheck, typeCheck
pre_mod = "valkka.api2.chains.basic : "
class BasicFilterchain:
    """This class implements the following filterchain:

    ::

      (LiveThread:livethread) -->> (AVThread:avthread) -->> (OpenGLThread:glthread)

    i.e. the stream is decoded by an AVThread and sent to the OpenGLThread
    for presentation.
    """

    # Constructor parameters accepted via **kwargs: either a required type,
    # or a (type, default) tuple for optional parameters — see
    # parameterInitCheck, which attaches each as an instance attribute.
    parameter_defs = {
        "livethread": LiveThread,
        "openglthread": OpenGLThread,
        "address": str,
        "slot": int,

        # these are for the AVThread instance:
        "n_basic": (int, 20),  # number of payload frames in the stack
        "n_setup": (int, 20),  # number of setup frames in the stack
        "n_signal": (int, 20),  # number of signal frames in the stack
        "flush_when_full": (bool, False),  # clear fifo at overflow

        "affinity": (int, -1),
        "verbose": (bool, False),
        "msreconnect": (int, 0),

        # Timestamp correction type: TimeCorrectionType_none,
        # TimeCorrectionType_dummy, or TimeCorrectionType_smart (default)
        "time_correction": None,
        # Operating system socket ringbuffer size in bytes # 0 means default
        "recv_buffer_size": (int, 0),
        # Reordering buffer time for Live555 packets in MILLIseconds # 0 means
        # default
        "reordering_mstime": (int, 0),
        "n_threads": (int, 1)
    }

    def __init__(self, **kwargs):
        """Validate **kwargs against parameter_defs, attach them as
        instance attributes and bring the filterchain up."""
        # auxiliary string for debugging output
        self.pre = self.__class__.__name__ + " : "
        # check for input parameters, attach them to this instance as
        # attributes
        parameterInitCheck(self.parameter_defs, kwargs, self)
        self.init()

    def init(self):
        """Build the filterchain, register the stream and start the threads."""
        self.idst = str(id(self))
        self.makeChain()
        self.createContext()
        self.startThreads()
        self.active = True

    def __del__(self):
        self.close()

    def close(self):
        """Tear down decoding, the live context and the threads (idempotent)."""
        if (self.active):
            if (self.verbose):
                print(self.pre, "Closing threads and contexes")
            self.decodingOff()
            self.closeContext()
            self.stopThreads()
            self.active = False

    def makeChain(self):
        """Create the filter chain
        """
        # get input FrameFilter from OpenGLThread
        self.gl_in_filter = self.openglthread.getInput()

        # Frame fifo sizing for the AVThread input queue.
        self.framefifo_ctx = core.FrameFifoContext()
        self.framefifo_ctx.n_basic = self.n_basic
        self.framefifo_ctx.n_setup = self.n_setup
        self.framefifo_ctx.n_signal = self.n_signal
        self.framefifo_ctx.flush_when_full = self.flush_when_full

        self.avthread = core.AVThread(
            "avthread_" + self.idst,
            self.gl_in_filter,
            self.framefifo_ctx)
        if self.affinity > -1 and self.n_threads > 1:
            print("WARNING: can't use affinity with multiple threads")

        self.avthread.setAffinity(self.affinity)
        # NOTE(review): setNumberOfThreads is applied only when affinity is
        # set (> -1); the warning above suggests the opposite was intended —
        # confirm against upstream valkka examples.
        if self.affinity > -1:
            self.avthread.setNumberOfThreads(self.n_threads)
        # get input FrameFilter from AVThread
        self.av_in_filter = self.avthread.getFrameFilter()

    def createContext(self):
        """Creates a LiveConnectionContext and registers it to LiveThread
        """
        # define stream source, how the stream is passed on, etc.
        self.ctx = core.LiveConnectionContext()
        # slot number identifies the stream source
        self.ctx.slot = self.slot

        # Addresses starting with "rtsp://" are treated as RTSP sources;
        # anything else is assumed to be an SDP file.
        if (self.address.find("rtsp://") == 0):
            self.ctx.connection_type = core.LiveConnectionType_rtsp
        else:
            self.ctx.connection_type = core.LiveConnectionType_sdp  # this is an rtsp connection

        # stream address, i.e. "rtsp://.."
        self.ctx.address = self.address

        self.ctx.framefilter = self.av_in_filter
        self.ctx.msreconnect = self.msreconnect

        # some extra parameters:
        """
        // ctx.time_correction =TimeCorrectionType::none;
        // ctx.time_correction =TimeCorrectionType::dummy;
        // default time correction is smart
        // ctx.recv_buffer_size=1024*1024*2; // Operating system ringbuffer size for incoming socket
        // ctx.reordering_time =100000; // Live555 packet reordering treshold time (microsecs)
        """
        if (self.time_correction is not None):
            self.ctx.time_correction = self.time_correction
        # self.time_correction=core.TimeCorrectionType_smart # default ..
        self.ctx.recv_buffer_size = self.recv_buffer_size
        self.ctx.reordering_time = self.reordering_mstime * 1000  # from millisecs to microsecs

        # send the information about the stream to LiveThread
        self.livethread.registerStream(self.ctx)
        self.livethread.playStream(self.ctx)

    def closeContext(self):
        # Stop and deregister the stream (reverse order of createContext).
        self.livethread.stopStream(self.ctx)
        self.livethread.deregisterStream(self.ctx)

    def startThreads(self):
        """Starts thread required by the filter chain
        """
        self.avthread.startCall()

    def stopThreads(self):
        """Stops threads in the filter chain
        """
        if (self.verbose):
            print(self.pre, "stopping avthread")
        self.avthread.stopCall()
        if (self.verbose):
            print(self.pre, "avthread stopped")

    def decodingOff(self):
        # Pause decoding in the AVThread.
        self.avthread.decodingOffCall()

    def decodingOn(self):
        # Resume decoding in the AVThread.
        self.avthread.decodingOnCall()
class ShmemFilterchain(BasicFilterchain):
    """A filter chain with a shared mem hook

    ::

      (LiveThread:livethread) -->> (AVThread:avthread) --+
                                                         |   main branch
      {ForkFrameFilter: fork_filter} <-------------------+
             |
      branch 1  +-->> (OpenGLThread:glthread)
             |
      branch 2  +--> {IntervalFrameFilter: interval_filter} --> {SwScaleFrameFilter: sws_filter} --> {RGBShmemFrameFilter: shmem_filter}

    * Frames are decoded in the main branch from H264 => YUV
    * The stream of YUV frames is forked into two branches
    * branch 1 goes to OpenGLThread that interpolates YUV to RGB on the GPU
    * branch 2 goes to interval_filter that passes a YUV frame only once every
      second. From there, frames are interpolated on the CPU from YUV to RGB
      and finally passed through shared memory to another process.
    """

    # Parameters added on top of the mother class (merged below).
    parameter_defs = {  # additional parameters to the mother class
        # images passed over shmem are full-hd/4 reso
        "shmem_image_dimensions": (tuple, (1920 // 4, 1080 // 4)),
        # .. passed every 1000 milliseconds
        "shmem_image_interval": (int, 1000),
        # size of the ringbuffer
        "shmem_ringbuffer_size": (int, 10),
        "shmem_name": None,
        "event_fd": None
    }
    parameter_defs.update(BasicFilterchain.parameter_defs)  # don't forget!

    def __init__(self, **kwargs):
        """Validate **kwargs (including the shmem parameters) and bring
        the filterchain up."""
        # auxiliary string for debugging output
        self.pre = self.__class__.__name__ + " : "
        # check for input parameters, attach them to this instance as
        # attributes
        parameterInitCheck(self.parameter_defs, kwargs, self)
        # Shared-memory image dimensions must be a pair of ints.
        typeCheck(self.shmem_image_dimensions[0], int)
        typeCheck(self.shmem_image_dimensions[1], int)
        self.init()

    def makeChain(self):
        """Create the filter chain
        """
        if (self.shmem_name is None):
            # Derive a unique shared-memory segment name from this object's id.
            self.shmem_name = "shmemff" + self.idst
        # print(self.pre,self.shmem_name)

        # self.n_bytes =self.shmem_image_dimensions[0]*self.shmem_image_dimensions[1]*3
        n_buf = self.shmem_ringbuffer_size

        # branch 1
        # get input FrameFilter from OpenGLThread
        self.gl_in_filter = self.openglthread.getInput()

        # branch 2
        # print(self.pre,"using shmem name",self.shmem_name)
        # print(self.shmem_name)
        self.shmem_filter = core.RGBShmemFrameFilter(
            self.shmem_name,
            n_buf,
            self.shmem_image_dimensions[0],
            self.shmem_image_dimensions[1])  # shmem id, cells, width, height
        # self.shmem_filter =core.InfoFrameFilter ("info"+self.idst)
        if self.event_fd is not None:
            # Optional eventfd used to signal the client process about frames.
            self.shmem_filter.useFd(self.event_fd)

        # YUV -> RGB interpolation on the CPU, then down to shared memory.
        self.sws_filter = core.SwScaleFrameFilter(
            "sws_filter" + self.idst,
            self.shmem_image_dimensions[0],
            self.shmem_image_dimensions[1],
            self.shmem_filter)
        # Pass a frame through to sws_filter only once per interval (ms).
        self.interval_filter = core.TimeIntervalFrameFilter(
            "interval_filter" + self.idst, self.shmem_image_interval, self.sws_filter)

        # fork: writes to branches 1 and 2
        # self.fork_filter =core.ForkFrameFilter
        # ("fork_filter"+self.idst,self.gl_in_filter,self.sws_filter) # FIX
        self.fork_filter = core.ForkFrameFilter(
            "fork_filter" + self.idst,
            self.gl_in_filter,
            self.interval_filter)
        # self.fork_filter =core.ForkFrameFilter ("fork_filter"+self.idst,self.gl_in_filter,None)
        # self.fork_filter=self.gl_in_filter # debugging

        # main branch
        self.framefifo_ctx = core.FrameFifoContext()
        self.framefifo_ctx.n_basic = self.n_basic
        self.framefifo_ctx.n_setup = self.n_setup
        self.framefifo_ctx.n_signal = self.n_signal
        self.framefifo_ctx.flush_when_full = self.flush_when_full

        self.avthread = core.AVThread(
            "avthread_" + self.idst,
            self.fork_filter,
            self.framefifo_ctx)  # AVThread writes to self.fork_filter
        self.avthread.setAffinity(self.affinity)
        # get input FrameFilter from AVThread
        self.av_in_filter = self.avthread.getFrameFilter()
        # self.av_in_filter is used by BasicFilterchain.createContext that passes self.av_in_filter to LiveThread
        # # self.live_out_filter =core.InfoFrameFilter ("live_out_filter"+self.idst,self.av_in_filter)

    def getShmemPars(self):
        """Returns shared mem name that should be used in the client process,
        the ringbuffer size and the image dimensions.
        """
        # SharedMemRingBuffer(const char* name, int n_cells, std::size_t n_bytes, int mstimeout=0, bool is_server=false); // <pyapi>
        # return self.shmem_name, self.shmem_ringbuffer_size, self.n_bytes
        return self.shmem_name, self.shmem_ringbuffer_size, self.shmem_image_dimensions
def test1():
    """Demo: run a single RTSP stream through a BasicFilterchain for a few
    seconds, then let everything tear down."""
    st = """ Test single stream
    """
    pre = pre_mod + "test1 :"
    print(pre, st)

    # Background threads: live stream reception and OpenGL presentation.
    live = LiveThread(
        name="live_thread",
        verbose=True
    )
    opengl = OpenGLThread(
        name="mythread",
        n_1440p=5,
        verbose=True
    )

    # now livethread and openglthread are running ..
    chain = BasicFilterchain(
        livethread=live,
        openglthread=opengl,
        address="rtsp://admin:admin@192.168.1.10",
        slot=1
    )

    print("sleeping for some secs")
    time.sleep(3)
    print("bye!")
def test2():
    """Demo: run a single RTSP stream through a ShmemFilterchain (i.e. with
    the shared-memory branch enabled) for a few seconds."""
    st = """ Test ShmemFilterchain
    """
    pre = pre_mod + "test2 :"
    print(pre, st)

    # Background threads: live stream reception and OpenGL presentation.
    live = LiveThread(
        name="live_thread",
        verbose=True
    )
    opengl = OpenGLThread(
        name="mythread",
        n_1440p=5,
        verbose=True
    )

    # now livethread and openglthread are running ..
    chain = ShmemFilterchain(
        livethread=live,
        openglthread=opengl,
        address="rtsp://admin:admin@192.168.1.10",
        slot=1,
        # images passed over shmem are full-hd/4 reso
        shmem_image_dimensions=(1920 // 4, 1080 // 4),
        shmem_image_interval=1000,  # .. passed every 1000 milliseconds
        shmem_ringbuffer_size=10  # size of the ringbuffer
    )

    print("sleeping for some secs")
    time.sleep(3)
    print("bye!")
def main():
    """Command-line entry point: run the test selected by the first
    command-line argument (e.g. ``python basic.py 1`` runs test1)."""
    pre = pre_mod + "main :"
    print(pre, "main: arguments: ", sys.argv)
    if (len(sys.argv) < 2):
        print(pre, "main: needs test number")
    else:
        # Dispatch via an explicit module-level lookup instead of exec():
        # no arbitrary code execution from the command line. An unknown
        # test number still fails loudly (KeyError instead of NameError).
        test_name = "test" + str(sys.argv[1])
        globals()[test_name]()


if (__name__ == "__main__"):
    main()
| 34.401535 | 211 | 0.635046 | 10,334 | 0.76827 | 0 | 0 | 0 | 0 | 0 | 0 | 6,425 | 0.47766 |
6163cb2b3ca8b632e91d64eb5923fcae7762780f | 455 | py | Python | need_an_image/utils/store.py | RyouMon/i-need-an-image | a2b2b42ca0ab9cb2e9886733339fdec7641f0246 | [
"MIT"
] | 1 | 2022-02-05T04:25:39.000Z | 2022-02-05T04:25:39.000Z | need_an_image/utils/store.py | RyouMon/i-need-an-image | a2b2b42ca0ab9cb2e9886733339fdec7641f0246 | [
"MIT"
] | 5 | 2021-08-11T15:59:04.000Z | 2021-09-18T15:21:37.000Z | need_an_image/utils/store.py | RyouMon/i-need-an-image | a2b2b42ca0ab9cb2e9886733339fdec7641f0246 | [
"MIT"
] | null | null | null | import os.path
from uuid import uuid4
def save_image(image, save_to='.'):
    """
    Save an image to the local disk under a random (uuid4) filename.

    Palette ('P') images are converted to 'RGBA' first; RGBA images are
    written as PNG (to keep the alpha channel), everything else as JPEG.

    :param image: image object exposing ``mode``, ``convert`` and ``save``
        (e.g. a PIL.Image)
    :param save_to: target directory, created if missing (default: '.')
    :returns: the path of the written file
    """
    suffix = '.jpg'
    if image.mode == 'P':
        # Palette images cannot be written as JPEG; promote to RGBA.
        image = image.convert('RGBA')
    if image.mode == 'RGBA':
        suffix = '.png'  # preserve transparency
    filename = uuid4().hex + suffix
    # exist_ok avoids the race between an isdir() check and makedirs().
    os.makedirs(save_to, exist_ok=True)
    filename = os.path.join(save_to, filename)
    image.save(filename)
    return filename
| 18.2 | 46 | 0.589011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.153846 |
61654d0d5ba34a716dc57dc18432e7eb9d5225f1 | 670 | py | Python | leetcode/191.py | pingrunhuang/CodeChallenge | a8e5274e04c47d851836197907266418af4f1a22 | [
"MIT"
] | null | null | null | leetcode/191.py | pingrunhuang/CodeChallenge | a8e5274e04c47d851836197907266418af4f1a22 | [
"MIT"
] | null | null | null | leetcode/191.py | pingrunhuang/CodeChallenge | a8e5274e04c47d851836197907266418af4f1a22 | [
"MIT"
] | null | null | null | '''
191. Number of 1 Bits
Write a function that takes an unsigned integer and returns the number of ’1' bits it has (also known as the Hamming weight).
For example, the 32-bit integer ’11' has binary representation 00000000000000000000000000001011, so the function should return 3.
'''
class Solution(object):
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
que=[]
offset = 2
while n!=0:
if n%offset == 1:
que.append(1)
n=n//2
return len(que)
if __name__ == '__main__':
solution = Solution()
t1=11
print(solution.hammingWeight(t1))
| 23.928571 | 129 | 0.591045 | 277 | 0.410979 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.531157 |
6165681678fb46f70ca964ea77be210bc24f6207 | 3,817 | py | Python | examples/color4.py | yang69can/pyngl | 78a7040ce9de4b7a442b0c3b5faecccab2f01426 | [
"Apache-2.0"
] | 125 | 2016-11-24T09:04:28.000Z | 2022-01-22T14:06:56.000Z | examples/color4.py | yang69can/pyngl | 78a7040ce9de4b7a442b0c3b5faecccab2f01426 | [
"Apache-2.0"
] | 52 | 2017-11-08T23:23:02.000Z | 2022-03-20T03:17:39.000Z | examples/color4.py | yang69can/pyngl | 78a7040ce9de4b7a442b0c3b5faecccab2f01426 | [
"Apache-2.0"
] | 25 | 2017-08-27T10:50:43.000Z | 2022-01-29T14:56:05.000Z | #
# File:
# color4.py
#
# Synopsis:
#    Draws sixteen sample color boxes with RGB labels.
#
# Category:
# Colors
#
# Author:
# Fred Clare
#
# Date of initial publication:
# January, 2006
#
# Description:
# This example draws sixteen color boxes using the RGB
# values for named colors. The boxes are labeled with
# the color name and the associated RGB values.
#
# Effects illustrated:
# o Drawing lines and polygons in NDC space.
# o RGB equivalents for some named colors.
# o Converting integer RGB color specifications to floating point.
#
# Output:
# o One plot is produced with sixteen sample color boxes.
#
from __future__ import print_function
import Ngl
import numpy
#
# Define the colors and labels to be used.
# The list alternates [R, G, B] triplets (0-255, from the named-color
# chart) with the corresponding color-name label.
#
colors_and_labels = \
    [
        [233, 150, 122], "DarkSalmon",
        [164, 42, 42], "Brown",
        [255, 127, 0], "DarkOrange1",
        [255, 0, 0], "Red",
        [255, 255, 0], "Yellow",
        [0, 255, 0], "Green",
        [34, 139, 34], "ForestGreen",
        [0, 255, 255], "Cyan",
        [79, 148, 205], "SteelBlue3",
        [0, 0, 255], "Blue",
        [148, 0, 211], "DarkViolet",
        [255, 0, 255], "Magenta",   # BUGFIX: label was misspelled "Magneta"
        [255, 255, 255], "White",
        [153, 153, 153], "Gray60",
        [102, 102, 102], "Gray40",
        [0, 0, 0], "Black"
    ]

#
# Open a workstation with a default color table having
# background color "black" and foreground color "white".
#
rlist = Ngl.Resources()
rlist.wkColorMap = "default"
rlist.wkForegroundColor = "White"
rlist.wkBackgroundColor = "Black"

wks_type = "png"
wks = Ngl.open_wks(wks_type, "color4", rlist)

#
# Extract the colors and labels (even / odd positions of the list above).
#
colors = colors_and_labels[0::2]
labels = colors_and_labels[1::2]

#
# Set up arrays and resource lists for drawing the boxes.
# Select "Helvetica-Bold" for all text.
#
x = numpy.zeros(5, 'f')
y = numpy.zeros(5, 'f')
poly_res = Ngl.Resources()
text_res = Ngl.Resources()
text_res.txFont = "Helvetica-Bold"

#
# Draw the color boxes and titles.
#
for i in range(0, len(colors)):
    #
    # delx_0 - horizontal spacing between boxes.
    # delx_1 - width of a box.
    # dely_0 - vertical spacing between boxes.
    # dely_1 - height of a box.
    #
    delx_0, delx_1, dely_0, dely_1 = 0.245, 0.235, 0.22, 0.15
    # Box corners in NDC space, laid out in a 4 x 4 grid
    # (closed polygon: last point repeats the first).
    x[0], y[0] = 0.015 + delx_0 * (i % 4), 0.90 - (i // 4) * dely_0
    x[1], y[1] = x[0] + delx_1, y[0]
    x[2], y[2] = x[1], y[1] - dely_1
    x[3], y[3] = x[0], y[2]
    x[4], y[4] = x[0], y[0]

    #
    # Convert the integer color values obtained from the
    # named color chart (as entered above) to floating
    # point numbers in the range 0. to 1.
    #
    r, g, b = colors[i][0] / 255., colors[i][1] / 255., colors[i][2] / 255.
    poly_res.gsFillColor = [r, g, b]

    #
    # Draw an outline if the color is black, otherwise draw a filled box.
    #
    if (labels[i] == "Black"):
        Ngl.polyline_ndc(wks, x, y, poly_res)
    else:
        Ngl.polygon_ndc(wks, x, y, poly_res)

    #
    # Label the boxes: color name above, RGB values below.
    #
    text_res.txFontHeightF = 0.017
    Ngl.text_ndc(wks, labels[i], 0.5 * (x[0] + x[1]), y[0] + 0.0125, text_res)
    rgb_label = "R={:4.2f} G={:4.2f} B={:4.2f}".format(r, g, b)
    text_res.txFontHeightF = 0.015
    Ngl.text_ndc(wks, rgb_label, 0.5 * (x[0] + x[1]), y[3] - 0.0125, text_res)

#
# Plot top and bottom labels.
#
text_res.txFontHeightF = 0.025
Ngl.text_ndc(wks, "Sixteen Sample Colors", 0.5, 0.96, text_res)
text_res.txFontHeightF = 0.018
Ngl.text_ndc(wks, "The titles below each box indicate Red, Green, and Blue intensity values.", 0.5, 0.035, text_res)

Ngl.frame(wks)
Ngl.end()
61672e950ab29487e7137f084aa554a9e2b34c33 | 90 | py | Python | t4_hotel/apps.py | dnswd/SIRUCO | eb1dae3dbbae384ef53d7bd262c5977a13fad2ef | [
"Unlicense"
] | null | null | null | t4_hotel/apps.py | dnswd/SIRUCO | eb1dae3dbbae384ef53d7bd262c5977a13fad2ef | [
"Unlicense"
] | null | null | null | t4_hotel/apps.py | dnswd/SIRUCO | eb1dae3dbbae384ef53d7bd262c5977a13fad2ef | [
"Unlicense"
] | null | null | null | from django.apps import AppConfig
class T4HotelConfig(AppConfig):
    """Django AppConfig for the t4_hotel application."""

    # App label under which Django registers this application.
    name = 't4_hotel'
| 15 | 33 | 0.755556 | 53 | 0.588889 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.111111 |
616842334b3b74678e4d196cefe1da10ade1ad1a | 4,495 | py | Python | gameAI.py | dshao2007/TermProject | ff277a107e7427e76d95311c511890d9202d676f | [
"MIT"
] | null | null | null | gameAI.py | dshao2007/TermProject | ff277a107e7427e76d95311c511890d9202d676f | [
"MIT"
] | null | null | null | gameAI.py | dshao2007/TermProject | ff277a107e7427e76d95311c511890d9202d676f | [
"MIT"
] | null | null | null |
import random
from evaluator import ChessEval
class ChessAI(object):
    """Chess AI that chooses moves for `color` by minimax search with
    alpha-beta pruning over the supplied game object."""

    # Sentinel "infinity" used as the initial alpha/beta window.
    INF = 8000

    def __init__(self, game, color):
        self.game = game
        # Position evaluator; getScore() below expects it to return a
        # (blackScore, whiteScore) pair.
        self.evaluator = ChessEval(game)
        self.color = color
        self.drunkMode = False
        # Material values per piece name (used by the simple scorers below).
        self.points = {'Pawn': 10, 'Knight': 30, 'Bishop': 30, 'Rook': 50, 'Queen': 90, 'King': 200}
        # Search depth in plies; exposed to the UI as depth - 1 ("level").
        self.depth = 3

    def changeLevel(self, level):
        # Level n searches n + 1 plies deep.
        self.depth = level + 1

    def getLevel(self):
        return self.depth - 1

    def nextMove(self, playerColor=None):
        # Return the best move found for playerColor (defaults to this AI's
        # own color), or None when there are no legal moves at all.
        if playerColor is None:
            playerColor = self.color
        results = self.game.getAllLegalMoves(playerColor, fullValidate=False)
        if len(results) == 0:
            return None
        move, _ = self.minimax(-self.INF, self.INF, playerColor, playerColor, self.depth)
        return move

    # Minimax is a completely new concept to me, and I the reference from
    # https://www.chessprogramming.org/Search to learn about it
    # All of the code is mine, with the exception of alpha beta pruning, which is
    # a standard template I got from the website.
    def minimax(self, alpha, beta, color, playerColor, depth):
        # Alpha-beta minimax: returns (best_move, score) with `color` to
        # move, scored from playerColor's perspective.
        results = self.game.getAllLegalMoves(color, fullValidate=False)
        score = self.getScore()
        # NOTE(review): getScore() is oriented by self.color; this sign flip
        # appears to re-orient the score for White — confirm intended
        # semantics against the evaluator.
        if playerColor == 'White':
            score = -score
        # Leaf: depth exhausted, or no legal moves left for this side.
        if depth == 0:
            return None, score
        if len(results) == 0:
            return None, score
        if color == 'White':
            otherColor = 'Black'
        else:
            otherColor = 'White'
        if playerColor == color:
            # Maximizing side (the player we are searching for).
            move = None
            maxVal = -self.INF
            for i in range(len(results)):
                # movePiece returns falsy when the move cannot be played;
                # a successfully simulated move is undone after evaluation.
                if self.game.movePiece(results[i][0], results[i][1], results[i][2],
                                       aiMode=True, simulate=False):
                    _, eval = self.minimax(alpha, beta, otherColor, playerColor, depth - 1)
                    # maxVal = max(eval, maxVal)
                    if eval > maxVal:
                        move = results[i]
                        maxVal = eval
                    self.game.undoLastMove()
                    alpha = max(alpha, maxVal)
                    if beta <= alpha:
                        break  # beta cutoff
                else:
                    pass
                # moves.append(move)
            return (move, maxVal)
        else:
            # Minimizing side (the opponent).
            minVal = self.INF
            move = None
            for i in range(len(results)):
                if not self.game.movePiece(results[i][0], results[i][1], results[i][2],
                                           aiMode=True, simulate=False):
                    continue
                _, eval = self.minimax(alpha, beta, otherColor, playerColor, depth - 1)
                self.game.undoLastMove()
                # minVal = min(eval, minVal)
                if eval < minVal:
                    minVal = eval
                    move = results[i]
                beta = min(beta, minVal)
                if beta <= alpha:
                    break  # alpha cutoff
            return move, minVal

    def getScore(self):
        # Evaluator-based score from this AI's perspective; checkmate
        # subtracts an extra 900 from the mated side.
        blackScore, whiteScore = self.evaluator.getScore()
        if self.game.inCheck('Black') and self.game.checkMate('Black'):
            blackScore -= 900
        if self.game.inCheck('White') and self.game.checkMate('White'):
            whiteScore -= 900
        if self.color == 'White':
            return whiteScore - blackScore
        else:
            return blackScore - whiteScore

    def getScoreSimple(self):
        # Pure material-count score from this AI's perspective.
        w = self.getWhiteScore()
        b = self.getBlackScore()
        if self.color == 'White':
            return w - b
        else:
            return b - w

    # Assign large score to checkmate so AI goes for the win
    def getWhiteScore(self):
        score = 0
        for piece in self.game.getPieces():
            if piece.color == 'White':
                score += self.points[piece.name]
        if self.game.inCheck('Black') and self.game.checkMate('Black'):
            score += 900
        return score

    def getBlackScore(self):
        score = 0
        for piece in self.game.getPieces():
            if piece.color == 'Black':
                score += self.points[piece.name]
        if self.game.inCheck('White') and self.game.checkMate('White'):
            score += 900
        return score
| 33.051471 | 101 | 0.507008 | 4,444 | 0.988654 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.119244 |
616c7ed2448ca92350545be4a2621305508ad028 | 604 | py | Python | scripts/Render_Animation.py | eecheve/Gaussian-2-Blender | 14811ad6092ec7be09b043752a2db3514599eede | [
"Apache-2.0"
] | 1 | 2021-09-17T21:33:45.000Z | 2021-09-17T21:33:45.000Z | scripts/Render_Animation.py | eecheve/Gaussian-2-Blender | 14811ad6092ec7be09b043752a2db3514599eede | [
"Apache-2.0"
] | null | null | null | scripts/Render_Animation.py | eecheve/Gaussian-2-Blender | 14811ad6092ec7be09b043752a2db3514599eede | [
"Apache-2.0"
] | 1 | 2021-09-19T21:38:24.000Z | 2021-09-19T21:38:24.000Z | import bpy
def Render_Animation():
    """Set up a camera and sun light, frame the camera on every mesh in
    the scene, and configure a transparent RGBA render."""
    # Add a camera with a fixed pose and a sun lamp above the origin.
    bpy.ops.object.camera_add(enter_editmode=False, align='VIEW', location=(0, 0, 0), rotation=(1.60443, 0.014596, 2.55805))
    bpy.ops.object.light_add(type='SUN', location=(0, 0, 5))
    cam = bpy.data.objects["Camera"]  # NOTE: fetched but not used afterwards
    scene = bpy.context.scene
    # Select every mesh object so the camera can be fitted around them.
    for obj in scene.objects:
        if obj.type == 'MESH':
            obj.select_set(True)
    bpy.ops.view3d.camera_to_view_selected()
    # Render with a transparent film and keep the alpha channel in output.
    bpy.context.scene.render.film_transparent = True
    bpy.context.scene.render.image_settings.color_mode = 'RGBA'
| 40.266667 | 124 | 0.708609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.11755 |
616dffb214ea24ee589984d6eb4777f8c7994c0b | 6,843 | py | Python | src/dictstore/interface.py | sampathbalivada/dictstore | d58c8ea22d52d54d93e189cbf290ffbc7e04c6f6 | [
"Apache-2.0"
] | 1 | 2021-12-21T14:23:50.000Z | 2021-12-21T14:23:50.000Z | src/dictstore/interface.py | sampathbalivada/dictstore | d58c8ea22d52d54d93e189cbf290ffbc7e04c6f6 | [
"Apache-2.0"
] | null | null | null | src/dictstore/interface.py | sampathbalivada/dictstore | d58c8ea22d52d54d93e189cbf290ffbc7e04c6f6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Sai Sampath Kumar Balivada
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
dictstore is a simple local data store
for Python that aims to provide an interface
similar to a python dictionary.
"""
from typing import Any, DefaultDict
from pathlib import Path
import ast
import dictstore.helpers as helpers
from dictstore.exceptions import DataStoreFileCorrupted, UnsupportedValueType
from dictstore.file_handler import FileHandler
class DictStoreSingleton(type):
    """
    Metaclass implementing per-location singleton behavior for DictStore:
    instantiating the class twice with the same datastore location returns
    the same object.
    """

    # Maps datastore location -> the single instance bound to it.
    # (The original used typing.DefaultDict(None) — the typing alias misused
    # as a runtime constructor; a plain dict gives identical behavior.)
    _instances: dict = {}

    def __call__(cls, datastore_location='./default.dictstore') -> Any:
        # Reuse the instance already opened for this location, if any.
        if datastore_location in cls._instances:
            return cls._instances[datastore_location]

        instance = super(DictStoreSingleton, cls).__call__(datastore_location)
        cls._instances[datastore_location] = instance
        return instance
class DictStore(metaclass=DictStoreSingleton):
"""
A class that initializes the datastore into the memory
and provides functions to manipulate it.
"""
def __init__(self, datastore_location='./default.dictstore') -> None:
"""
Initializes the in memory dictionary and
copies all the records from the database file to memory
"""
# create an in memory dictionary to store the value
# and set default value to None
self.in_memory_dictionary = {}
self.in_memory_dictionary.setdefault(None)
# check if the datafile is already opened and return
# the object already opened else continue creating a new object
self.datastore_location = Path(datastore_location).resolve().__str__()
self.file_handler = FileHandler(self.datastore_location)
# fetch the file contents and parse accordingly
# parse key and value as JSON objects
data = self.file_handler.read_from_file()
# check if the number of lines are even
if len(data) % 2 != 0:
raise DataStoreFileCorrupted()
for line_number_of_key in range(0, len(data), 2):
key = data[line_number_of_key]
key_parsed = ast.literal_eval(key)
value = data[line_number_of_key + 1]
value_parsed = ast.literal_eval(value)
self.in_memory_dictionary[key_parsed] = value_parsed
def __rewrite_data_file(self) -> None:
"""
converts in memory dictionary to string
asks file handler to write the resulting string
to the data file.
"""
# convert each record into the desired format
# Format:
# key \n
# json(value) \n
data_file_cache = []
for key, value in self.in_memory_dictionary.items():
print(key, '|', value)
data_file_cache.append(helpers.get_escaped_string(key) + '\n')
data_file_cache.append(helpers.get_escaped_string(value) + '\n')
self.file_handler.rewrite_to_file(data_file_cache)
def __add_record_to_data_file(self, key, value) -> None:
"""
converts the given record to string
asks file handler to append the resulting string
to the end of data file
"""
data_record_cache = helpers.get_escaped_string(key) + '\n'
data_record_cache += helpers.get_escaped_string(value) + '\n'
self.file_handler.append_to_file(data_record_cache)
# -----------------
# Read Operations
# -----------------
# All read operation are performed on the in memory dictionary
# -----------------
def keys(self) -> list:
"""returns a list of all the keys in the datastore"""
return list(self.in_memory_dictionary.keys())
def values(self) -> list:
"""returns a list of all the values in the datastore"""
return list(self.in_memory_dictionary.values())
def get(self, key: Any) -> Any:
"""
takes a key and returns the value if it exists.
returns None if the key does not exist.
"""
return self.in_memory_dictionary.get(key)
# -----------------
# Write Operations
# -----------------
# All write operations are performed with a write through approach
#
# Write operations are first performed on the in memory dictionary
# and updated on the data file
# -----------------
def upsert_record(self, key: Any, value: Any) -> None:
"""
takes a key value pair
and updates the value if it already exists
creates a new record otherwise
"""
if not helpers.is_supported_key_type(key):
message = ('Supported key types are '
'int, float, str, tuple and NoneType'
)
raise KeyError(message)
if not helpers.is_supported_value_type(value):
raise UnsupportedValueType()
# if there is no record with the given key
# update the in memory dictionary and
# add record to the data file
if self.get(key) is None:
self.in_memory_dictionary[key] = value
self.__add_record_to_data_file(key, value)
# if a record exists with the given key
# add new key-value pair to in memory dictionary
# and rewrite the data file
else:
self.in_memory_dictionary[key] = value
self.__rewrite_data_file()
def remove(self, key):
"""
takes a key
and removes the record if it exists
"""
# if a record exists with the given key
# remove it from the in memory dictionary
# and rewrite the data file
if self.get(key) is not None:
del self.in_memory_dictionary[key]
self.__rewrite_data_file()
def __len__(self) -> int:
"""returns the number of records in the database"""
return self.in_memory_dictionary.__len__()
    def __delitem__(self, key: Any) -> None:
        """Support ``del store[key]`` by delegating to :meth:`remove`."""
        self.remove(key)
    def __getitem__(self, key: Any) -> Any:
        """Support ``store[key]`` by delegating to :meth:`get` (None if absent)."""
        return self.get(key)
    def __setitem__(self, key: Any, value: Any) -> None:
        """Support ``store[key] = value`` by delegating to :meth:`upsert_record`."""
        self.upsert_record(key, value)
| 32.585714 | 78 | 0.640362 | 5,889 | 0.860587 | 0 | 0 | 0 | 0 | 0 | 0 | 3,179 | 0.464562 |
616e5cf634f2586b19810e30e33e8aba47812b49 | 494 | py | Python | slender/tests/list/test_include.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | 1 | 2020-01-10T21:51:46.000Z | 2020-01-10T21:51:46.000Z | slender/tests/list/test_include.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | null | null | null | slender/tests/list/test_include.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | null | null | null |
import re
from unittest import TestCase
from expects import expect, equal, raise_error, be_true, be_false
from slender import List
class TestInclude(TestCase):
    """Tests for List.include membership checks."""

    def test_include_if_value_in_array(self):
        """include() reports True for an element that is present."""
        words = List(['apple', 'bear', 'dog', 'plum', 'grape', 'cat', 'anchor'])
        expect(words.include('bear')).to(be_true)

    def test_include_if_value_not_in_array(self):
        """include() reports False for an element that is absent."""
        words = List(['apple', 'bear', 'cat', 'anchor'])
        expect(words.include('dog')).to(be_false)
| 27.444444 | 76 | 0.653846 | 348 | 0.704453 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.163968 |
616ebe1d1635b2f07a1a9a30098f7891fad01adc | 7,261 | py | Python | tests/Util/test_config.py | JI511/Personal_Fitness | 25d54908398caf9291e70069dca97a567b1bd94b | [
"MIT"
] | null | null | null | tests/Util/test_config.py | JI511/Personal_Fitness | 25d54908398caf9291e70069dca97a567b1bd94b | [
"MIT"
] | 75 | 2019-04-01T01:57:34.000Z | 2019-09-26T00:14:28.000Z | tests/Util/test_config.py | JI511/Personal_Fitness | 25d54908398caf9291e70069dca97a567b1bd94b | [
"MIT"
] | 1 | 2019-08-11T07:25:15.000Z | 2019-08-11T07:25:15.000Z | # ----------------------------------------------------------------------------------------------------------------------
# Body Weight test cases
# ----------------------------------------------------------------------------------------------------------------------
# imports
import unittest
import tempfile
import os
import shutil
import logging
from src.Util.config import Config
from src.Util.constants import Constants
class TestConfig(unittest.TestCase):
    """
    Tests for the Config utility: reading and updating config options,
    default-value maintenance, and backup-database placeholders.
    """

    def setUp(self):
        """
        Creates a temporary directory and a fresh Config object per test.
        """
        self.logs_dir = tempfile.mkdtemp()
        self.file_path = os.path.join(self.logs_dir, 'test_config.ini')
        self.logger = logging.getLogger(__name__)
        self.config = Config(logger=self.logger,
                             output_path=self.logs_dir)
        self.section = 'OPTIONS'
        self.option = 'water'

    def tearDown(self):
        """
        Removes the temporary directory used by the test.
        """
        # The stray `self.connection = None` left over from a database test
        # class was removed; nothing here ever read it.
        if os.path.exists(self.logs_dir):
            shutil.rmtree(self.logs_dir)

    # ------------------------------------------------------------------------------------------------------------------
    # read_config_option tests
    # ------------------------------------------------------------------------------------------------------------------
    def test_read_config_option_nominal(self):
        """
        Checks that the default config file is created properly.
        """
        value = self.config.read_config_option(section=self.section,
                                               option=self.option)
        self.assertEqual(value, "oz")

    def test_read_config_option_bad_option(self):
        """
        Attempts to get a bad value in the config file.
        """
        with self.assertRaises(KeyError) as error:
            self.config.read_config_option(section=self.section,
                                           option="bad")
        # The assertion must sit outside the `with` block: the call above
        # raises, so any statement after it inside the block never runs.
        self.assertIn('bad', str(error.exception))

    # ------------------------------------------------------------------------------------------------------------------
    # update_config_option tests
    # ------------------------------------------------------------------------------------------------------------------
    def test_update_config_option_nominal(self):
        """
        Updates a config value to be used in the future.
        """
        value = 'mL'
        status = self.config.update_config_option(section=self.section,
                                                  option=self.option,
                                                  value=value)
        self.assertTrue(status)
        water_type = self.config.read_config_option(section=self.section,
                                                    option=self.option)
        self.assertEqual(value, water_type)

    def test_update_config_retain_unique_values(self):
        """
        Updating an option should keep unaffected values the same when rewriting.
        """
        status = self.config.update_config_option(section=self.section,
                                                  option=self.option,
                                                  value='mL')
        self.assertTrue(status)
        status = self.config.update_config_option(section=self.section,
                                                  option='backup_rate',
                                                  value='5')
        self.assertTrue(status)
        water_type = self.config.read_config_option(section=self.section,
                                                    option=self.option)
        backup_rate = self.config.read_config_option(section=self.section,
                                                     option='backup_rate')
        self.assertEqual(water_type, 'mL')
        self.assertEqual(backup_rate, '5')

    def test_update_config_option_bad_section(self):
        """
        Attempts to change a config option with a section that does not exist.
        """
        status = self.config.update_config_option(section='bad',
                                                  option=self.option,
                                                  value='mL')
        self.assertFalse(status)

    def test_update_config_option_bad_option(self):
        """
        Attempts to change a config option that does not exist.
        """
        status = self.config.update_config_option(section=self.section,
                                                  option='bad',
                                                  value='mL')
        self.assertFalse(status)

    # ------------------------------------------------------------------------------------------------------------------
    # check_config_file_values tests
    # ------------------------------------------------------------------------------------------------------------------
    def test_check_config_file_values_nominal(self):
        """
        A new default has been added to a section. Add the default value to an already existing config file. The old
        config values will remain.
        """
        Constants.config_defaults[self.section]['test'] = 'new'
        # Constants.config_defaults is shared module state; undo the mutation
        # so later tests (in any module) see pristine defaults.
        self.addCleanup(Constants.config_defaults[self.section].pop, 'test', None)
        status = self.config.update_config_option(section=self.section,
                                                  option=self.option,
                                                  value='mL')
        self.assertTrue(status)
        self.config.check_config_file_values()
        added_default = self.config.read_config_option(section=self.section,
                                                       option='test')
        self.assertEqual(added_default, 'new')
        old_value = self.config.read_config_option(section=self.section,
                                                   option=self.option)
        self.assertEqual(old_value, 'mL')

    # ------------------------------------------------------------------------------------------------------------------
    # create_backup_database tests
    # ------------------------------------------------------------------------------------------------------------------
    def test_create_backup_database_nominal(self):
        """
        Creates a backup database when no other backups are present.
        """
        pass  # TODO: not yet implemented

    def test_create_backup_database_already_exists(self):
        """
        Checks for a backup database file, and sees that one has been created within the backup rate.
        """
        pass  # TODO: not yet implemented

    def test_create_backup_database_needed(self):
        """
        Checks for a backup database file, one does exist, but a new one is needed.
        """
        pass  # TODO: not yet implemented

    def test_create_backup_database_no_backup_db_folder(self):
        """
        Creates the backup_db folder within the cwd if it does not already exist.
        """
        pass  # TODO: not yet implemented
# ----------------------------------------------------------------------------------------------------------------------
# End
# --------------------------------------------------------------------------------------------------------------------
| 43.479042 | 120 | 0.440986 | 6,582 | 0.906487 | 0 | 0 | 0 | 0 | 0 | 0 | 2,927 | 0.403113 |
61710e8907c2bd15913967ac4ab6eb06210e8681 | 8,164 | py | Python | userbot/plugins/fontstyles.py | anandhu-dev/catuserbot | 0ae10db978c1a9bf3f4f0da991a86d85fc29c0f1 | [
"MIT"
] | 2 | 2020-05-22T16:24:42.000Z | 2020-05-22T16:31:37.000Z | userbot/plugins/fontstyles.py | himawari27/catuserbot | 1a395bf197a68c795eb0d5114718fb28c40b0315 | [
"MIT"
] | 1 | 2021-02-08T20:47:53.000Z | 2021-02-08T20:47:53.000Z | userbot/plugins/fontstyles.py | himawari27/catuserbot | 1a395bf197a68c795eb0d5114718fb28c40b0315 | [
"MIT"
] | 3 | 2021-03-01T07:44:15.000Z | 2021-06-06T13:28:11.000Z |
import re
import time
import requests
from telethon import events
from userbot import CMD_HELP
from userbot.utils import register
import asyncio
import random
# Emoji pool that .cp (copypasta) samples from when decorating text;
# duplicates are intentional and skew the sampling distribution.
EMOJIS = [
    "😂",
    "😂",
    "👌",
    "💞",
    "👍",
    "👌",
    "💯",
    "🎶",
    "👀",
    "😂",
    "👓",
    "👏",
    "👐",
    "🍕",
    "💥",
    "😩",
    "😏",
    "😞",
    "👀",
    "👅",
    "😩",
    "🤒",
    "😳",
    "🤯",
    "😵",
    "🥵",
    "🤒",
    "😠",
    "😪",
    "😴",
    "🤤",
    "👿",
    "👽",
    "😏",
    "😒",
    "😣",
    "🤔",
    "🤨",
    "🧐",
    "😝",
    "🤪",
    "🤩",
    "☺️",
    "😭",
    "🥺",
]
ZALG_LIST = [["̖",
" ̗",
" ̘",
" ̙",
" ̜",
" ̝",
" ̞",
" ̟",
" ̠",
" ̤",
" ̥",
" ̦",
" ̩",
" ̪",
" ̫",
" ̬",
" ̭",
" ̮",
" ̯",
" ̰",
" ̱",
" ̲",
" ̳",
" ̹",
" ̺",
" ̻",
" ̼",
" ͅ",
" ͇",
" ͈",
" ͉",
" ͍",
" ͎",
" ͓",
" ͔",
" ͕",
" ͖",
" ͙",
" ͚",
" ",
],
[" ̍",
" ̎",
" ̄",
" ̅",
" ̿",
" ̑",
" ̆",
" ̐",
" ͒",
" ͗",
" ͑",
" ̇",
" ̈",
" ̊",
" ͂",
" ̓",
" ̈́",
" ͊",
" ͋",
" ͌",
" ̃",
" ̂",
" ̌",
" ͐",
" ́",
" ̋",
" ̏",
" ̽",
" ̉",
" ͣ",
" ͤ",
" ͥ",
" ͦ",
" ͧ",
" ͨ",
" ͩ",
" ͪ",
" ͫ",
" ͬ",
" ͭ",
" ͮ",
" ͯ",
" ̾",
" ͛",
" ͆",
" ̚",
],
[" ̕",
" ̛",
" ̀",
" ́",
" ͘",
" ̡",
" ̢",
" ̧",
" ̨",
" ̴",
" ̵",
" ̶",
" ͜",
" ͝",
" ͞",
" ͟",
" ͠",
" ͢",
" ̸",
" ̷",
" ͡",
]]
@register(outgoing=True, pattern="^.vapor(?: |$)(.*)")
async def vapor(vpr):
    """Convert the given/replied text to fullwidth "vaporwave" characters."""
    if not vpr.text[0].isalpha() and vpr.text[0] not in ("/", "#", "@", "!"):
        replied = await vpr.get_reply_message()
        source = vpr.pattern_match.group(1)
        if source:
            pass
        elif replied:
            source = replied.text
        else:
            await vpr.edit("`Give some text for vapor!`")
            return

        def _widen(character):
            # Printable ASCII maps to the fullwidth block; space maps to
            # the ideographic space; everything else passes through.
            code_point = ord(character)
            if 0x21 <= code_point <= 0x7F:
                return chr(code_point + 0xFEE0)
            if code_point == 0x20:
                return chr(0x3000)
            return character

        await vpr.edit("".join(_widen(character) for character in source))
@register(outgoing=True, pattern="^.str(?: |$)(.*)")
async def stretch(stret):
    """Stretch the text: repeat every vowel a random number of times."""
    if not stret.text[0].isalpha() and stret.text[0] not in ("/", "#", "@", "!"):
        textx = await stret.get_reply_message()
        # The dead `message = stret.text` assignment that was immediately
        # overwritten here has been removed.
        message = stret.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await stret.edit("`GiiiiiiiB sooooooomeeeeeee teeeeeeext!`")
            return
        # Repeat each matched (Latin or Cyrillic) vowel `count` times.
        count = random.randint(3, 10)
        reply_text = re.sub(
            r"([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])",
            (r"\1" * count),
            message
        )
        await stret.edit(reply_text)
@register(outgoing=True, pattern="^.zal(?: |$)(.*)")
async def zal(zgfy):
    """Invoke the feeling of chaos: stack zalgo combining marks on letters."""
    if not zgfy.text[0].isalpha() and zgfy.text[0] not in ("/", "#", "@", "!"):
        reply_text = list()
        textx = await zgfy.get_reply_message()
        message = zgfy.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await zgfy.edit(
                "`gͫ ̆ i̛ ̺ v͇̆ ȅͅ a̢ͦ s̴̪ c̸̢ ä̸ rͩͣ y͖͞ t̨͚ é̠ x̢͖ t͔͛`"
            )
            return
        for charac in message:
            if not charac.isalpha():
                reply_text.append(charac)
                continue
            # Stack three combining marks on the letter.  The previous
            # three identical if/elif/else branches collapsed into a single
            # indexed lookup; the randint-then-choice call sequence is
            # unchanged, so seeded output is identical.
            for _ in range(0, 3):
                pool = ZALG_LIST[random.randint(0, 2)]
                charac = charac.strip() + random.choice(pool).strip()
            reply_text.append(charac)
        await zgfy.edit("".join(reply_text))
@register(outgoing=True, pattern="^.cp(?: |$)(.*)")
async def copypasta(cp_e):
    """Copypasta the famous meme: sprinkle emojis and 🅱️-substitutions."""
    if not cp_e.text[0].isalpha() and cp_e.text[0] not in ("/", "#", "@", "!"):
        textx = await cp_e.get_reply_message()
        message = cp_e.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await cp_e.edit("`😂🅱️IvE👐sOME👅text👅for✌️Me👌tO👐MAkE👀iT💞funNy!💦`")
            return
        pieces = [random.choice(EMOJIS)]
        # One random character of the message (lowercased) gets replaced
        # by 🅱️ wherever it occurs.
        marked_char = random.choice(message).lower()
        for symbol in message:
            if symbol == " ":
                pieces.append(random.choice(EMOJIS))
            elif symbol in EMOJIS:
                pieces.append(symbol)
                pieces.append(random.choice(EMOJIS))
            elif symbol.lower() == marked_char:
                pieces.append("🅱️")
            else:
                # Random upper/lower flip, then an emoji chaser.
                if bool(random.getrandbits(1)):
                    pieces.append(symbol.upper())
                else:
                    pieces.append(symbol.lower())
                pieces.append(random.choice(EMOJIS))
        await cp_e.edit("".join(pieces))
@register(outgoing=True, pattern="^.mock(?: |$)(.*)")
async def spongemocktext(mock):
    """Randomly flip letter case to produce mOcKiNg sPoNgEbOb text."""
    if not mock.text[0].isalpha() and mock.text[0] not in ("/", "#", "@", "!"):
        textx = await mock.get_reply_message()
        message = mock.pattern_match.group(1)
        if message:
            pass
        elif textx:
            message = textx.text
        else:
            await mock.edit("`gIvE sOMEtHInG tO MoCk!`")
            return
        mocked_chars = []
        for letter in message:
            # Flip case on a coin toss; non-letters pass through unchanged.
            if letter.isalpha() and random.randint(0, 1):
                mocked_chars.append(letter.upper() if letter.islower() else letter.lower())
            else:
                mocked_chars.append(letter)
        await mock.edit("".join(mocked_chars))
# Register this plugin's help text under the "fontstyles" key so the bot's
# help command can display usage for .cp, .vapor, .str, .zal and .mock.
CMD_HELP.update({
    "fontstyles": ".cp (text) or .cp reply to message \
\nUsage: inserts some emojis in between the texts\
\n\n.vapor (text) or .vapor reply to message \
\nUsage: Vaporize the given text. \
\n\n.str (text) or .str reply to message \
\nUsage: Stretchs the given message.\
\n\n.zal (text) or .zal reply to message \
\nUsage: Invoke the feeling of chaos.\
\n\n.mock (text) or .mock reply to message \
\nUsage: random capital and small letters in given text.\
"
})
| 24.297619 | 87 | 0.3573 | 0 | 0 | 0 | 0 | 5,019 | 0.58688 | 4,752 | 0.555659 | 2,040 | 0.238541 |
617125f168844e031dc3dc197fac38fb76b23ec5 | 16,175 | py | Python | test/api/drawing/test_drawing_objects.py | rizwanniazigroupdocs/aspose-words-cloud-python | b943384a1e3c0710cc84df74119e6edf7356037e | [
"MIT"
] | null | null | null | test/api/drawing/test_drawing_objects.py | rizwanniazigroupdocs/aspose-words-cloud-python | b943384a1e3c0710cc84df74119e6edf7356037e | [
"MIT"
] | null | null | null | test/api/drawing/test_drawing_objects.py | rizwanniazigroupdocs/aspose-words-cloud-python | b943384a1e3c0710cc84df74119e6edf7356037e | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="test_drawing_objects.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import os
import dateutil.parser
import asposewordscloud.models.requests
from test.base_test_context import BaseTestContext
#
# Example of how to get drawing objects.
#
class TestDrawingObjects(BaseTestContext):
    """
    Tests for the drawing-object endpoints of the Words API: listing,
    reading by index, rendering, image/OLE data access, insert, update
    and delete (each with and without an explicit node path).
    """

    # Local fixture documents shared by the tests below.
    local_common_file = 'Common/test_multi_pages.docx'
    local_ole_file = 'DocumentElements/DrawingObjects/sample_EmbeddedOLE.docx'
    local_image_file = 'Common/aspose-cloud.png'

    def _remote_folder(self):
        """Remote folder that every test in this class uploads into."""
        return self.remote_test_folder + '/DocumentElements/DrawingObjectss'

    def _upload(self, remote_file_name, local_file):
        """
        Upload a local fixture document to the remote test folder.

        The local file handle is closed after the upload; the previous
        inline pattern leaked one open handle per test.
        """
        with open(os.path.join(self.local_test_folder, local_file), 'rb') as fixture:
            self.upload_file(self._remote_folder() + '/' + remote_file_name, fixture)

    def _open_image(self):
        """
        Open the test image for insert/update requests.  The handle is
        handed to the request object, which reads it during the API call,
        so it is intentionally not closed here.
        """
        return open(os.path.join(self.local_test_folder, self.local_image_file), 'rb')

    def test_get_document_drawing_objects(self):
        """
        Test for getting drawing objects from document.
        """
        remote_file_name = 'TestGetDocumentDrawingObjects.docx'
        self._upload(remote_file_name, self.local_common_file)

        request = asposewordscloud.models.requests.GetDocumentDrawingObjectsRequest(
            name=remote_file_name, node_path='sections/0', folder=self._remote_folder())
        result = self.words_api.get_document_drawing_objects(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_objects, 'Validate GetDocumentDrawingObjects response')
        self.assertIsNotNone(result.drawing_objects.list, 'Validate GetDocumentDrawingObjects response')
        self.assertEqual(1, len(result.drawing_objects.list))

    def test_get_document_drawing_objects_without_node_path(self):
        """
        Test for getting drawing objects from document without node path.
        """
        remote_file_name = 'TestGetDocumentDrawingObjectsWithoutNodePath.docx'
        self._upload(remote_file_name, self.local_common_file)

        request = asposewordscloud.models.requests.GetDocumentDrawingObjectsRequest(
            name=remote_file_name, folder=self._remote_folder())
        result = self.words_api.get_document_drawing_objects(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_objects, 'Validate GetDocumentDrawingObjectsWithoutNodePath response')
        self.assertIsNotNone(result.drawing_objects.list, 'Validate GetDocumentDrawingObjectsWithoutNodePath response')
        self.assertEqual(1, len(result.drawing_objects.list))

    def test_get_document_drawing_object_by_index(self):
        """
        Test for getting drawing object by specified index.
        """
        remote_file_name = 'TestGetDocumentDrawingObjectByIndex.docx'
        self._upload(remote_file_name, self.local_common_file)

        request = asposewordscloud.models.requests.GetDocumentDrawingObjectByIndexRequest(
            name=remote_file_name, index=0, node_path='sections/0', folder=self._remote_folder())
        result = self.words_api.get_document_drawing_object_by_index(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_object, 'Validate GetDocumentDrawingObjectByIndex response')
        self.assertEqual(300.0, result.drawing_object.height)

    def test_get_document_drawing_object_by_index_without_node_path(self):
        """
        Test for getting drawing object by specified index without node path.
        """
        remote_file_name = 'TestGetDocumentDrawingObjectByIndexWithoutNodePath.docx'
        self._upload(remote_file_name, self.local_common_file)

        request = asposewordscloud.models.requests.GetDocumentDrawingObjectByIndexRequest(
            name=remote_file_name, index=0, folder=self._remote_folder())
        result = self.words_api.get_document_drawing_object_by_index(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_object, 'Validate GetDocumentDrawingObjectByIndexWithoutNodePath response')
        self.assertEqual(300.0, result.drawing_object.height)

    def test_render_drawing_object(self):
        """
        Test for getting drawing object by specified index and format.
        """
        remote_file_name = 'TestGetDocumentDrawingObjectByIndexWithFormat.docx'
        self._upload(remote_file_name, self.local_common_file)

        request = asposewordscloud.models.requests.RenderDrawingObjectRequest(
            name=remote_file_name, format='png', index=0, node_path='sections/0', folder=self._remote_folder())
        result = self.words_api.render_drawing_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    def test_render_drawing_object_without_node_path(self):
        """
        Test for getting drawing object by specified index and format without node path.
        """
        remote_file_name = 'TestGetDocumentDrawingObjectByIndexWithFormatWithoutNodePath.docx'
        self._upload(remote_file_name, self.local_common_file)

        request = asposewordscloud.models.requests.RenderDrawingObjectRequest(
            name=remote_file_name, format='png', index=0, folder=self._remote_folder())
        result = self.words_api.render_drawing_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    def test_get_document_drawing_object_image_data(self):
        """
        Test for reading drawing object's image data.
        """
        remote_file_name = 'TestGetDocumentDrawingObjectImageData.docx'
        self._upload(remote_file_name, self.local_common_file)

        request = asposewordscloud.models.requests.GetDocumentDrawingObjectImageDataRequest(
            name=remote_file_name, index=0, node_path='sections/0', folder=self._remote_folder())
        result = self.words_api.get_document_drawing_object_image_data(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    def test_get_document_drawing_object_image_data_without_node_path(self):
        """
        Test for reading drawing object's image data without node path.
        """
        remote_file_name = 'TestGetDocumentDrawingObjectImageDataWithoutNodePath.docx'
        self._upload(remote_file_name, self.local_common_file)

        request = asposewordscloud.models.requests.GetDocumentDrawingObjectImageDataRequest(
            name=remote_file_name, index=0, folder=self._remote_folder())
        result = self.words_api.get_document_drawing_object_image_data(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    def test_get_document_drawing_object_ole_data(self):
        """
        Test for getting drawing object OLE data.
        """
        remote_file_name = 'TestGetDocumentDrawingObjectOleData.docx'
        self._upload(remote_file_name, self.local_ole_file)

        request = asposewordscloud.models.requests.GetDocumentDrawingObjectOleDataRequest(
            name=remote_file_name, index=0, node_path='sections/0', folder=self._remote_folder())
        result = self.words_api.get_document_drawing_object_ole_data(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    def test_get_document_drawing_object_ole_data_without_node_path(self):
        """
        Test for getting drawing object OLE data without node path.
        """
        remote_file_name = 'TestGetDocumentDrawingObjectOleDataWithoutNodePath.docx'
        self._upload(remote_file_name, self.local_ole_file)

        request = asposewordscloud.models.requests.GetDocumentDrawingObjectOleDataRequest(
            name=remote_file_name, index=0, folder=self._remote_folder())
        result = self.words_api.get_document_drawing_object_ole_data(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    def test_insert_drawing_object(self):
        """
        Test for adding drawing object.
        """
        remote_file_name = 'TestInsetDrawingObject.docx'
        self._upload(remote_file_name, self.local_common_file)

        request_drawing_object = asposewordscloud.DrawingObjectInsert(
            height=0.0, left=0.0, top=0.0, width=0.0,
            relative_horizontal_position='Margin',
            relative_vertical_position='Margin',
            wrap_type='Inline')
        request = asposewordscloud.models.requests.InsertDrawingObjectRequest(
            name=remote_file_name, drawing_object=request_drawing_object,
            image_file=self._open_image(), node_path='', folder=self._remote_folder())
        result = self.words_api.insert_drawing_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_object, 'Validate InsertDrawingObject response')
        self.assertEqual('0.3.7.1', result.drawing_object.node_id)

    def test_insert_drawing_object_without_node_path(self):
        """
        Test for adding drawing object without node path.
        """
        remote_file_name = 'TestInsetDrawingObjectWithoutNodePath.docx'
        self._upload(remote_file_name, self.local_common_file)

        request_drawing_object = asposewordscloud.DrawingObjectInsert(
            height=0.0, left=0.0, top=0.0, width=0.0,
            relative_horizontal_position='Margin',
            relative_vertical_position='Margin',
            wrap_type='Inline')
        request = asposewordscloud.models.requests.InsertDrawingObjectRequest(
            name=remote_file_name, drawing_object=request_drawing_object,
            image_file=self._open_image(), folder=self._remote_folder())
        result = self.words_api.insert_drawing_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_object, 'Validate InsertDrawingObjectWithoutNodePath response')
        self.assertEqual('0.3.7.1', result.drawing_object.node_id)

    def test_delete_drawing_object(self):
        """
        Test for deleting drawing object.
        """
        remote_file_name = 'TestDeleteDrawingObject.docx'
        self._upload(remote_file_name, self.local_common_file)

        request = asposewordscloud.models.requests.DeleteDrawingObjectRequest(
            name=remote_file_name, index=0, node_path='', folder=self._remote_folder())
        self.words_api.delete_drawing_object(request)

    def test_delete_drawing_object_without_node_path(self):
        """
        Test for deleting drawing object without node path.
        """
        remote_file_name = 'TestDeleteDrawingObjectWithoutNodePath.docx'
        self._upload(remote_file_name, self.local_common_file)

        request = asposewordscloud.models.requests.DeleteDrawingObjectRequest(
            name=remote_file_name, index=0, folder=self._remote_folder())
        self.words_api.delete_drawing_object(request)

    def test_update_drawing_object(self):
        """
        Test for updating drawing object.
        """
        remote_file_name = 'TestUpdateDrawingObject.docx'
        self._upload(remote_file_name, self.local_common_file)

        request_drawing_object = asposewordscloud.DrawingObjectUpdate(left=1.0)
        request = asposewordscloud.models.requests.UpdateDrawingObjectRequest(
            name=remote_file_name, drawing_object=request_drawing_object,
            image_file=self._open_image(), index=0, node_path='', folder=self._remote_folder())
        result = self.words_api.update_drawing_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_object, 'Validate UpdateDrawingObject response')
        self.assertEqual(1.0, result.drawing_object.left)

    def test_update_drawing_object_without_node_path(self):
        """
        Test for updating drawing object without node path.
        """
        remote_file_name = 'TestUpdateDrawingObjectWithoutNodePath.docx'
        self._upload(remote_file_name, self.local_common_file)

        request_drawing_object = asposewordscloud.DrawingObjectUpdate(left=1.0)
        request = asposewordscloud.models.requests.UpdateDrawingObjectRequest(
            name=remote_file_name, drawing_object=request_drawing_object,
            image_file=self._open_image(), index=0, folder=self._remote_folder())
        result = self.words_api.update_drawing_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.drawing_object, 'Validate UpdateDrawingObjectWithoutNodePath response')
        self.assertEqual(1.0, result.drawing_object.left)
| 53.559603 | 271 | 0.750108 | 14,612 | 0.903369 | 0 | 0 | 0 | 0 | 0 | 0 | 5,304 | 0.327913 |
617166834f93fc78ae610274ccab793d6d6b1c12 | 4,200 | py | Python | freqent/tests/azimuthal_average_tests/make_sphericalWaveTest.py | lab-of-living-matter/freqent | 210d8f25a59894d903c42d52e5475900303f9631 | [
"Apache-2.0"
] | 5 | 2021-01-16T01:39:39.000Z | 2022-01-19T00:23:38.000Z | freqent/tests/azimuthal_average_tests/make_sphericalWaveTest.py | lab-of-living-matter/freqent | 210d8f25a59894d903c42d52e5475900303f9631 | [
"Apache-2.0"
] | null | null | null | freqent/tests/azimuthal_average_tests/make_sphericalWaveTest.py | lab-of-living-matter/freqent | 210d8f25a59894d903c42d52e5475900303f9631 | [
"Apache-2.0"
] | 1 | 2021-08-20T15:03:30.000Z | 2021-08-20T15:03:30.000Z | import numpy as np
import matplotlib.pyplot as plt
import freqent.freqentn as fen
import dynamicstructurefactor.sqw as sqw
from itertools import product
import os
import matplotlib as mpl
# Embed fonts as Type 42 (TrueType) in PDF output so text remains editable.
mpl.rcParams['pdf.fonttype'] = 42
# Output directory for saved figures (author-local path).
savepath = '/media/daniel/storage11/Dropbox/LLM_Danny/frequencySpaceDissipation/tests/freqentn_tests/'
# Close any figures left over from previous interactive runs.
plt.close('all')
def create_sphericalWave(wavelength, period, phi,
                         v=None,
                         n_txy=None,
                         max_txy=None,
                         r0=None):
    '''
    Create a (possibly drifting) spherical wave sampled on a regular
    (t, x, y) grid.

    Inputs
    ------
    wavelength : float
        wavelength of spherical wave
    period : float
        period of spherical wave
    phi : float
        initial phase of wave
    v : array-like, optional
        drift velocity of wave, in format [vx, vy]. Defaults to [0, 0]
    n_txy : list, optional
        list of integers for number of time points, x points, y points.
        Defaults to [100, 100, 100]
    max_txy : list, optional
        list of floats for total time and total length in x and y dimensions.
        Defaults to [1, 1, 1]
    r0 : array-like, optional
        initial position of spherical wave. Defaults to [0, 0]

    Returns
    -------
    wave, t, x, y : ndarray
        wave amplitude and time/space meshgrids, each of shape
        (n_txy[0], n_txy[1], n_txy[2])
    '''
    # None sentinels instead of mutable list defaults, so repeated calls can
    # never share (and accidentally mutate) one default object.
    v = [0, 0] if v is None else v
    r0 = [0, 0] if r0 is None else r0
    n_txy = np.asarray([100, 100, 100] if n_txy is None else n_txy)
    max_txy = np.asarray([1, 1, 1] if max_txy is None else max_txy)
    # Sample axes: t spans [0, T]; x and y are centered on zero.
    tArr = np.linspace(0, max_txy[0], n_txy[0])
    xArr = np.linspace(-max_txy[1] / 2, max_txy[1] / 2, n_txy[1])
    yArr = np.linspace(-max_txy[2] / 2, max_txy[2] / 2, n_txy[2])
    t, x, y = np.meshgrid(tArr, xArr, yArr, indexing='ij')
    k = 2 * np.pi / wavelength  # spatial angular frequency (rad / length)
    w = 2 * np.pi / period      # temporal angular frequency (rad / time)
    # Distance of each grid point from the drifting wave center.
    r = np.sqrt((x - r0[0] - (v[0] * t))**2 + (y - r0[1] - (v[1] * t))**2)
    wave = np.cos(k * r - w * t + phi)
    return wave, t, x, y
# Set up parameters
xmax = 6 * np.pi # total distance in physical units
ymax = 6 * np.pi
tmax = 100
nx = 250 # total number of pixels across
ny = 250
nt = 100
dx = xmax / nx # sampling spacing
dy = ymax / ny
dt = tmax / nt
xArr = np.linspace(-xmax / 2, xmax / 2, nx)
yArr = np.linspace(-ymax / 2, ymax / 2, ny)
tArr = np.linspace(0, tmax, nt)
# Set up grid in real space, remembering to multiply by the
# sampling periods in time and space
tt, xx, yy = np.meshgrid(tArr, xArr, yArr, indexing='ij')
# Spatial and temporal frequency (in radians/length or time)
lambda0 = np.pi / 6
k0 = 2 * np.pi / lambda0
T0 = 5
w0 = 2 * np.pi / T0
lambda1 = np.pi / 6
k1 = 2 * np.pi / lambda1
T1 = 5
w1 = 2 * np.pi / T1
# Center offset
x0 = 0 * dx
y0 = 0 * dy
x1 = 0 * dx
y1 = 0 * dy
# phase difference
phi = 1 * np.pi / 2
# Function and its power spectrum
# Two concentric spherical waves with identical wavelength and period,
# offset in phase by phi.
r0 = ((xx - x0)**2 + (yy - y0)**2)**0.5
r1 = ((xx - x1)**2 + (yy - y1)**2)**0.5
r0t = np.cos(k0 * r0 - w0 * tt)
r1t = np.cos(k1 * r1 - w1 * tt + phi)
# Stack both waves into one (2, nt, nx, ny) array for the correlation code.
data = np.zeros((2, *r0t.shape))
data[0] = r0t
data[1] = r1t
# Cross-spectral density matrix and its frequency axes, then Gaussian
# smoothing with stddev [1, 2, 2] bins along the three axes.
c, freqs = fen.corr_matrix(data, sample_spacing=[dt, dx, dy])
c = fen._nd_gauss_smooth(c, stddev=[1, 2, 2])
idx_array = list(product(np.arange(2), repeat=2))
figReal, axReal = plt.subplots(2, 2, sharex=True, sharey=True)
figImag, axImag = plt.subplots(2, 2, sharex=True, sharey=True)
# Azimuthally average each element of the 2x2 CSD matrix and plot the real
# and imaginary parts on separate 2x2 figures.
for idx in idx_array:
    aziAvg_real, kr_real = sqw.azimuthal_average_3D(c[..., idx[0], idx[1]].real,
                                                    dx=2 * np.pi / xmax)
    aziAvg_imag, kr_imag = sqw.azimuthal_average_3D(c[..., idx[0], idx[1]].imag,
                                                    dx=2 * np.pi / xmax)
    axReal[idx[0], idx[1]].pcolormesh(kr_real, freqs[0], aziAvg_real, vmin=-1, vmax=15)
    axImag[idx[0], idx[1]].pcolormesh(kr_imag, freqs[0], aziAvg_imag, vmin=-0.3, vmax=0.3)
axReal[1, 0].set(xlabel=r'$k$ (rad/um)', ylabel=r'$\omega$ (rad/s)')
axReal[0, 0].set(ylabel=r'$\omega$ (rad/s)')
axReal[1, 1].set(xlabel=r'$k$ (rad/um)')
figReal.suptitle(r'$\Re[\langle r_i(\mathbf{{k}}, \omega) r_j^*(\mathbf{{k}}, \omega) \rangle]$')
# figReal.savefig(os.path.join(savepath, 'sphericalWaveCSD_Real_smoothed_sigma1.pdf'), format='pdf')
axImag[1, 0].set(xlabel=r'$k$ (rad/um)', ylabel=r'$\omega$ (rad/s)')
axImag[0, 0].set(ylabel=r'$\omega$ (rad/s)')
axImag[1, 1].set(xlabel=r'$k$ (rad/um)')
figImag.suptitle(r'$\Im[\langle r_i(\mathbf{{k}}, \omega) r_j^*(\mathbf{{k}}, \omega) \rangle]$')
# figImag.savefig(os.path.join(savepath, 'sphericalWaveCSD_Imag_smoothed_sigma1.pdf'), format='pdf')
plt.show()
| 30.882353 | 102 | 0.602619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,444 | 0.34381 |
6172cb90d3acbe6eb936a5e0311837e45f59b5d1 | 1,217 | py | Python | setup.py | kimvanwyk/red-mail | 78dc763e5b5f09eed820a11233299dd2c7b0190b | [
"MIT"
] | null | null | null | setup.py | kimvanwyk/red-mail | 78dc763e5b5f09eed820a11233299dd2c7b0190b | [
"MIT"
] | null | null | null | setup.py | kimvanwyk/red-mail | 78dc763e5b5f09eed820a11233299dd2c7b0190b | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
import versioneer
# Read the long description from the README so PyPI can render it.
# The encoding is explicit because the text-mode default is platform
# dependent; a UTF-8 README would break installs on some locales.
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()
setup(
    name="redmail",
    version=versioneer.get_version(),  # version is managed by versioneer
    cmdclass=versioneer.get_cmdclass(),
    author="Mikael Koli",
    author_email="koli.mikael@gmail.com",
    url="https://github.com/Miksus/red-mail.git",
    packages=find_packages(),
    description="Email sending library",
    long_description=long_description,
    long_description_content_type="text/markdown",
    classifiers=[
        "Topic :: Communications :: Email",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Intended Audience :: Developers",
        "Intended Audience :: Customer Service",
        "Intended Audience :: Financial and Insurance Industry",
    ],
    include_package_data=True,  # for MANIFEST.in
    python_requires='>=3.6.0',
    install_requires=[
        'jinja2',
    ],
)
| 30.425 | 64 | 0.637634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 601 | 0.493837 |
61741f74b759c1266c2b4eff5463940e41acbff2 | 1,691 | py | Python | wagtailcomments/basic/migrations/0001_initial.py | takeflight/wagtailcomments | 941ae2f16fccb867a33d2ed4c21b8e5e04af712d | [
"BSD-3-Clause"
] | 7 | 2016-09-28T10:51:44.000Z | 2018-09-29T08:27:23.000Z | wagtailcomments/basic/migrations/0001_initial.py | takeflight/wagtailcomments | 941ae2f16fccb867a33d2ed4c21b8e5e04af712d | [
"BSD-3-Clause"
] | null | null | null | wagtailcomments/basic/migrations/0001_initial.py | takeflight/wagtailcomments | 941ae2f16fccb867a33d2ed4c21b8e5e04af712d | [
"BSD-3-Clause"
] | 2 | 2017-05-21T08:41:19.000Z | 2018-08-06T13:50:59.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-29 06:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import enumchoicefield.fields
import wagtailcomments.models
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the basic ``Comment`` model."""
    initial = True
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # (object_type, object_id) form a generic foreign key to the commented
        # object; the model is swappable via the WAGTAILCOMMENTS_MODEL setting.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.TextField()),
                ('user_name', models.CharField(blank=True, max_length=255, null=True)),
                ('user_email', models.EmailField(blank=True, max_length=254, null=True)),
                ('datetime', models.DateTimeField(default=django.utils.timezone.now)),
                ('ip_address', models.GenericIPAddressField()),
                ('status', enumchoicefield.fields.EnumChoiceField(enum_class=wagtailcomments.models.CommentStatus, max_length=10)),
                ('body', models.TextField()),
                ('object_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'swappable': 'WAGTAILCOMMENTS_MODEL',
            },
        ),
    ]
| 40.261905 | 141 | 0.646954 | 1,379 | 0.815494 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.166174 |
61752f9c0020e5238d87c07d19b74f84c1ea9f90 | 4,286 | py | Python | tests_requre/service/test_views.py | IceWreck/packit-service | ab8a3ae7b7f078f4a5bf3465516c1abc894fe3dc | [
"MIT"
] | null | null | null | tests_requre/service/test_views.py | IceWreck/packit-service | ab8a3ae7b7f078f4a5bf3465516c1abc894fe3dc | [
"MIT"
] | 2 | 2020-09-02T08:14:27.000Z | 2020-09-03T03:16:27.000Z | tests_requre/service/test_views.py | IceWreck/packit-service | ab8a3ae7b7f078f4a5bf3465516c1abc894fe3dc | [
"MIT"
] | null | null | null | from flask import url_for
from flexmock import flexmock
from packit_service import models
from packit_service.models import CoprBuildModel
from packit_service.service.views import _get_build_info
from tests_requre.conftest import SampleValues
def test_get_build_logs_for_build_pr(clean_before_and_after, a_copr_build_for_pr):
    """Build-info page of a PR-triggered Copr build contains all details."""
    # Pin the rendered timestamp so the page content is deterministic.
    flexmock(models).should_receive("optional_time").and_return(
        "2020-05-19 16:17:14 UTC"
    )
    build = a_copr_build_for_pr
    page = _get_build_info(build, build_description="COPR build")
    assert "We can't find any info" not in page
    for fragment in (
        "Builds for the-namespace/the-repo-name: PR #342",
        "2020-05-19 16:17:14 UTC",
        build.status,
        build.target,
        str(build.srpm_build_id),
        build.build_logs_url,
    ):
        assert fragment in page
def test_get_build_logs_for_build_branch_push(
    clean_before_and_after, a_copr_build_for_branch_push
):
    """Build-info page of a branch-push Copr build contains all details."""
    # Pin the rendered timestamp so the page content is deterministic.
    flexmock(models).should_receive("optional_time").and_return(
        "2020-05-19 16:17:14 UTC"
    )
    build = a_copr_build_for_branch_push
    page = _get_build_info(build, build_description="COPR build")
    assert "We can't find any info" not in page
    for fragment in (
        "Builds for the-namespace/the-repo-name: branch build-branch",
        "2020-05-19 16:17:14 UTC",
        build.status,
        build.target,
        str(build.srpm_build_id),
        build.build_logs_url,
    ):
        assert fragment in page
def test_get_build_logs_for_build_release(
    clean_before_and_after, a_copr_build_for_release
):
    """Build-info page of a release-triggered Copr build contains all details."""
    # Pin the rendered timestamp so the page content is deterministic.
    flexmock(models).should_receive("optional_time").and_return(
        "2020-05-19 16:17:14 UTC"
    )
    build = a_copr_build_for_release
    page = _get_build_info(build, build_description="COPR build")
    assert "We can't find any info" not in page
    for fragment in (
        "Builds for the-namespace/the-repo-name: release v1.0.2",
        "2020-05-19 16:17:14 UTC",
        build.status,
        build.target,
        str(build.srpm_build_id),
        build.build_logs_url,
    ):
        assert fragment in page
def test_srpm_logs_view(client, clean_before_and_after, srpm_build_model):
    """SRPM logs page renders the build id and the stored logs."""
    # The logs view is addressed by the SRPMBuildModel id, not CoprBuildModel.
    logs_url = url_for("builds.get_srpm_build_logs_by_id", id_=srpm_build_model.id)
    page = client.get(logs_url).data.decode()
    assert "SRPM build logs" in page
    assert str(srpm_build_model.id) in page
    assert "some\nboring\nlogs" in page
def test_copr_build_info_view(client, clean_before_and_after, multiple_copr_builds):
    """Copr build-info page renders all details of a stored build."""
    # Pin the rendered timestamp so the page content is deterministic.
    flexmock(models).should_receive("optional_time").and_return(
        "2020-05-19 16:17:14 UTC"
    )
    build = CoprBuildModel.get_by_build_id(123456, SampleValues.chroots[0])
    build.set_build_logs_url(
        "https://copr.somewhere/results/owner/package/target/build.logs"
    )
    info_url = url_for("builds.copr_build_info", id_=str(build.id))
    page = client.get(info_url).data.decode()
    for fragment in (
        "Builds for the-namespace/the-repo-name: PR #342",
        "2020-05-19 16:17:14 UTC",
        build.status,
        build.target,
        str(build.srpm_build_id),
        build.build_logs_url,
    ):
        assert fragment in page
def test_koji_build_info_view(client, clean_before_and_after, a_koji_build_for_pr):
    """Koji build-info page renders all details of a stored build."""
    # Pin the rendered timestamp so the page content is deterministic.
    flexmock(models).should_receive("optional_time").and_return(
        "2020-05-19 16:17:14 UTC"
    )
    info_url = url_for("builds.koji_build_info", id_=str(a_koji_build_for_pr.id))
    page = client.get(info_url).data.decode()
    for fragment in (
        "Builds for the-namespace/the-repo-name: PR #342",
        "2020-05-19 16:17:14 UTC",
        a_koji_build_for_pr.status,
        a_koji_build_for_pr.target,
        str(a_koji_build_for_pr.srpm_build_id),
        a_koji_build_for_pr.build_logs_url,
    ):
        assert fragment in page
| 39.321101 | 88 | 0.765749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 944 | 0.220252 |
617841c4391105795ebe61db8ed27da43f2a59f5 | 1,644 | py | Python | watchapp/urls.py | kepha-okari/the-watch | 80720ad4ffc394e5acbca0fbf1fc9f1dbff0f56f | [
"MIT"
] | null | null | null | watchapp/urls.py | kepha-okari/the-watch | 80720ad4ffc394e5acbca0fbf1fc9f1dbff0f56f | [
"MIT"
] | 7 | 2020-02-11T23:48:54.000Z | 2022-03-11T23:16:45.000Z | watchapp/urls.py | kepha-okari/the-watch | 80720ad4ffc394e5acbca0fbf1fc9f1dbff0f56f | [
"MIT"
] | null | null | null | from django.conf.urls import url,include
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
    # Neighborhood ("hood") pages
    url(r'^$',views.index,name='hoodNews'),
    url(r'^new/hood/',views.create_hood, name='newHood'),
    url(r'^all/hoods/',views.view_neighborhoods, name='allHoods'),
    url(r'^neighborhood/(\d+)',views.hood_details, name='pickHood'),
    # Posts and businesses
    url(r'^new/post/',views.post_message, name='message'),
    url(r'^new/business/',views.create_business, name='newBusiness'),
    url(r'^business/(\d+)',views.business_details, name='business'),
    # User profile and follow/unfollow actions
    url(r'^create/profile/', views.create_profile, name='createProfile'),
    url(r'^follow/(\d+)', views.follow, name='follow'),
    url(r'^unfollow/(\d+)', views.unfollow, name='unfollow'),
    # url(r'^other/profile/(\d+)',views.other_profile, name='otherProfile'),
    # url(r'^post/',views.new_post, name='postImage'),
    # url(r'^manage/(\d+)',views.manage_image, name='manageImage'),
    # url(r'^comment/(\d+)', views.new_comment, name='Comment'),
    # url(r'^single/image/(\d+)', views.single_image, name='singleImage'),
    # url(r'^follow/(\d+)', views.follow, name="follow"),
    # url(r'^delete/post/(\d+)', views.delete_post, name="removePost"),
    # url(r'^unfollow/(\d+)', views.unfollow, name="unfollow"),
    # url(r'^like/(\d+)', views.like, name="like"),
    # url(r'^update/profile/', views.create_profile, name="createProfile"),
    # url(r'^search/', views.search_results, name='search_results'),
    # Authentication via django-registration's simple backend
    url(r'^accounts/', include('registration.backends.simple.urls')),
]
| 51.375 | 76 | 0.666058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 983 | 0.597932 |
6178677cf010170ac4ef5c808891d12fe972a547 | 1,088 | py | Python | shopyoapi/uploads.py | Maurilearn/learnings | 0af03e5646c9053b3cfc27465983ce466ad2f3cb | [
"MIT"
] | null | null | null | shopyoapi/uploads.py | Maurilearn/learnings | 0af03e5646c9053b3cfc27465983ce466ad2f3cb | [
"MIT"
] | null | null | null | shopyoapi/uploads.py | Maurilearn/learnings | 0af03e5646c9053b3cfc27465983ce466ad2f3cb | [
"MIT"
] | null | null | null |
from werkzeug.security import generate_password_hash
from shopyoapi.init import db
from app import app
from modules.auth.models import User
from modules.school.models import Setting
# from modules.settings.models import Settings
def add_admin(name, email, password):
    """Create a user with the admin role and persist it.

    The password is stored hashed via ``User.set_hash``.
    """
    with app.app_context():
        admin = User(
            name=name,
            email=email,
            role='admin'
        )
        admin.set_hash(password)
        admin.insert()
    # Security: never echo the plaintext password back to the console.
    print('[x] added admin:', name, email)
'''
def add_setting(name, value):
with app.app_context():
if Settings.query.filter_by(setting=name).first():
s = Settings.query.get(name)
s.value = value
db.session.commit()
else:
s = Settings(setting=name, value=value)
db.session.add(s)
db.session.commit()
'''
def add_setting(name, value):
    """Persist a single ``Setting`` row with the given name and value."""
    with app.app_context():
        setting = Setting(name=name, value=value)
        setting.insert()
        print('[x] Added name:{} with value:{}'.format(name, value))
6179a809c6fd4ff96baedddbf9bb9dae658ef8c7 | 1,256 | py | Python | neutron_plugin_contrail/plugins/opencontrail/neutron_middleware.py | hamzazafar/contrail-neutron-plugin | fb8dbcabc8240e5c47753ae6e2af5556d0a38421 | [
"Apache-2.0"
] | 3 | 2021-09-07T05:02:24.000Z | 2022-02-11T04:25:43.000Z | neutron_plugin_contrail/plugins/opencontrail/neutron_middleware.py | hamzazafar/contrail-neutron-plugin | fb8dbcabc8240e5c47753ae6e2af5556d0a38421 | [
"Apache-2.0"
] | 1 | 2016-04-26T09:05:42.000Z | 2016-04-26T09:05:42.000Z | neutron_plugin_contrail/plugins/opencontrail/neutron_middleware.py | hamzazafar/contrail-neutron-plugin | fb8dbcabc8240e5c47753ae6e2af5556d0a38421 | [
"Apache-2.0"
] | 5 | 2020-07-14T07:52:05.000Z | 2022-03-24T15:08:02.000Z | import logging
from eventlet import corolocal
from eventlet.greenthread import getcurrent
"""
This middleware is used to forward user token to Contrail API server.
Middleware is inserted at head of Neutron pipeline via api-paste.ini file
so that user token can be preserved in local storage of Neutron thread.
This is needed because neutron will later remove the user token before control
finally reaches Contrail plugin. Contrail plugin will retreive the user token
from thread's local storage and pass it to API server via X-AUTH-TOKEN header.
"""
class UserToken(object):
    """WSGI middleware that stashes the caller's auth token on the current
    greenlet so it can be forwarded to the Contrail API server later."""

    def __init__(self, app, conf):
        self._logger = logging.getLogger(__name__)
        self._app = app
        self._conf = conf

    def __call__(self, env, start_response):
        # Save the user token in greenlet-local storage; Neutron strips the
        # token from the request before the Contrail plugin gets control.
        current = getcurrent()
        current.contrail_vars = corolocal.local()
        current.contrail_vars.token = env.get('HTTP_X_AUTH_TOKEN')
        return self._app(env, start_response)
def token_factory(global_conf, **local_conf):
    """Paste factory: build a UserToken middleware wrapper from paste config."""
    merged_conf = global_conf.copy()
    merged_conf.update(local_conf)

    def wrap(app):
        return UserToken(app, merged_conf)

    return wrap
| 32.205128 | 78 | 0.731688 | 489 | 0.389331 | 0 | 0 | 0 | 0 | 0 | 0 | 563 | 0.448248 |
617b72d47fe1a620e75af426de3f519c6430df3c | 858 | py | Python | scofield/tax/models.py | howiworkdaily/scofield-project | f0daaf785c344a0da1f5b624518c9fa6c0514745 | [
"BSD-3-Clause"
] | 4 | 2016-04-10T13:37:58.000Z | 2018-06-11T18:49:29.000Z | scofield/tax/models.py | howiworkdaily/scofield-project | f0daaf785c344a0da1f5b624518c9fa6c0514745 | [
"BSD-3-Clause"
] | null | null | null | scofield/tax/models.py | howiworkdaily/scofield-project | f0daaf785c344a0da1f5b624518c9fa6c0514745 | [
"BSD-3-Clause"
] | 2 | 2015-04-08T19:52:19.000Z | 2021-02-10T08:08:19.000Z | from django.db import models
class TaxClass(models.Model):
    """
    Tax rate for a product.
    """
    # Display name of the tax class.
    title = models.CharField(max_length=100)
    description = models.CharField(max_length=200, help_text='Description of products to be taxed')
    def __unicode__(self):
        # Python 2 string representation (this model predates __str__ usage).
        return self.title
    class Meta:
        verbose_name = "Tax Class"
        verbose_name_plural = "Tax Classes"
class Tax(models.Model):
    """
    Tax Percentage
    """
    taxclass = models.ForeignKey(TaxClass)
    # The 100x factor in _display_percentage implies the stored value is a
    # fraction (e.g. 0.07 -> "7.00%") — TODO confirm against callers.
    percentage = models.DecimalField(max_digits=7, decimal_places=2)
    def _display_percentage(self):
        # Format as a percent string, e.g. "7.00%".
        return "%#2.2f%%" % (100*self.percentage)
    _display_percentage.short_description = 'Percentage'  # admin column label
    display_percentage = property(_display_percentage)
    class Meta:
        verbose_name = "Tax"
        verbose_name_plural = "Tax"
| 25.235294 | 99 | 0.670163 | 823 | 0.959207 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.188811 |
617c68a4d53862bcf891011b1dad1759a3111e16 | 1,114 | py | Python | helpers/sett/resolvers/StrategySushiDiggWbtcLpOptimizerResolver.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
] | 99 | 2020-12-02T08:40:48.000Z | 2022-03-15T05:21:06.000Z | helpers/sett/resolvers/StrategySushiDiggWbtcLpOptimizerResolver.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
] | 115 | 2020-12-15T07:15:39.000Z | 2022-03-28T22:21:03.000Z | helpers/sett/resolvers/StrategySushiDiggWbtcLpOptimizerResolver.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
] | 56 | 2020-12-11T06:50:04.000Z | 2022-02-21T09:17:38.000Z | from brownie import interface
from rich.console import Console
from helpers.utils import snapBalancesMatchForToken
from .StrategyBaseSushiResolver import StrategyBaseSushiResolver
console = Console()
class StrategySushiDiggWbtcLpOptimizerResolver(StrategyBaseSushiResolver):
    """Snapshot resolver extending the base Sushi resolver with DIGG checks."""
    def confirm_rebase(self, before, after, value):
        """
        Lp token balance should stay the same.
        Sushi balances stay the same.
        xSushi balances stay the same.
        """
        super().confirm_rebase(before, after, value)
        # A rebase must not change any of these token balances.
        assert snapBalancesMatchForToken(before, after, "want")
        assert snapBalancesMatchForToken(before, after, "sushi")
        assert snapBalancesMatchForToken(before, after, "xsushi")
    def add_balances_snap(self, calls, entities):
        """Extend the base balance snapshot with DIGG balances and shares."""
        calls = super().add_balances_snap(calls, entities)
        strategy = self.manager.strategy
        digg = interface.IERC20(strategy.digg())
        calls = self.add_entity_balances_for_tokens(calls, "digg", digg, entities)
        calls = self.add_entity_shares_for_tokens(calls, "digg", digg, entities)
        return calls
| 35.935484 | 82 | 0.719928 | 909 | 0.815978 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.154399 |
61800d60ef6f519972904d1a00c8aac0445bd6de | 8,161 | py | Python | Section 8/4/4/Ej7.4/UCBExp.py | marcosherreroa/Aplicaciones-de-los-algoritmos-bandidos | ee93f8f401440de05d56127acb5c75fcc2c92ef2 | [
"MIT"
] | null | null | null | Section 8/4/4/Ej7.4/UCBExp.py | marcosherreroa/Aplicaciones-de-los-algoritmos-bandidos | ee93f8f401440de05d56127acb5c75fcc2c92ef2 | [
"MIT"
] | null | null | null | Section 8/4/4/Ej7.4/UCBExp.py | marcosherreroa/Aplicaciones-de-los-algoritmos-bandidos | ee93f8f401440de05d56127acb5c75fcc2c92ef2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""""
Bandidos estocásticos: introducción, algoritmos y experimentos
TFG Informática
Sección 8.4.4
Figuras 26, 27 y 28
Autor: Marcos Herrero Agustín
"""
import math
import random
import scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
def computemTeor(n, Delta):
    """Theoretical exploration length for Explore-First with horizon n and
    gap Delta; 0 when the gap is zero, and never below one pull otherwise."""
    if Delta == 0:
        return 0
    gap_sq = Delta * Delta
    m_theory = 4 / gap_sq * math.log(n * gap_sq / 4)
    return max(1, math.ceil(m_theory))
def computemOpt(n, Delta):
    """Return the exploration length m (0..n//2) minimizing the expected
    Explore-First regret for horizon ``n`` and gap ``Delta``.

    Vectorized with numpy instead of a per-m Python loop over scalar
    ``cdf`` calls; values and first-minimum tie-breaking are unchanged.
    """
    X = stats.norm(0, 1)
    expectedRegret = np.empty(n//2 + 1)
    # m = 0: commit immediately, so a suboptimal arm is played half the time.
    expectedRegret[0] = 0.5*n*Delta
    # m >= 1: exploration cost m*Delta plus the cost of committing to the
    # wrong arm, whose probability is Phi(-m*Delta / sqrt(2m)).
    ms = np.arange(1, n//2 + 1)
    expectedRegret[1:] = ms*Delta + (n - ms)*Delta*X.cdf(-ms*Delta/np.sqrt(2*ms))
    # argmin returns the first index attaining the minimum, matching the
    # tie-breaking of min(range(...), key=...).
    return int(np.argmin(expectedRegret))
def samplePseudoRegretEF(n, k, m, arms, gaps):
    """Sample the pseudo-regret of Explore-First: pull each of the k arms m
    times, then commit to an empirically best arm for the remaining rounds."""
    totals = k*[0]
    # Explore phase: round-robin over the arms (this draw order is preserved
    # so that seeded runs are reproducible).
    for _ in range(m):
        for arm_idx in range(k):
            totals[arm_idx] += arms[arm_idx].rvs()
    # Commit phase: choose uniformly among the empirically best arms.
    best_total = max(totals)
    committed = random.choice([arm_idx for arm_idx in range(k) if totals[arm_idx] == best_total])
    # Exploration regret plus the committed arm's regret for the rest.
    return m*sum(gaps) + (n - m*k)*gaps[committed]
def samplePseudoRegretUCB(n, k, delta, arms, gaps):
    """Sample the pseudo-regret of UCB(delta) over ``n`` rounds on ``k`` arms.

    Each round pulls the arm with the largest upper confidence bound
    (arms start at +inf so each is pulled at least once) and accumulates
    that arm's gap as pseudo-regret.
    """
    T = k*[0]  # number of times each arm has been chosen
    meanReward = k*[0]  # sample mean of the rewards observed for each arm
    UCB = k*[np.inf]  # upper confidence bound of each arm
    regret = 0
    for _ in range(n):
        # Renamed the key's parameter so it no longer shadows the loop index.
        chosenArm = max(range(k), key=lambda arm: UCB[arm])
        rwd = arms[chosenArm].rvs()
        # Incremental update of the sample mean (same arithmetic as before,
        # kept verbatim so floating-point results are unchanged).
        meanReward[chosenArm] = T[chosenArm]/(T[chosenArm]+1)*meanReward[chosenArm] \
            + rwd/(T[chosenArm]+1)
        T[chosenArm] +=1
        UCB[chosenArm] = meanReward[chosenArm] + math.sqrt((2*math.log(1/delta))/T[chosenArm])
        regret += gaps[chosenArm]
    return regret
def plotDeltaRegret():
    """Monte-Carlo estimate of the expected pseudo-regret of Explore-First
    (several exploration lengths m) and UCB as a function of the gap Delta;
    saves 'UCBDeltaRegret.pdf' and the m_Teor/m_Opt curves in 'ms.pdf'."""
    n = 1000
    sampleNum = 600
    arms = 2*[None]
    arms[0] = stats.norm(0,1)
    gaps = 2*[0]
    nDeltas = 20
    Deltas = np.linspace(0,1,nDeltas)
    regretEF25 = np.empty(nDeltas)
    regretEF50 = np.empty(nDeltas)
    regretEF75 = np.empty(nDeltas)
    regretEF100 = np.empty(nDeltas)
    regretEFmTeor = np.empty(nDeltas)
    regretEFOptimo = np.empty(nDeltas)
    regretUCB = np.empty(nDeltas)
    mTeor = nDeltas*[0]
    mOpt = nDeltas*[0]
    # Average the sampled pseudo-regret over sampleNum runs for each Delta.
    for i in range(nDeltas):
        Delta = Deltas[i]
        arms[1]= stats.norm(-Delta,1)
        gaps[1] = Delta
        regretEF25[i] = 0
        for k in range(sampleNum):
            regretEF25[i] += samplePseudoRegretEF(n,2,25,arms,gaps)
        regretEF25[i] /= sampleNum
        regretEF50[i] = 0
        for k in range(sampleNum):
            regretEF50[i] += samplePseudoRegretEF(n,2,50,arms,gaps)
        regretEF50[i] /= sampleNum
        regretEF75[i] = 0
        for k in range(sampleNum):
            regretEF75[i] += samplePseudoRegretEF(n,2,75,arms,gaps)
        regretEF75[i] /= sampleNum
        regretEF100[i] = 0
        for k in range(sampleNum):
            regretEF100[i] += samplePseudoRegretEF(n,2,100,arms,gaps)
        regretEF100[i] /= sampleNum
        regretEFmTeor[i]= 0
        mTeor[i] = computemTeor(n,Delta)
        for k in range(sampleNum):
            regretEFmTeor[i] += samplePseudoRegretEF(n,2,mTeor[i],arms,gaps)
        regretEFmTeor[i] /= sampleNum
        regretEFOptimo[i] = 0
        mOpt[i] = computemOpt(n,Delta)
        for k in range(sampleNum):
            regretEFOptimo[i] += samplePseudoRegretEF(n,2,mOpt[i],arms,gaps)
        regretEFOptimo[i] /= sampleNum
        regretUCB[i] = 0
        for k in range(sampleNum):
            regretUCB[i] += samplePseudoRegretUCB(n,2,1/(n*n),arms,gaps)
        regretUCB[i] /= sampleNum
    # First figure: regret curves of every strategy versus Delta.
    fig = plt.figure()
    plt.plot(Deltas,regretEF25, color='tab:blue',label= 'EP (m = 25)')
    plt.plot(Deltas,regretEF50, color='tab:green',label = 'EP (m = 50)')
    plt.plot(Deltas,regretEF75, color='tab:olive',label = 'EP (m = 75)')
    plt.plot(Deltas,regretEF100, color='tab:red', label = 'EP (m = 100)')
    plt.plot(Deltas,regretEFmTeor, color='tab:purple',label = 'EP (m = m_Teor)')
    plt.plot(Deltas,regretEFOptimo, color='tab:gray', label = 'EP (m = m_Opt)')
    plt.plot(Deltas,regretUCB, color='black', label = 'UCB')
    plt.xlabel('∆')
    plt.ylabel('Remordimiento esperado')
    plt.legend(loc='upper left',ncol = 2)
    fig.savefig('UCBDeltaRegret.pdf',format='pdf')
    plt.show()
    # Second figure: theoretical vs regret-optimal exploration lengths.
    fig = plt.figure()
    plt.plot(Deltas, mTeor, color='tab:purple', label = 'm_Teor')
    plt.plot(Deltas,mOpt, color = 'tab:gray', label = 'm_Opt')
    plt.xlabel('∆')
    plt.ylabel('m')
    plt.legend(loc='upper left')
    fig.savefig('ms.pdf',format='pdf')
    plt.show()
def plotDeltaRegret2():
    """Monte-Carlo comparison of Explore-First against UCB at several
    confidence levels delta, as a function of the gap Delta; saves
    'UCBDeltaRegret2.pdf'."""
    n = 1000
    sampleNum = 600
    arms = 2*[None]
    arms[0] = stats.norm(0,1)
    gaps = 2*[0]
    nDeltas = 20
    Deltas = np.linspace(0,1,nDeltas)
    regretEF25 = np.empty(nDeltas)
    regretEF100 = np.empty(nDeltas)
    regretEFOptimo = np.empty(nDeltas)
    regretUCB0 = np.empty(nDeltas)
    regretUCB2 = np.empty(nDeltas)
    regretUCB4 = np.empty(nDeltas)
    regretUCB6 = np.empty(nDeltas)
    regretUCB8 = np.empty(nDeltas)
    mOpt = nDeltas*[0]
    # Average the sampled pseudo-regret over sampleNum runs for each Delta.
    for i in range(nDeltas):
        Delta = Deltas[i]
        arms[1]= stats.norm(-Delta,1)
        gaps[1] = Delta
        regretEF25[i] = 0
        for k in range(sampleNum):
            regretEF25[i] += samplePseudoRegretEF(n,2,25,arms,gaps)
        regretEF25[i] /= sampleNum
        regretEF100[i] = 0
        for k in range(sampleNum):
            regretEF100[i] += samplePseudoRegretEF(n,2,100,arms,gaps)
        regretEF100[i] /= sampleNum
        regretEFOptimo[i] = 0
        mOpt[i] = computemOpt(n,Delta)
        for k in range(sampleNum):
            regretEFOptimo[i] += samplePseudoRegretEF(n,2,mOpt[i],arms,gaps)
        regretEFOptimo[i] /= sampleNum
        regretUCB0[i] = 0
        for k in range(sampleNum):
            regretUCB0[i] += samplePseudoRegretUCB(n,2,1,arms,gaps)
        regretUCB0[i] /= sampleNum
        regretUCB2[i] = 0
        for k in range(sampleNum):
            regretUCB2[i] += samplePseudoRegretUCB(n,2,1/100,arms,gaps)
        regretUCB2[i] /= sampleNum
        regretUCB4[i] = 0
        for k in range(sampleNum):
            regretUCB4[i] += samplePseudoRegretUCB(n,2,1/10000,arms,gaps)
        regretUCB4[i] /= sampleNum
        regretUCB6[i] = 0
        for k in range(sampleNum):
            regretUCB6[i] += samplePseudoRegretUCB(n,2,1/(n*n),arms,gaps)
        regretUCB6[i] /= sampleNum
        regretUCB8[i] = 0
        for k in range(sampleNum):
            regretUCB8[i] += samplePseudoRegretUCB(n,2,1/(10**8),arms,gaps)
        regretUCB8[i] /= sampleNum
    # Plot every strategy's averaged regret versus Delta.
    fig = plt.figure()
    plt.plot(Deltas,regretEF25, color='tab:blue',label= 'EP (m = 25)')
    plt.plot(Deltas,regretEF100, color='tab:red', label = 'EP (m = 100)')
    plt.plot(Deltas,regretEFOptimo, color='tab:gray', label = 'EP (m = m_Opt)')
    plt.plot(Deltas,regretUCB0, color='salmon', label = 'UCB (δ = 1)')
    plt.plot(Deltas,regretUCB2, color='gold', label = 'UCB (δ = 1/100)')
    plt.plot(Deltas,regretUCB4, color='mediumspringgreen', label = 'UCB (δ = 1/10⁴)')
    plt.plot(Deltas,regretUCB6, color='black', label = 'UCB (δ = 1/10⁶)')
    plt.plot(Deltas,regretUCB8, color='indigo', label = 'UCB (δ = 1/10⁸)')
    plt.xlabel('∆')
    plt.ylabel('Remordimiento esperado')
    plt.legend(loc='upper left',ncol = 2)
    fig.savefig('UCBDeltaRegret2.pdf',format='pdf')
    plt.show()
plotDeltaRegret()
#plotDeltaRegret2()
| 31.268199 | 95 | 0.556304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 952 | 0.116325 |
618075f97fd8bcbc379afe1ad839a7ec6ec22b97 | 801 | py | Python | tools/code_coverage/package/oss/cov_json.py | deltabravozulu/pytorch | c6eef589971e45bbedacc7f65533d1b8f80a6895 | [
"Intel"
] | 206 | 2020-11-28T22:56:38.000Z | 2022-03-27T02:33:04.000Z | tools/code_coverage/package/oss/cov_json.py | deltabravozulu/pytorch | c6eef589971e45bbedacc7f65533d1b8f80a6895 | [
"Intel"
] | 19 | 2020-12-09T23:13:14.000Z | 2022-01-24T23:24:08.000Z | tools/code_coverage/package/oss/cov_json.py | deltabravozulu/pytorch | c6eef589971e45bbedacc7f65533d1b8f80a6895 | [
"Intel"
] | 28 | 2020-11-29T15:25:12.000Z | 2022-01-20T02:16:27.000Z | from ..tool import clang_coverage
from ..util.setting import CompilerType, Option, TestList, TestPlatform
from ..util.utils import check_compiler_type
from .init import detect_compiler_type
from .run import clang_run, gcc_run
def get_json_report(test_list: TestList, options: Option):
    """Run the requested coverage steps for the detected compiler toolchain."""
    compiler_type = detect_compiler_type()
    check_compiler_type(compiler_type)
    if compiler_type == CompilerType.CLANG:
        # run
        if options.need_run:
            clang_run(test_list)
        # merge && export
        if options.need_merge:
            clang_coverage.merge(test_list, TestPlatform.OSS)
        if options.need_export:
            clang_coverage.export(test_list, TestPlatform.OSS)
    elif compiler_type == CompilerType.GCC:
        # run
        if options.need_run:
            gcc_run(test_list)
| 33.375 | 71 | 0.694132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.033708 |
618142523ce1ce96219a613cc5fd09c36753e7ac | 843 | py | Python | metaworld/envs/asset_path_utils.py | vinnibuh/metaworld | e394aea9f000fe3e3778a4fd40bfb9e806752341 | [
"MIT"
] | null | null | null | metaworld/envs/asset_path_utils.py | vinnibuh/metaworld | e394aea9f000fe3e3778a4fd40bfb9e806752341 | [
"MIT"
] | null | null | null | metaworld/envs/asset_path_utils.py | vinnibuh/metaworld | e394aea9f000fe3e3778a4fd40bfb9e806752341 | [
"MIT"
] | 1 | 2021-10-07T21:56:07.000Z | 2021-10-07T21:56:07.000Z | import os
import xml.etree.ElementTree as ET
from tempfile import NamedTemporaryFile
# Absolute paths to the v1/v2 asset directories shipped next to this module,
# so lookups work regardless of the current working directory.
ENV_ASSET_DIR_V1 = os.path.join(os.path.dirname(__file__), 'assets_v1')
ENV_ASSET_DIR_V2 = os.path.join(os.path.dirname(__file__), 'assets_v2')
def full_v1_path_for(file_name):
    """Return the absolute path of *file_name* inside the v1 asset directory."""
    return os.path.join(ENV_ASSET_DIR_V1, file_name)
def full_v2_path_for(file_name, transparent_sawyer=False):
    """Return the absolute path of *file_name* inside the v2 asset directory.

    When ``transparent_sawyer`` is true, also write a derived
    ``*_transparent_sawyer.xml`` copy whose worldbody include points at the
    transparent base model, and return the derived file's path instead.
    """
    base_path = os.path.join(ENV_ASSET_DIR_V2, file_name)
    if not transparent_sawyer:
        return base_path
    directory, base_name = os.path.split(base_path)
    # Swap the '.xml' suffix for '_transparent_sawyer.xml' (assumes the
    # name ends in '.xml', as the original slice-based code did).
    stem = base_name[:-len('.xml')]
    transparent_path = os.path.join(directory, f"{stem}_transparent_sawyer.xml")
    xml_tree = ET.parse(base_path)
    include_node = xml_tree.getroot().find('worldbody').find('include')
    include_node.set('file', '../objects/assets/xyz_base_transparent.xml')
    xml_tree.write(transparent_path)
    return transparent_path
| 32.423077 | 71 | 0.720047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.170819 |
6182beb680bd2a5d8ef6901a320c44fa56fbc31b | 5,458 | py | Python | landcover_change_application/lccs_l3.py | opendatacube/datacube-conference-2019 | 4c5a0f77c239c550ba1ae6fbc2e2098947e3db36 | [
"Apache-2.0"
] | 5 | 2019-02-18T00:44:38.000Z | 2019-02-26T17:57:40.000Z | landcover_change_application/lccs_l3.py | opendatacube/datacube-conference-2019 | 4c5a0f77c239c550ba1ae6fbc2e2098947e3db36 | [
"Apache-2.0"
] | null | null | null | landcover_change_application/lccs_l3.py | opendatacube/datacube-conference-2019 | 4c5a0f77c239c550ba1ae6fbc2e2098947e3db36 | [
"Apache-2.0"
] | 3 | 2019-02-11T02:44:04.000Z | 2020-12-21T22:56:33.000Z | """
LCCS Level 3 Classification
| Class name | Code | Numeric code |
|----------------------------------|-----|-----|
| Cultivated Terrestrial Vegetated | A11 | 111 |
| Natural Terrestrial Vegetated | A12 | 112 |
| Cultivated Aquatic Vegetated | A23 | 123 |
| Natural Aquatic Vegetated | A24 | 124 |
| Artificial Surface | B15 | 215 |
| Natural Surface | B16 | 216 |
| Artificial Water | B27 | 227 |
| Natural Water | B28 | 228 |
"""
import logging
import numpy
#: Required input variables
#: Binary (1/0) layers the Level 3 classification is derived from.
LCCS_L3_REQUIRED_VARIABLES = ["vegetat_veg_cat",
                              "aquatic_wat_cat",
                              "cultman_agr_cat",
                              "artific_urb_cat",
                              "artwatr_wat_cat"]
#: LCCS Level 3 Colour Scheme
#: Maps numeric class code -> (red, green, blue, alpha), 0-255 per channel.
LCCS_L3_COLOUR_SCHEME = {111 : (192, 255, 0, 255),
                         112 : (0, 128, 0, 255),
                         123 : (0, 255, 245, 255),
                         124 : (0, 192, 122, 255),
                         215 : (255, 0, 255, 255),
                         216 : (255, 192, 160, 255),
                         227 : (0, 155, 255, 255),
                         228 : (0, 0, 255, 255)}
def colour_lccs_level3(classification_array):
    """
    Colour a classification array using the standard LCCS Level 3 colour
    scheme. Returns four uint8 arrays of the same shape as the input:

    * red
    * green
    * blue
    * alpha

    Pixels whose value is not an LCCS Level 3 code are left as zeros
    (fully transparent black).
    """
    channels = [numpy.zeros_like(classification_array, dtype=numpy.uint8)
                for _ in range(4)]
    for class_code, rgba in LCCS_L3_COLOUR_SCHEME.items():
        class_mask = (classification_array == class_code)
        for channel, channel_value in zip(channels, rgba):
            channel[class_mask] = channel_value
    red, green, blue, alpha = channels
    return red, green, blue, alpha
def _check_required_variables(classification_data):
    """
    Check that all required input variables are present in the xarray
    Dataset, logging a warning for each missing one.

    Missing variables are not fatal here: the classification routines
    degrade gracefully by skipping the categories that depend on them.
    (Fixed docstring typo "requited".)
    """
    for var in LCCS_L3_REQUIRED_VARIABLES:
        if var not in classification_data.data_vars:
            logging.warning("Required variable {0} not found".format(var))
def classify_lccs_level3(classification_data):
    """
    Apply Level 3 LCCS Classification
    Requires xarray containing the following variables
    * vegetat_veg_cat - Binary mask 1=vegetation, 0=non-vegetation
    * aquatic_wat_cat - Binary mask 1=aquatic, 0=non-aquatic
    * cultman_agr_cat - Binary mask 1=cultivated/managed, 0=natural
    * artific_urb_cat - Binary mask 1=urban, 0=non-urban
    * artwatr_wat_cat - Binary mask 1=artificial water, 0=natural water
    Returns three arrays:
    * level1 - uint8, 100 (primarily vegetated, A) or 200 (primarily non-vegetated, B)
    * level2 - uint8, 10 (terrestrial) or 20 (aquatic)
    * level3 - uint8, combined class code (see module docstring); cells stay
      0 where the layer needed for that assignment was unavailable

    The first two layers (vegetation, aquatic) are mandatory and raise if
    missing; the remaining layers only trigger a warning and the dependent
    level 3 categories are skipped.
    """
    # Check required input and output variables exist.
    _check_required_variables(classification_data)
    # Set up arrays for outputs
    try:
        vegetation = classification_data["vegetat_veg_cat"].values == 1
    except KeyError:
        raise Exception("No data available for first level of classification "
                        "(vegetation / non-vegetation), can not proceed")
    # level3 starts at 0 everywhere; only cells matched below get a code.
    level3 = numpy.zeros(vegetation.shape, dtype=numpy.uint8)
    # Level 1
    # Assign level 1 class of primarily vegetated (A,100) or primarily non-vegetated (B,200)
    level1 = numpy.where(vegetation, numpy.uint8(100), numpy.uint8(200))
    # Level 2
    # Assign level 2 class of terrestrial (10) or aquatic (20)
    try:
        aquatic = classification_data["aquatic_wat_cat"].values == 1
        level2 = numpy.where(aquatic, numpy.uint8(20), numpy.uint8(10))
    except KeyError:
        raise Exception("No data available for second level of classification "
                        "(aquatic / non-aquatic), can not proceed")
    # Level 3
    # Assign level 3 (Supercategory) class based on cultivated or artificial
    try:
        cultivated = classification_data["cultman_agr_cat"].values == 1
        # Cultivated Terrestrial Vegetation (A11)
        level3[vegetation & ~aquatic & cultivated] = 111
        # Cultivated Aquatic Vegetation (A23)
        level3[vegetation & aquatic & cultivated] = 123
        # Natural Terrestrial Vegetation (A12)
        level3[vegetation & ~aquatic & ~cultivated] = 112
        # Natural Aquatic Vegetation (A24)
        level3[vegetation & aquatic & ~cultivated] = 124
    except KeyError:
        logging.warning("No cultivated vegetation layer available. Skipping "
                        "assigning level 3 catergories for vegetation")
    try:
        urban = classification_data["artific_urb_cat"].values == 1
        # Artificial Surface (B15)
        level3[~vegetation & ~aquatic & urban] = 215
        # Natural Surface (B16)
        level3[~vegetation & ~aquatic & ~urban] = 216
    except KeyError:
        logging.warning("No urban layer available. Skipping assigning "
                        "level 3 for terrestrial non-vegetation")
    try:
        artificial_water = classification_data["artwatr_wat_cat"].values == 1
        # Artificial Water (B27)
        level3[~vegetation & aquatic & artificial_water] = 227
        # Natural Water (B28)
        level3[~vegetation & aquatic & ~artificial_water] = 228
    except KeyError:
        logging.warning("No artificial water layer available. Skipping assigning "
                        "level 3 for aquatic non-vegetation (water)")
    return level1, level2, level3
| 33.484663 | 92 | 0.611213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,594 | 0.475266 |
618446f9da1ab0931b796eb8d9862b418bee6a2b | 1,115 | py | Python | hackerearth/Algorithms/Buggy Bot/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | 4 | 2020-07-24T01:59:50.000Z | 2021-07-24T15:14:08.000Z | hackerearth/Algorithms/Buggy Bot/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | hackerearth/Algorithms/Buggy Bot/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import defaultdict
# n nodes, m directed edges, k queries -- all read from stdin.
n, m, k = map(int, input().strip().split())
adjacency = defaultdict(list)
for _ in range(m):
    a, b = map(int, input().strip().split())
    adjacency[a].append(b)
# final[i] marks node i as part of the answer set.  The bot starts at
# node 1, so everything directly reachable from node 1 is marked up front.
final = [False] * (n + 1)
for node in adjacency[1]:
    final[node] = True
buggy = 1  # the bot's current node
# neighboring_nodes[x]: set of nodes from which x was marked, so the
# edges can be restored if x is later unmarked.
neighboring_nodes = defaultdict(set)
for _ in range(k):
    # Each query is a pair (x, y); presumably a move from x to y --
    # the bot follows along when it currently sits on x.
    x, y = map(int, input().strip().split())
    if final[x]:
        # Unmark x and hand its incoming marks back as outgoing edges.
        final[x] = False
        for pa in neighboring_nodes[x]:
            adjacency[pa].append(x)
        neighboring_nodes[x] = set()
    final[y] = True
    if buggy == x:
        buggy = y
    # Mark everything reachable from the bot's node, remembering who
    # marked it; the consumed edges are then cleared.
    for node in adjacency[buggy]:
        final[node] = True
        neighboring_nodes[node].add(buggy)
    adjacency[buggy] = []
    final[buggy] = True
print(sum(final))
print(' '.join(str(i) for i in range(n + 1) if final[i] is True))
| 27.875 | 94 | 0.608072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.24843 |
6184f4fe218bfd606582f03f1fc738924fc5eb85 | 893 | py | Python | level1/migrations/0002_auto_20200219_1812.py | smateenml/arabic | 948f66a4e3e39d49c9420f02ced89e0e1558086c | [
"Apache-2.0"
] | 1 | 2022-03-27T07:17:02.000Z | 2022-03-27T07:17:02.000Z | level1/migrations/0002_auto_20200219_1812.py | smateenml/arabic | 948f66a4e3e39d49c9420f02ced89e0e1558086c | [
"Apache-2.0"
] | null | null | null | level1/migrations/0002_auto_20200219_1812.py | smateenml/arabic | 948f66a4e3e39d49c9420f02ced89e0e1558086c | [
"Apache-2.0"
] | 1 | 2022-03-27T07:17:03.000Z | 2022-03-27T07:17:03.000Z | # Generated by Django 3.0.2 on 2020-02-19 18:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.0.2): removes the six
    future-tense columns from the ``conjugation`` model.

    NOTE: generated code -- change the schema through a new migration
    rather than by editing this file.
    """
    dependencies = [
        ('level1', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='conjugation',
            name='he_future',
        ),
        migrations.RemoveField(
            model_name='conjugation',
            name='i_future',
        ),
        migrations.RemoveField(
            model_name='conjugation',
            name='she_future',
        ),
        migrations.RemoveField(
            model_name='conjugation',
            name='we_future',
        ),
        migrations.RemoveField(
            model_name='conjugation',
            name='you_female_future',
        ),
        migrations.RemoveField(
            model_name='conjugation',
            name='you_male_future',
        ),
    ]
| 23.5 | 47 | 0.536394 | 808 | 0.904815 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.254199 |
6184ff3e6d5499ae2279934df653ba237021e92e | 328 | py | Python | rh/apps/case_studies/cms_apps.py | rapidpro/chpro-microsite | 4e1d1210b49ec60ab0711d78235bf45eeb5c0275 | [
"BSD-3-Clause"
] | null | null | null | rh/apps/case_studies/cms_apps.py | rapidpro/chpro-microsite | 4e1d1210b49ec60ab0711d78235bf45eeb5c0275 | [
"BSD-3-Clause"
] | 108 | 2018-01-30T15:26:18.000Z | 2021-06-10T17:29:57.000Z | rh/apps/case_studies/cms_apps.py | rapidpro/chpro-microsite | 4e1d1210b49ec60ab0711d78235bf45eeb5c0275 | [
"BSD-3-Clause"
] | null | null | null | from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
class CaseStudiesApphook(CMSApp):
    """django CMS apphook that mounts the case-studies URLconf on a CMS page."""
    app_name = "case_studies"
    # Human-readable label; presumably shown when attaching the apphook in
    # the CMS admin -- confirm against the django CMS docs in use.
    name = "Case Studies Application"
    def get_urls(self, page=None, language=None, **kwargs):
        """Return the list of URLconf module paths served by this apphook."""
        return ["rh.apps.case_studies.urls"]
# Register the apphook so the CMS can offer it on pages.
apphook_pool.register(CaseStudiesApphook)
| 23.428571 | 59 | 0.75 | 207 | 0.631098 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.204268 |
6186d31062ff042063bc9ec5db6e940f05791cef | 2,989 | py | Python | manim_demo/project/matplotlib_demo.py | shujunge/manim_tutorial | 8e320373f0404dcc0a200ab3750ee70784dc1345 | [
"MIT"
] | null | null | null | manim_demo/project/matplotlib_demo.py | shujunge/manim_tutorial | 8e320373f0404dcc0a200ab3750ee70784dc1345 | [
"MIT"
] | null | null | null | manim_demo/project/matplotlib_demo.py | shujunge/manim_tutorial | 8e320373f0404dcc0a200ab3750ee70784dc1345 | [
"MIT"
] | null | null | null | from manimlib.imports import *
from srcs.utils import run
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg
from sklearn import svm # sklearn = scikit-learn
from sklearn.datasets import make_moons
def mplfig_to_npimage(fig):
    """Render a matplotlib figure and return it as an (H, W, 3) uint8 RGB array.

    The figure is drawn through the Agg backend (the only backend exposing
    ``tostring_rgb``), exported to a flat byte buffer and reshaped to image
    dimensions.  As a side effect the current pyplot figure is closed.
    """
    agg_canvas = FigureCanvasAgg(fig)
    agg_canvas.draw()
    # bbox.bounds is (left, bottom, width, height); only the size is needed.
    _, _, width, height = agg_canvas.figure.bbox.bounds
    width, height = int(width), int(height)
    rgb_bytes = agg_canvas.tostring_rgb()
    flat_pixels = np.frombuffer(rgb_bytes, dtype=np.uint8)
    plt.close()
    return flat_pixels.reshape(height, width, 3)
def make_frame_mpl(t):
    """Draw the 1-D elevation curve at animation time ``t`` and return it as
    an RGB frame via :func:`mplfig_to_npimage`."""
    figure, axes = plt.subplots(1, figsize=(5, 3), facecolor='white')
    x_values = np.linspace(-2, 2, 200)  # x vector
    elevation = lambda d: np.sinc(x_values ** 2) + np.sin(x_values + d)  # z vector at offset d
    axes.set_title("Elevation in y=0")
    axes.set_ylim(-1.5, 2.5)
    curve, = axes.plot(x_values, elevation(0), lw=3)
    curve.set_ydata(elevation(np.pi * t))  # advance the curve to time t
    return mplfig_to_npimage(figure)  # RGB image of the rendered figure
def make_frame(t):
    """Render one animation frame at time ``t``.

    Fits an SVC on the two-moons toy data with time-dependent sample
    weights (so the points "appear" one after another as ``t`` grows),
    draws the resulting decision function and returns the figure as an
    RGB numpy array via :func:`mplfig_to_npimage`.
    """
    X, Y = make_moons(50, noise=0.1, random_state=2)  # fixed semi-random data
    fig, ax = plt.subplots(1, figsize=(4, 4), facecolor=(1, 1, 1))
    fig.subplots_adjust(left=0, right=1, bottom=0)
    xx, yy = np.meshgrid(np.linspace(-2, 3, 500), np.linspace(-1, 2, 500))
    ax.clear()
    ax.axis('off')
    ax.set_title("SVC classification", fontsize=16)
    classifier = svm.SVC(gamma=2, C=1)
    # Time-varying weights make the data points appear one after another.
    weights = np.minimum(1, np.maximum(0, t**2+10-np.arange(50)))
    classifier.fit(X, Y, sample_weight=weights)
    Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    ax.contourf(xx, yy, Z, cmap=plt.cm.bone, alpha=0.8,
                vmin=-2.5, vmax=2.5, levels=np.linspace(-2,2,20))
    ax.scatter(X[:,0], X[:,1], c=Y, s=50*weights, cmap=plt.cm.bone)
    return mplfig_to_npimage(fig)
class manim_with_animation(Scene):
    """Manim scene showing the two matplotlib animations side by side:
    the SVC frames (shifted left) and the elevation curve (shifted right),
    both driven by one shared ``ValueTracker`` clock."""
    def construct(self):
        during_times = ValueTracker(0)  # shared animation clock
        self.img = ImageMobject(make_frame_mpl(0))
        self.left_img = ImageMobject(make_frame(0))
        # Re-render each image whenever the tracker value changes.
        self.img.add_updater(lambda d: d.set_array(make_frame_mpl(during_times.get_value())))
        self.img.shift(2*RIGHT)
        self.left_img.add_updater(lambda d: d.set_array(make_frame(during_times.get_value())))
        self.left_img.shift(2*LEFT)
        self.play(ShowCreation(self.img), ShowCreation(self.left_img), run_times=2)
        # Advance the clock in growing steps; the updaters redraw the frames.
        for i in range(6):
            self.play(during_times.increment_value, 0.5*i, rate_func=linear,run_times=0.5*i)
        #
        # for i in range(6)[::-1]:
        #     self.play(during_times.increment_value, 0.1*i, rate_func=linear,run_times=0.1*i)
        self.wait()
if __name__=="__main__":
run([manim_with_animation]) | 32.846154 | 94 | 0.658749 | 828 | 0.269444 | 0 | 0 | 0 | 0 | 0 | 0 | 624 | 0.203059 |
6186f8d5ab1178cb5b433da5dd21ea78201073c1 | 3,513 | py | Python | dlms_cosem/clients/experimental_meter.py | Layty/dlms-cosem | 95b67054a1dfb928e960547b0246b7b6794f0594 | [
"MIT"
] | 1 | 2021-08-20T09:19:07.000Z | 2021-08-20T09:19:07.000Z | dlms_cosem/clients/experimental_meter.py | Layty/dlms-cosem | 95b67054a1dfb928e960547b0246b7b6794f0594 | [
"MIT"
] | null | null | null | dlms_cosem/clients/experimental_meter.py | Layty/dlms-cosem | 95b67054a1dfb928e960547b0246b7b6794f0594 | [
"MIT"
] | null | null | null | from contextlib import contextmanager
from decimal import Decimal
from typing import *
import attr
from dlms_cosem import cosem, utils
from dlms_cosem.clients.dlms_client import DlmsClient
from dlms_cosem.cosem import Obis
from dlms_cosem.cosem.profile_generic import Data, ProfileGeneric
from dlms_cosem.protocol.xdlms.selective_access import RangeDescriptor
def force_obis(maybe_obis: Union[str, Obis]):
    """Coerce *maybe_obis* into an :class:`Obis` instance.

    Values that already are ``Obis`` pass through unchanged; anything else
    is treated as a dotted OBIS string and parsed.
    """
    if not isinstance(maybe_obis, Obis):
        maybe_obis = Obis.from_dotted(maybe_obis)
    return maybe_obis
@attr.s(auto_attribs=True)
class NumericalMeterValue:
    """
    When parsing data from a DlmsMeter it can be a simple value that we know the meta
    data around before since it is predefined. For example static attribute on an
    interface class. For example capture_period on ProfileGeneric is an integer in
    minutes.
    Other attributes can be objects that is returned so that we may correctly parse
    data in other attributes: Ex. capture_objects in Profile Generic.
    Modeling values when they are not part of an attribute on an interface class is
    usually done via interface classes DATA and REGISTER.
    We have both numerical and string-based values; this class models the
    numerical kind (fixed docstring typo "numerial").
    """
    # To avoid floating point rounding error we should always use Decimal to
    # represent a numerical value in a meter.
    value: Decimal
    # Scalar is used if the value provides one
    scalar: Optional[int]
    # see table 4 in Blue Book.
    # TODO: simple Enum or a factory that returns Unit objects?
    unit: Any
@attr.s(auto_attribs=True)
class StringlikeMeterValue:
    """
    For meter values that are string-like. Bytes should be represented as
    hexadecimal strings if we can't decode them to text.
    """
    # Placeholder: no fields are modelled yet.
    pass
@attr.s(auto_attribs=True)
class Meter:
    """High-level facade combining a :class:`DlmsClient` with the COSEM
    object model (``objects``) known for the physical meter."""
    # Client used for association handling and attribute reads.
    dlms_client: DlmsClient
    # Known COSEM objects keyed by their dotted OBIS logical name.
    objects: Dict[str, Union[ProfileGeneric, Data]]
    def object_exists(self, object_obis: Obis) -> bool:
        """Return True if *object_obis* is part of the known meter model."""
        return object_obis.dotted_repr() in self.objects.keys()
    @contextmanager
    def session(self):
        """Context manager wrapping an application association.

        NOTE(review): the association is only released on the success path;
        an exception inside the ``with`` block skips
        ``release_association`` (no try/finally) -- confirm this is intended.
        """
        self.dlms_client.associate()
        yield self
        self.dlms_client.release_association()
    def get(
        self,
        logical_name: Union[str, Obis],
        attribute: int,
        selective_access: Optional[RangeDescriptor] = None,
    ):
        """Read a COSEM attribute, caching static attributes locally.

        :raises ValueError: if ``logical_name`` is not in ``self.objects``.

        NOTE(review): ``selective_access`` is accepted but never used in
        this implementation -- confirm whether it should be forwarded to
        ``dlms_client.get``.
        """
        obis = force_obis(logical_name)
        instance = self.objects.get(obis.dotted_repr(), None)
        if instance is None:
            raise ValueError(
                f"Object with logical name {obis.dotted_repr()} does not exist on meter"
            )
        if instance.is_static_attribute(attribute):
            # Return the locally cached value if this static attribute has
            # been read before (any truthy cached value short-circuits).
            value = getattr(
                instance, instance.STATIC_ATTRIBUTES[attribute].attribute_name
            )
            if value:
                return value
        value = utils.parse_as_dlms_data(
            self.dlms_client.get(
                cosem_attribute=cosem.CosemAttribute(
                    interface=instance.INTERFACE_CLASS_ID,
                    instance=obis,
                    attribute=attribute,
                )
            )
        )
        if instance.is_static_attribute(attribute):
            # Cache the freshly read static value for subsequent calls.
            setattr(
                instance, instance.STATIC_ATTRIBUTES[attribute].attribute_name, value
            )
        converter = instance.DYNAMIC_CONVERTERS.get(attribute, None)
        if converter:
            return converter(instance, value)
        return value
| 31.648649 | 88 | 0.670937 | 2,896 | 0.824367 | 121 | 0.034443 | 2,977 | 0.847424 | 0 | 0 | 1,145 | 0.325932 |
61899674fe81953319c1bcfc90f72353e48fa375 | 8,637 | py | Python | inference/encoder_train_pipeline.py | kuzhamuratov/deep-landscape | 255999cb2f87bc72b68fe29483a4fb7605f0e26b | [
"MIT"
] | 95 | 2020-07-21T15:57:12.000Z | 2022-03-28T02:46:40.000Z | inference/encoder_train_pipeline.py | kuzhamuratov/deep-landscape | 255999cb2f87bc72b68fe29483a4fb7605f0e26b | [
"MIT"
] | 5 | 2020-09-02T10:00:20.000Z | 2020-12-19T05:19:16.000Z | inference/encoder_train_pipeline.py | kuzhamuratov/deep-landscape | 255999cb2f87bc72b68fe29483a4fb7605f0e26b | [
"MIT"
] | 21 | 2020-09-09T14:31:48.000Z | 2022-03-24T13:47:58.000Z | import copy
import datetime
import os
import random
import traceback
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from inference.inference_utils import get_trange, get_tqdm
def init_random_seed(value=0):
    """Seed every relevant RNG (``random``, NumPy, PyTorch CPU and CUDA)
    and put cuDNN into deterministic mode, so that runs are reproducible.

    :param value: integer seed shared by all generators.
    """
    random.seed(value)
    np.random.seed(value)
    torch.manual_seed(value)
    # Seed every CUDA device, not just the current one (safe no-op on
    # CUDA-less machines); the original only seeded the current device.
    torch.cuda.manual_seed_all(value)
    torch.backends.cudnn.deterministic = True
def copy_data_to_device(data, device):
    """Recursively move every tensor in a (possibly nested) structure to
    *device*.

    Supports tensors, lists, tuples and dicts; containers are rebuilt with
    their elements moved.  Tuples are now returned as tuples -- the
    original implementation silently converted them to lists.

    :raises ValueError: on a value that is neither a tensor nor a
        supported container.
    """
    if torch.is_tensor(data):
        return data.to(device)
    elif isinstance(data, tuple):
        return tuple(copy_data_to_device(elem, device) for elem in data)
    elif isinstance(data, list):
        return [copy_data_to_device(elem, device) for elem in data]
    elif isinstance(data, dict):
        return {name: copy_data_to_device(value, device) for name, value in data.items()}
    raise ValueError('Unexpected data type {}'.format(type(data)))
def sum_dicts(current, new):
    """Return the key-wise sum of two metric dicts.

    ``current`` may be ``None`` (treated as the additive identity, in which
    case *new* itself is returned).  Otherwise a shallow copy of *current*
    is updated, so neither input is mutated.
    """
    if current is None:
        return new
    merged = dict(current)
    merged.update({key: merged.get(key, 0) + value for key, value in new.items()})
    return merged
def norm_dict(current, n):
    """Divide every value in *current* by *n* (helper for averaging
    accumulated metrics over ``n`` batches).

    When ``n`` is 0 the dict is returned unchanged, so callers that never
    processed a batch do not hit a division by zero.

    Fix: the previous version divided by ``n + 1e-6`` even though the
    ``n == 0`` case was already handled above, slightly skewing every
    averaged metric; the division is now exact.
    """
    if n == 0:
        return current
    return {name: value / n for name, value in current.items()}
def train_eval_loop(model, train_dataset, val_dataset, criterion,
                    lr=1e-4, epoch_n=10, batch_size=32,
                    device='cuda', early_stopping_patience=10, l2_reg_alpha=0,
                    max_batches_per_epoch_train=10000,
                    max_batches_per_epoch_val=1000,
                    data_loader_ctor=DataLoader,
                    optimizer_ctor=None,
                    lr_scheduler_ctor=None,
                    shuffle_train=True,
                    dataloader_workers_n=0,
                    clip_grad=10,
                    save_vis_images_path=None,
                    save_vis_images_freq=100,
                    save_models_path=None,
                    save_models_freq=10):
    """Generic train/validation loop with early stopping and checkpointing.

    ``criterion(pred, batch_y)`` must return a ``(loss, metrics_dict,
    vis_img_or_None)`` triple.  The model with the lowest mean validation
    loss is deep-copied; when ``save_models_path`` is set it is written to
    ``best_model.pth`` and periodic epoch checkpoints are saved as well.
    Training ends after ``epoch_n`` epochs, after
    ``early_stopping_patience`` epochs without validation improvement, on
    Ctrl-C, or on any unexpected exception (which is printed with its
    traceback).

    :return: ``(best_val_loss, best_val_metrics, best_model)``
    """
    device = torch.device(device)
    model.to(device)
    # Default optimizer: Adam with L2 regularisation via weight_decay.
    if optimizer_ctor is None:
        optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_reg_alpha)
    else:
        optimizer = optimizer_ctor(model.parameters(), lr=lr)
    if lr_scheduler_ctor is not None:
        lr_scheduler = lr_scheduler_ctor(optimizer)
    else:
        lr_scheduler = None
    train_dataloader = data_loader_ctor(train_dataset, batch_size=batch_size, shuffle=shuffle_train,
                                        num_workers=dataloader_workers_n)
    val_dataloader = data_loader_ctor(val_dataset, batch_size=batch_size, shuffle=False,
                                      num_workers=dataloader_workers_n)
    best_val_loss = float('inf')
    best_val_metrics = None
    best_epoch_i = 0
    best_model = copy.deepcopy(model)
    for epoch_i in get_trange(epoch_n, desc='Epochs'):
        try:
            epoch_start = datetime.datetime.now()
            print('Epoch {}'.format(epoch_i))
            # --- training phase ---
            model.train()
            mean_train_loss = 0
            mean_train_metrics = None
            train_batches_n = 0
            for batch_i, (batch_x, batch_y) in get_tqdm(enumerate(train_dataloader), desc=f'Epoch {epoch_i}',
                                                        total=max_batches_per_epoch_train, leave=True):
                if batch_i > max_batches_per_epoch_train:
                    break
                batch_x = copy_data_to_device(batch_x, device)
                batch_y = copy_data_to_device(batch_y, device)
                pred = model(batch_x)
                loss, metrics, vis_img = criterion(pred, batch_y)
                model.zero_grad()
                loss.backward()
                # Clip the gradient norm to stabilise training.
                torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
                optimizer.step()
                mean_train_loss += float(loss)
                mean_train_metrics = sum_dicts(mean_train_metrics, metrics)
                if vis_img is not None and save_vis_images_path is not None and batch_i % save_vis_images_freq == 0:
                    # assumes batch_y is a dict containing an 'images' tensor
                    # whose first dimension is the batch size -- TODO confirm
                    save_image(vis_img,
                               os.path.join(save_vis_images_path,
                                            'epoch{:04d}_iter{:06d}_train.jpg'.format(epoch_i, batch_i)),
                               nrow=batch_y['images'].shape[0],
                               normalize=True,
                               range=(-1, 1))
                train_batches_n += 1
            mean_train_loss /= train_batches_n
            mean_train_metrics = norm_dict(mean_train_metrics, train_batches_n)
            print('Epoch: {} iterations, {:0.2f} sec'.format(train_batches_n,
                                                             (datetime.datetime.now() - epoch_start).total_seconds()))
            print('Mean train loss', mean_train_loss, mean_train_metrics)
            if save_models_path is not None and epoch_i % save_models_freq == 0:
                torch.save(model, os.path.join(save_models_path, 'model_epoch_{:04d}.pth'.format(epoch_i)))
            # --- validation phase (no gradients) ---
            model.eval()
            mean_val_loss = 0
            mean_val_metrics = None
            val_batches_n = 0
            with torch.no_grad():
                for batch_i, (batch_x, batch_y) in enumerate(val_dataloader):
                    if batch_i > max_batches_per_epoch_val:
                        break
                    batch_x = copy_data_to_device(batch_x, device)
                    batch_y = copy_data_to_device(batch_y, device)
                    pred = model(batch_x)
                    loss, metrics, vis_img = criterion(pred, batch_y)
                    mean_val_loss += float(loss)
                    mean_val_metrics = sum_dicts(mean_val_metrics, metrics)
                    if vis_img is not None and save_vis_images_path is not None and batch_i % save_vis_images_freq == 0:
                        save_image(vis_img,
                                   os.path.join(save_vis_images_path,
                                                'epoch{:04d}_iter{:06d}_val.jpg'.format(epoch_i, batch_i)),
                                   nrow=batch_y['images'].shape[0],
                                   normalize=True,
                                   range=(-1, 1))
                    val_batches_n += 1
            # NOTE(review): validation divides by n + 1e-6 while training
            # divides by n exactly -- confirm whether this is intentional.
            mean_val_loss /= val_batches_n + 1e-6
            mean_val_metrics = norm_dict(mean_val_metrics, val_batches_n)
            print('Mean validation loss', mean_val_loss, mean_val_metrics)
            if mean_val_loss < best_val_loss:
                best_epoch_i = epoch_i
                best_val_loss = mean_val_loss
                best_val_metrics = mean_val_metrics
                best_model = copy.deepcopy(model)
                print('New best model!')
                if save_models_path is not None:
                    torch.save(best_model, os.path.join(save_models_path, 'best_model.pth'))
            elif epoch_i - best_epoch_i > early_stopping_patience:
                print('Model has not improved during the last {} epochs, stopping training early'.format(
                    early_stopping_patience))
                break
            if lr_scheduler is not None:
                # Stepped with the validation loss, i.e. expects a scheduler
                # whose step() accepts a metric (ReduceLROnPlateau-style).
                lr_scheduler.step(mean_val_loss)
            print()
        except KeyboardInterrupt:
            print('Interrupted by user')
            break
        except Exception as ex:
            print('Fatal error during training: {}\n{}'.format(ex, traceback.format_exc()))
            break
    return best_val_loss, best_val_metrics, best_model
def predict_with_model(model, dataset, device='cuda', batch_size=32, num_workers=0, return_labels=False):
    """
    Run *model* over *dataset* in inference mode and gather predictions.

    :param model: torch.nn.Module - trained model
    :param dataset: torch.utils.data.Dataset - data to apply model
    :param device: cuda/cpu (or any torch device string)
    :param batch_size: mini-batch size for the forward passes
    :param num_workers: DataLoader worker process count
    :param return_labels: when True, also collect and return the labels
    :return: numpy.array dimensionality len(dataset) x *; with
        ``return_labels`` a ``(predictions, labels)`` tuple instead
    """
    # Local imports keep heavy/optional deps at function scope; previously
    # `import tqdm` was buried inside the `with` block.
    import math
    import tqdm
    device = torch.device(device)
    model.to(device)
    model.eval()
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    results_by_batch = []
    labels = []
    # Fix: the progress-bar total was the float len(dataset)/batch_size;
    # use the exact batch count instead.
    n_batches = math.ceil(len(dataset) / batch_size)
    with torch.no_grad():
        for batch_x, batch_y in tqdm.tqdm_notebook(dataloader, total=n_batches):
            batch_x = copy_data_to_device(batch_x, device)
            if return_labels:
                labels.append(batch_y.numpy())
            batch_pred = model(batch_x)
            results_by_batch.append(batch_pred.detach().cpu().numpy())
    if return_labels:
        return np.concatenate(results_by_batch, 0), np.concatenate(labels, 0)
    return np.concatenate(results_by_batch, 0)
| 38.216814 | 120 | 0.587009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 660 | 0.076415 |
618aadd7fe62fbfa6544e105456f6c6a8c7ddd78 | 1,443 | py | Python | app/forms.py | paulpaulaga/ask-paul | 1af662243ac5d05e3f3b237f743a11de25d712e7 | [
"MIT"
] | null | null | null | app/forms.py | paulpaulaga/ask-paul | 1af662243ac5d05e3f3b237f743a11de25d712e7 | [
"MIT"
] | 1 | 2021-10-05T07:42:54.000Z | 2021-10-05T07:42:54.000Z | app/forms.py | paulpaulaga/ask-paul | 1af662243ac5d05e3f3b237f743a11de25d712e7 | [
"MIT"
] | null | null | null | from django import forms
from app.models import Question, Profile, Answer
from django.contrib.auth.models import User
class LoginForm(forms.Form):
    """Authentication form with mandatory username and password fields."""
    username = forms.CharField()
    password = forms.CharField(widget=forms.PasswordInput)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Explicitly mark both fields as mandatory.
        for field_name in ('username', 'password'):
            self.fields[field_name].required = True
class AskForm(forms.ModelForm):
    """ModelForm for creating a :class:`Question`; the author is attached
    in :meth:`save` via a required ``user`` keyword argument."""
    class Meta:
        model = Question
        fields = ['title', 'text']
    def save(self, *args, **kwargs):
        """Save the question with ``user`` (required keyword) as its author.

        NOTE(review): with the default ``commit=True`` the instance is
        written twice -- once by ``super().save()`` and once after the
        author is set.  Confirm whether ``commit=False`` followed by a
        single save would be preferable.
        """
        user = kwargs.pop('user')
        question = super().save(*args, **kwargs)
        question.user = user
        question.save()
        return question
class UserSignupForm(forms.ModelForm):
    """Registration form wrapping the ``User`` model, plus an avatar field
    and a password-confirmation field."""
    avatar = forms.ImageField()
    password = forms.CharField(widget=forms.PasswordInput)
    confirm_password = forms.CharField(widget=forms.PasswordInput)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The User model allows a blank e-mail; require it for signup.
        self.fields['email'].required = True
    def clean(self):
        """Cross-field validation.

        Fix: ``confirm_password`` was collected but never compared against
        ``password``, so mismatching confirmations were silently accepted.
        """
        cleaned_data = super().clean()
        password = cleaned_data.get('password')
        confirm_password = cleaned_data.get('confirm_password')
        if password and confirm_password and password != confirm_password:
            self.add_error('confirm_password', 'Passwords do not match.')
        return cleaned_data
    class Meta:
        model = User
        fields = ['username', 'email', 'first_name', 'last_name', 'password']
class AnswerForm(forms.ModelForm):
    """ModelForm for posting an :class:`Answer`; the author is attached in
    :meth:`save` via a required ``user`` keyword argument."""
    class Meta:
        model = Answer
        fields = ['text']
    def save(self, *args, **kwargs):
        """Save the answer with ``user`` (required keyword) as its author.

        Fix: now returns the saved instance, matching ``AskForm.save``;
        the previous version implicitly returned ``None``.
        """
        user = kwargs.pop('user')
        answer = super().save(*args, **kwargs)
        answer.user = user
        answer.save()
        return answer
| 27.226415 | 77 | 0.627859 | 1,313 | 0.90991 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.074844 |
618abbfd430402efb7dde78a0d8e25929179a059 | 3,722 | py | Python | python/cc_emergency/functional/transforms/language_filter.py | DavidNemeskey/cc_emergency_corpus | 6a6707ea6e939047014db7de8a4031ae7901351a | [
"MIT"
] | null | null | null | python/cc_emergency/functional/transforms/language_filter.py | DavidNemeskey/cc_emergency_corpus | 6a6707ea6e939047014db7de8a4031ae7901351a | [
"MIT"
] | null | null | null | python/cc_emergency/functional/transforms/language_filter.py | DavidNemeskey/cc_emergency_corpus | 6a6707ea6e939047014db7de8a4031ae7901351a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
"""Language / domain filtering transforms."""
from functools import partial
import importlib
import inspect
import tldextract
from cc_emergency.functional.core import Filter
from cc_emergency.utils import first
class LanguageFilter(Filter):
    """Filters objects for language(s)."""
    # Maps the importable module name to the pip package that provides it.
    LIBRARIES = {'langid': 'langid', 'cld2': 'cld2-cffi'}
    def __init__(self, fields, languages, libraries=None):
        """
        - fields: either the name of the field on which to perform the language
                  identification, or a list of fields
        - languages: either the name of a language or a list of languages
        - libraries: the libraries to try, in order. The default is cld2 alone,
                     but langid is also supported, as well as any combinations
                     of these two.
        """
        super(LanguageFilter, self).__init__()
        if not isinstance(fields, list):
            fields = [fields]
        if not isinstance(languages, list):
            languages = [languages]
        self.languages = set(languages)
        self.fields = fields
        self.libraries = libraries if libraries else ['cld2']
        self.detectors = []
    def __enter__(self):
        # Lazily import and bind one detector callable per configured library.
        for lib in self.libraries:
            if lib not in self.LIBRARIES:
                raise ValueError('Unsupported library "{}"'.format(lib))
            try:
                self.logger.debug('Loading {}...'.format(lib))
                detector_lib = importlib.import_module(lib)
                # Look up the bound method named '__<lib>' (e.g. __cld2).
                # NOTE(review): this relies on the method's __name__ not
                # being affected by private name mangling -- confirm on the
                # supported Python versions.
                detector_fn = inspect.getmembers(
                    self,
                    lambda o: inspect.ismethod(o) and o.__name__ == '__' + lib,
                )[0][1]
                self.detectors.append(partial(detector_fn, detector_lib))
            except ImportError:
                raise ImportError(
                    'The {} module '.format(self.LIBRARIES[lib]) +
                    'is needed for LanguageFilter to work.')
        return self
    def transform(self, obj):
        """Return True iff the detected language of the joined fields is in
        ``self.languages``; False when undetected or not matching."""
        text = '\n'.join(obj.get(field, '') for field in self.fields)
        for detector in self.detectors:
            try:
                lang = detector(text)
                if lang != 'un':
                    return lang in self.languages
            # NOTE(review): bare except deliberately treats any detector
            # failure as "no answer" and falls through to the next library.
            except:
                pass
        # for/else: only runs when no detector produced a usable answer.
        else:
            self.logger.debug(
                'Could not detect language for document {}'.format(
                    first(obj.values())))
        return False
    def __langid(self, lib, text):
        """Detector backend for the langid library."""
        return lib.classify(text)[0]
    def __cld2(self, lib, text):
        """Detector backend for the cld2-cffi library."""
        return lib.detect(text).details[0].language_code
class DomainFilter(Filter):
    def __init__(self, domains, field='url', expression='suffix', retain=True):
        """
        Filters all urls (by default) not in, or, if the retain argument
        is False, in the specified list. The class allows the user to
        assemble an expression from the parts of a domain (subdomain,
        domain and suffix), and the class will compare that against the list.
        The default expression is 'suffix', i.e. the TLD.
        """
        super(DomainFilter, self).__init__()
        self.field = field
        self.domains = set(domains)
        # Compile the expression once; transform() evaluates it against the
        # subdomain/domain/suffix parts of each URL.
        # NOTE(review): the expression is eval()'d -- it must only ever come
        # from trusted configuration, never from the processed data.
        self.expression = compile(expression, '<string>', 'eval')
        self.check = self.__in if retain else self.__not_in
    def __in(self, domain):
        """Keep records whose evaluated expression IS in the domain list."""
        return domain in self.domains
    def __not_in(self, domain):
        """Keep records whose evaluated expression is NOT in the domain list."""
        return domain not in self.domains
    def transform(self, obj):
        # tldextract splits the URL into subdomain / domain / suffix; the
        # _asdict() mapping serves as the globals for the eval'd expression.
        variables = tldextract.extract(obj[self.field].lower())._asdict()
        return self.check(eval(self.expression, variables))
| 36.135922 | 79 | 0.59108 | 3,446 | 0.925846 | 0 | 0 | 0 | 0 | 0 | 0 | 1,141 | 0.306556 |
618dbd305a51529f96610d98339902bb9a67056f | 241 | py | Python | mathgenerator/funcs/volumeSphereFunc.py | furins/mathgenerator | 3ba50015ef4d9abaa404b5d3b9bb272cfdcf2deb | [
"MIT"
] | null | null | null | mathgenerator/funcs/volumeSphereFunc.py | furins/mathgenerator | 3ba50015ef4d9abaa404b5d3b9bb272cfdcf2deb | [
"MIT"
] | null | null | null | mathgenerator/funcs/volumeSphereFunc.py | furins/mathgenerator | 3ba50015ef4d9abaa404b5d3b9bb272cfdcf2deb | [
"MIT"
] | null | null | null | from .__init__ import *
def volumeSphereFunc(maxRadius=100):
    """Generate a sphere-volume exercise.

    Picks a random integer radius in ``[1, maxRadius]`` and returns a
    ``(problem, solution)`` pair of strings, the solution being the
    sphere's volume in cubic metres.
    """
    radius = random.randint(1, maxRadius)
    volume = (4 * math.pi / 3) * radius * radius * radius
    problem = f"Volume of sphere with radius {radius} m = "
    solution = f"{volume} m^3"
    return problem, solution
| 21.909091 | 52 | 0.647303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.215768 |
618dc6198da7239c2edc0143aac74d64a250d04c | 841 | py | Python | create_widget.py | Adam01/Cylinder | 84ca02e2860df579a4e0514c4060a0c497e167a5 | [
"MIT"
] | 4 | 2015-02-19T17:46:27.000Z | 2018-10-27T08:00:58.000Z | create_widget.py | Adam01/Cylinder | 84ca02e2860df579a4e0514c4060a0c497e167a5 | [
"MIT"
] | 3 | 2015-04-09T18:14:35.000Z | 2015-05-07T02:36:48.000Z | create_widget.py | Adam01/Cylinder | 84ca02e2860df579a4e0514c4060a0c497e167a5 | [
"MIT"
] | null | null | null | import os
import sys
import shutil
# Scaffold a new Cylinder widget: copy every file from the template
# directory into a freshly created widget directory, renaming each copy
# after the widget name given as the first CLI argument.
# NOTE: this is Python 2 code (print statements).
templateDir = "./Cylinder/WidgetTemplate"
widgetOutDir = "./Cylinder/rapyd/Widgets/"
if len(sys.argv) > 1:
    name = sys.argv[1]
    widgetDir = os.path.join(widgetOutDir, name)
    if not os.path.exists(widgetDir):
        if os.path.exists(templateDir):
            os.mkdir(widgetDir)
            for item in os.listdir(templateDir):
                widgetTemplateItem = os.path.join(templateDir, item)
                # Keep the template file's extension, swap in the widget name.
                widgetItem = os.path.join(widgetDir, name + os.path.splitext(item)[1] )
                print "Copying %s as %s" % (widgetTemplateItem, widgetItem)
                shutil.copy(widgetTemplateItem, widgetItem)
            print "Done"
        else:
            print "Unable to find template dir"
    else:
        print "Widget already exists"
else:
print "Specify widget name" | 32.346154 | 87 | 0.62069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.179548 |
618ed936ea97eb91d0ed1cb9b6eb7123a3f94c4d | 1,380 | py | Python | examples/06-worker.py | pacslab/serverless-performance-simulator | 5dd1623ad873d005b457676c060232205de7f552 | [
"MIT"
] | 5 | 2021-06-17T15:50:40.000Z | 2022-02-15T21:24:39.000Z | examples/06-worker.py | sicilly/simfaas | 5dd1623ad873d005b457676c060232205de7f552 | [
"MIT"
] | null | null | null | examples/06-worker.py | sicilly/simfaas | 5dd1623ad873d005b457676c060232205de7f552 | [
"MIT"
] | 5 | 2020-07-19T22:57:34.000Z | 2021-11-16T01:40:03.000Z | import zmq
import time
import sys
import struct
import multiprocessing
from examples.sim_trace import generate_trace
# Default ZeroMQ port; overridable via the first CLI argument.
port = "5556"
if len(sys.argv) > 1:
    port = sys.argv[1]
    int(port)  # result discarded: validates the argument is numeric (raises ValueError otherwise)
socket_addr = "tcp://127.0.0.1:%s" % port
# Slightly oversubscribe the CPUs, a common heuristic for worker pools.
worker_count = multiprocessing.cpu_count() * 2 + 1
# Cooperative shutdown flag polled by worker(); note it is per-process.
stop_signal = False
def worker(context=None, name="worker"):
    """Serve trace requests over ZeroMQ until ``stop_signal`` becomes True.

    Every incoming multipart message is answered with a single
    double-precision float produced by ``generate_trace()``, packed as 8
    raw bytes.  The request payload itself is ignored.
    """
    context = context or zmq.Context.instance()
    worker = context.socket(zmq.ROUTER)
    worker.connect(socket_addr)
    print(f"Starting thread: {name}")
    poller = zmq.Poller()
    poller.register(worker, zmq.POLLIN)
    # Poll with a 1 s timeout so the stop flag is re-checked regularly.
    while not stop_signal:
        socks = dict(poller.poll(timeout=1000))
        if worker in socks and socks[worker] == zmq.POLLIN:
            ident, message = worker.recv_multipart()  # `message` is intentionally unused
            # calculate trace
            msg = struct.pack("d", generate_trace())
            worker.send_multipart([ident, msg])
if __name__ == "__main__":
    # Spawn one OS process per worker; they all connect to the same socket.
    worker_names = [f"worker-{i}" for i in range(worker_count)]
    worker_threads = [multiprocessing.Process(target=worker, args=(None,n)) for n in worker_names]
    _ = [t.start() for t in worker_threads]
    # Idle until Ctrl-C, then signal the workers and wait for them.
    while True:
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            print("Ctrl-c pressed!")
            # NOTE(review): the workers run in separate *processes*, so
            # setting this module-level flag in the parent does not reach
            # them; the join() below may therefore block -- confirm.
            stop_signal = True
            [t.join() for t in worker_threads]
            break
| 26.538462 | 98 | 0.626087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.086957 |
6190146c6aed9af23bf26291c4ff9c3eaefe84de | 3,532 | py | Python | ensembl_genes/species.py | ravwojdyla/ensembl-genes | f48f9f23427a34a8920bea19b9b7c220eb43facc | [
"Apache-2.0"
] | 5 | 2021-10-11T17:18:48.000Z | 2022-02-22T03:32:39.000Z | ensembl_genes/species.py | ravwojdyla/ensembl-genes | f48f9f23427a34a8920bea19b9b7c220eb43facc | [
"Apache-2.0"
] | 13 | 2021-10-11T18:30:30.000Z | 2022-03-04T15:36:21.000Z | ensembl_genes/species.py | ravwojdyla/ensembl-genes | f48f9f23427a34a8920bea19b9b7c220eb43facc | [
"Apache-2.0"
] | 2 | 2021-10-15T18:31:53.000Z | 2022-02-12T04:23:18.000Z | from dataclasses import dataclass
from typing import Optional, Union
from ensembl_genes.models import GeneForMHC
@dataclass
class Species:
    """Static description of an Ensembl species/assembly, including the
    MHC / extended-MHC (xMHC) interval boundaries used to tag genes."""
    name: str
    common_name: str
    assembly: str
    ensembl_gene_pattern: str
    enable_mhc: bool
    mhc_chromosome: str
    mhc_lower: int
    mhc_upper: int
    xmhc_lower: int
    xmhc_upper: int
    chromosomes: list[str]
    def get_mhc_category(self, gene: "GeneForMHC") -> Optional[str]:
        """Assign MHC status of MHC, xMHC, or no to an ensembl gene record.

        Returns ``None`` when MHC tagging is disabled for this species,
        ``"MHC"`` / ``"xMHC"`` when the gene overlaps the respective closed
        interval on the MHC chromosome, and ``"no"`` otherwise.
        """
        if not self.enable_mhc:
            return None
        chromosome: str = gene.chromosome
        start: int = gene.seq_region_start
        end: int = gene.seq_region_end
        if chromosome != self.mhc_chromosome:
            return "no"
        # Ensembl uses 1 based indexing, such that the interval should include
        # the end (closed) as per https://www.biostars.org/p/84686/.
        # Two closed intervals [a, b] and [lo, hi] overlap iff
        # a <= hi and b >= lo; this replaces the previous per-call pandas
        # import and pd.Interval.overlaps with the equivalent arithmetic.
        if start <= self.mhc_upper and end >= self.mhc_lower:
            return "MHC"
        if start <= self.xmhc_upper and end >= self.xmhc_lower:
            return "xMHC"
        return "no"
# Known species definitions; `all_species` at the bottom is the lookup table
# consumed by get_species().
human = Species(
    name="homo_sapiens",
    common_name="human",
    # GRCh38
    assembly="38",
    # Regex pattern that valid human ensembl gene IDs should match.
    # https://bioinformatics.stackexchange.com/a/15044/9750
    ensembl_gene_pattern=r"^ENSG[0-9]{11}$",
    # Refs MHC boundary discussion internal Related Sciences issue 127.
    # https://bioinformatics.stackexchange.com/a/14719/9750
    enable_mhc=True,
    mhc_chromosome="6",
    mhc_lower=28_510_120,
    mhc_upper=33_480_577,
    xmhc_lower=25_726_063,
    xmhc_upper=33_410_226,
    # Chromosome names applied to genes on the primary assembly rather than alternative sequences.
    # Refs internal Related Sciences issue 241.
    chromosomes=[*map(str, range(1, 22 + 1)), "X", "Y", "MT"],
)
mouse = Species(
    name="mus_musculus",
    common_name="mouse",
    assembly="39", # GRCm39
    ensembl_gene_pattern=r"^ENSMUSG[0-9]{11}$",
    # FIXME: mhc coordinates (H2 complex)
    # https://doi.org/10.1002/9780470015902.a0000921.pub4
    enable_mhc=False,
    mhc_chromosome="17",
    # NOTE(review): the four bounds below are identical to the human values and
    # enable_mhc is False -- they appear to be placeholders pending the FIXME
    # above; confirm before enabling MHC classification for mouse.
    mhc_lower=28_510_120,
    mhc_upper=33_480_577,
    xmhc_lower=25_726_063,
    xmhc_upper=33_410_226,
    chromosomes=[*map(str, range(1, 19 + 1)), "X", "Y", "MT"],
)
rat = Species(
    name="rattus_norvegicus",
    common_name="rat",
    # mRatBN7.2
    assembly="72",
    # https://github.com/related-sciences/ensembl-genes/issues/4#issuecomment-941556912
    ensembl_gene_pattern=r"^ENSRNOG[0-9]{11}$",
    # FIXME: mhc coordinates
    # https://github.com/related-sciences/ensembl-genes/pull/6#discussion_r729259953
    enable_mhc=False,
    mhc_chromosome="20",
    # NOTE(review): placeholder bounds copied from human (see FIXME above).
    mhc_lower=28_510_120,
    mhc_upper=33_480_577,
    xmhc_lower=25_726_063,
    xmhc_upper=33_410_226,
    chromosomes=[*map(str, range(1, 20 + 1)), "X", "Y", "MT"],
)
all_species = [human, mouse, rat]
def get_species(species: Union[str, Species]) -> Species:
    """Resolve *species* to a Species definition.

    Accepts either an existing Species (returned unchanged) or a
    case-insensitive scientific or common name; raises ValueError when no
    defined species matches.
    """
    if isinstance(species, Species):
        return species
    wanted = species.lower()
    for candidate in all_species:
        if wanted in (candidate.name, candidate.common_name):
            return candidate
    raise ValueError(f"species {species!r} not found.")
| 32.40367 | 98 | 0.668743 | 1,218 | 0.344847 | 0 | 0 | 1,229 | 0.347961 | 0 | 0 | 1,179 | 0.333805 |
6191690a86eb7024d9f9224d6a7236a89bda9fa5 | 1,294 | py | Python | app_covid19data/tests/test_views.py | falken20/covid19web | 3826e5cc51dc24d373a1f614ccdb7c30993312ce | [
"MIT"
] | null | null | null | app_covid19data/tests/test_views.py | falken20/covid19web | 3826e5cc51dc24d373a1f614ccdb7c30993312ce | [
"MIT"
] | null | null | null | app_covid19data/tests/test_views.py | falken20/covid19web | 3826e5cc51dc24d373a1f614ccdb7c30993312ce | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from model_bakery import baker
from app_covid19data.models import DataCovid19Item
from app_covid19data import views
class Covid19dataTest(TestCase):
    """Tests for the covid19 data views (resume and detail lookups)."""

    def setUp(self):
        """ Method which the testing framework will automatically call before every single test we run """
        # Create several rows so the country queries below have data to hit.
        self.datacovid19 = baker.make(DataCovid19Item, country='Spain', date=timezone.now().date(),
                                      dead_cases=1, confirmed_cases=1, recovered_cases=1,
                                      _quantity=5)

    # Views tests
    def test_covid19data_resume_view(self):
        """The resume view renders successfully."""
        url = reverse(views.resume_view)
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)

    def test_covid19data_get_resume_country(self):
        """get_resume_country returns the summary row for the given country."""
        # The data for the test are loaded in the setUp fixture above.
        queryset = views.get_resume_country('Spain')
        self.assertEqual(queryset['country'], 'Spain')

    def test_covid19data_get_detail_country(self):
        """get_detail_country returns at least one row for a seeded country."""
        queryset = views.get_detail_country('Spain')
        self.assertGreaterEqual(len(queryset), 1)
| 36.971429 | 106 | 0.681607 | 1,075 | 0.830757 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.205564 |
619239cbab47f38d61f102bfd2c870af018bffa8 | 407 | py | Python | Chapter05/restful_python_2_05/Django01/games_service/games/models.py | PacktPublishing/Hands-On-RESTful-Python-Web-Services-Second-Edition | db8212c90f6394d8ee6fadb038e2b01ef83c963d | [
"MIT"
] | 45 | 2018-12-21T01:02:16.000Z | 2022-03-18T08:23:13.000Z | Chapter05/restful_python_2_05/Django01/games_service/games/models.py | PacktPublishing/Hands-On-RESTful-Python-Web-Services-Second-Edition | db8212c90f6394d8ee6fadb038e2b01ef83c963d | [
"MIT"
] | 12 | 2020-02-11T23:32:33.000Z | 2021-06-10T22:29:56.000Z | Chapter05/restful_python_2_05/Django01/games_service/games/models.py | PacktPublishing/Hands-On-RESTful-Python-Web-Services-Second-Edition | db8212c90f6394d8ee6fadb038e2b01ef83c963d | [
"MIT"
] | 29 | 2019-02-11T16:45:56.000Z | 2022-03-29T12:43:27.000Z | from django.db import models
class Game(models.Model):
    """Django model for a video game with rating and play-tracking fields."""
    created_timestamp = models.DateTimeField(auto_now_add=True)  # set once, on insert
    name = models.CharField(max_length=200)
    release_date = models.DateTimeField()
    esrb_rating = models.CharField(max_length=150)
    played_once = models.BooleanField(default=False)
    played_times = models.IntegerField(default=0)

    class Meta:
        # Default queryset ordering: alphabetical by game name.
        ordering = ('name',)
| 29.071429 | 63 | 0.732187 | 375 | 0.921376 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.014742 |
61938ef87f807b50b7ef975df7fc7cd96772c7e3 | 7,100 | py | Python | old_py2/controllers/datafeed_controller.py | bovlb/the-blue-alliance | 29389649d96fe060688f218d463e642dcebfd6cc | [
"MIT"
] | null | null | null | old_py2/controllers/datafeed_controller.py | bovlb/the-blue-alliance | 29389649d96fe060688f218d463e642dcebfd6cc | [
"MIT"
] | null | null | null | old_py2/controllers/datafeed_controller.py | bovlb/the-blue-alliance | 29389649d96fe060688f218d463e642dcebfd6cc | [
"MIT"
] | null | null | null | import logging
import os
import datetime
import tba_config
import time
import json
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from consts.event_type import EventType
from consts.media_type import MediaType
from consts.media_tag import MediaTag
from datafeeds.datafeed_fms_api import DatafeedFMSAPI
from datafeeds.datafeed_tba import DatafeedTba
from datafeeds.datafeed_resource_library import DatafeedResourceLibrary
from helpers.district_manipulator import DistrictManipulator
from helpers.event_helper import EventHelper
from helpers.event_manipulator import EventManipulator
from helpers.event_details_manipulator import EventDetailsManipulator
from helpers.event_team_manipulator import EventTeamManipulator
from helpers.match_manipulator import MatchManipulator
from helpers.match_helper import MatchHelper
from helpers.award_manipulator import AwardManipulator
from helpers.media_manipulator import MediaManipulator
from helpers.team_manipulator import TeamManipulator
from helpers.district_team_manipulator import DistrictTeamManipulator
from helpers.robot_manipulator import RobotManipulator
from helpers.event.offseason_event_helper import OffseasonEventHelper
from helpers.suggestions.suggestion_creator import SuggestionCreator
from models.district_team import DistrictTeam
from models.event import Event
from models.event_details import EventDetails
from models.event_team import EventTeam
from models.media import Media
from models.robot import Robot
from models.sitevar import Sitevar
from models.team import Team
from sitevars.website_blacklist import WebsiteBlacklist
class DistrictRankingsGet(webapp.RequestHandler):
    """
    Fetch district rankings from FIRST
    This data does not have full pre-event point breakdowns, but it does contain
    things like CMP advancement
    """
    def get(self, district_key):
        # NOTE(review): the handler body is missing in this source, which is a
        # syntax error (a `def` must have a body) and would break the whole
        # module on import.  Stub it explicitly rather than leaving the file
        # unparsable; the real fetch logic still needs to be restored.
        raise NotImplementedError(
            'DistrictRankingsGet.get(%s) is not implemented' % district_key)
class TbaVideosEnqueue(webapp.RequestHandler):
    """
    Enqueues one tba_videos fetch task per Event in the datastore.
    """
    def get(self):
        # Fan out: one taskqueue entry per known event.
        for event in Event.query():
            taskqueue.add(
                url='/tasks/get/tba_videos/' + event.key_name,
                method='GET')

        template_values = {'event_count': Event.query().count()}

        # Only render a response when invoked directly (not from taskqueue).
        if 'X-Appengine-Taskname' not in self.request.headers:
            path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/tba_videos_enqueue.html')
            self.response.out.write(template.render(path, template_values))
class TbaVideosGet(webapp.RequestHandler):
    """
    Handles reading a TBA video listing page and updating the match objects in the datastore as needed.
    """
    def get(self, event_key):
        df = DatafeedTba()
        event = Event.get_by_id(event_key)
        # Mapping of match key_name -> list of available video file types.
        match_filetypes = df.getVideos(event)
        if match_filetypes:
            matches_to_put = []
            # Only rewrite matches whose tba_videos list actually changed,
            # to avoid unnecessary datastore writes.
            for match in event.matches:
                if match.tba_videos != match_filetypes.get(match.key_name, []):
                    match.tba_videos = match_filetypes.get(match.key_name, [])
                    match.dirty = True
                    matches_to_put.append(match)
            MatchManipulator.createOrUpdate(matches_to_put)
            tbavideos = match_filetypes.items()
        else:
            logging.info("No tbavideos found for event " + event.key_name)
            tbavideos = []
        template_values = {
            'tbavideos': tbavideos,
        }
        if 'X-Appengine-Taskname' not in self.request.headers:  # Only write out if not in taskqueue
            path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/tba_videos_get.html')
            self.response.out.write(template.render(path, template_values))
class HallOfFameTeamsGet(webapp.RequestHandler):
    """
    Handles scraping the list of Hall of Fame teams from FIRST resource library.
    """

    def _media_for_team(self, team, team_reference):
        """Build the Media entities (video/presentation/essay) for one team.

        The three media kinds share identical construction except for the
        source field name, the media type and the tag, so they are driven by
        a single spec table instead of three copy-pasted stanzas.
        """
        specs = [
            ('video', MediaType.YOUTUBE_VIDEO, MediaTag.CHAIRMANS_VIDEO),
            ('presentation', MediaType.YOUTUBE_VIDEO, MediaTag.CHAIRMANS_PRESENTATION),
            ('essay', MediaType.EXTERNAL_LINK, MediaTag.CHAIRMANS_ESSAY),
        ]
        media = []
        for field, media_type, media_tag in specs:
            foreign_key = team[field]
            # Skip media kinds the team does not have (empty/None foreign key).
            if foreign_key:
                media.append(Media(id=Media.render_key_name(media_type, foreign_key),
                                   media_type_enum=media_type,
                                   media_tag_enum=[media_tag],
                                   references=[team_reference],
                                   year=team['year'],
                                   foreign_key=foreign_key))
        return media

    def get(self):
        df = DatafeedResourceLibrary()
        teams = df.getHallOfFameTeams()
        if teams:
            media_to_update = []
            for team in teams:
                team_reference = Media.create_reference('team', team['team_id'])
                media_to_update.extend(self._media_for_team(team, team_reference))
            MediaManipulator.createOrUpdate(media_to_update)
        else:
            logging.info("No Hall of Fame teams found")
            teams = []

        template_values = {
            'teams': teams,
        }

        if 'X-Appengine-Taskname' not in self.request.headers:  # Only write out if not in taskqueue
            path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/hall_of_fame_teams_get.html')
            self.response.out.write(template.render(path, template_values))
class TeamBlacklistWebsiteDo(webapp.RequestHandler):
    """
    Blacklist the current website for a team
    """
    def get(self, key_name):
        website = Team.get_by_id(key_name).website
        # Only blacklist when the team actually has a website set.
        if website:
            WebsiteBlacklist.blacklist(website)

        # Bounce back to the team-details admin page either way.
        self.redirect('/backend-tasks/get/team_details/{}'.format(key_name))
| 40.340909 | 125 | 0.640704 | 5,354 | 0.754085 | 0 | 0 | 0 | 0 | 0 | 0 | 1,046 | 0.147324 |
6193b4548eb4ff84fedce496ec547a4d080086f6 | 3,841 | py | Python | src/bot/handlers/essence_part_handler.py | nchursin/claimant | 890ce1a3ced8db9d2e2fbddb8a3207e82ac05326 | [
"BSD-3-Clause"
] | 3 | 2022-03-03T19:10:25.000Z | 2022-03-03T19:57:15.000Z | src/bot/handlers/essence_part_handler.py | nchursin/claimant | 890ce1a3ced8db9d2e2fbddb8a3207e82ac05326 | [
"BSD-3-Clause"
] | 9 | 2022-03-03T18:56:37.000Z | 2022-03-29T18:34:02.000Z | src/bot/handlers/essence_part_handler.py | nchursin/claimant | 890ce1a3ced8db9d2e2fbddb8a3207e82ac05326 | [
"BSD-3-Clause"
] | 1 | 2022-03-04T11:59:11.000Z | 2022-03-04T11:59:11.000Z | from typing import Optional, List
from aiogram import types, Dispatcher, filters
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.state import StatesGroup, State
from aiogram.types import ReplyKeyboardMarkup
from handlers.common_actions_handlers import process_manual_enter, process_option_selection, \
process_complete_part_editing, claim_tmp_option_chosen, show_claim_tmp_example
from keyboards import emojis, get_common_start_kb, get_next_actions_kb, get_claim_parts_kb
from repository import Repository
from statistics import collect_statistic
CLAIM_PART: str = "essence"
class EssencePart(StatesGroup):
    """FSM states used while the user fills in the "essence" claim part."""
    # Waiting for free text or a keyboard action from the user.
    waiting_for_user_action = State()
    # Waiting for the user to pick a template option from the inline list.
    waiting_for_option_chosen = State()
@collect_statistic(event_name="essence:start")
async def essence_start(message: types.Message, state: FSMContext):
    """Entry point for the "essence" section.

    Requires the "story" part to be filled in first; otherwise the user is
    sent back to the claim-parts keyboard.
    """
    user_id = message.from_user.id
    stored = Repository().get_claim_data(user_id)

    filled = stored.get("claim_data")
    has_required = filled is not None and any(part in filled for part in ("story",))
    if not has_required:
        await message.reply("Пожалуйста, сперва заполните раздел 'фабула'.",
                            reply_markup=get_claim_parts_kb(user_id))
        return

    await EssencePart.waiting_for_user_action.set()
    # NOTE(review): "следующий" in the prompt below looks like a typo for
    # "следующих" -- preserved verbatim here as it is user-facing text.
    await message.reply("Опишите суть нарушения. "
                        "Введите, почему вы считаете, что ваши права нарушают. "
                        "Или выберите одну из следующий опций.",
                        reply_markup=get_common_start_kb())
@collect_statistic(event_name="essence:show_example")
async def show_example(message: types.Message, state: FSMContext):
    """Show the canned example text for the "essence" part via the shared helper."""
    await show_claim_tmp_example(message, CLAIM_PART)
async def action_selected(message: types.Message, state: FSMContext):
    """Dispatch the user's reply while in the waiting_for_user_action state.

    Known keyboard actions route to the shared handlers; anything else is
    treated as manually-entered text for this claim part.
    """
    choice: Optional[str] = message.text
    # NOTE(review): message.text may be None for non-text updates, in which
    # case .endswith raises -- preserved as-is; confirm updates are text-only.
    if choice.endswith(("выбрать из списка", "добавить еще из списка")):
        await process_option_selection(message, CLAIM_PART, EssencePart)
    elif choice.endswith("закончить заполнение"):
        await process_complete_part_editing(message, state, CLAIM_PART)
    else:
        await process_manual_enter(message, state, EssencePart)
async def option_chosen(callback_query: types.CallbackQuery, state: FSMContext):
    """Delegate a chosen template option for the "essence" part to the shared handler."""
    await claim_tmp_option_chosen(callback_query, state, CLAIM_PART)
async def finish_option_choosing(callback_query: types.CallbackQuery):
    """Leave option selection: ack the callback and return to the action state."""
    await callback_query.answer()
    await EssencePart.waiting_for_user_action.set()
    # Offer the next-actions keyboard alongside free-text entry.
    await callback_query.message.answer("Введите свой вариант самостоятельно. "
                                        "Или выберите дальнейшее действие с помощью клавиатуры",
                                        reply_markup=get_next_actions_kb())
def register_handlers(dp: Dispatcher):
    """Wire this part's message and callback handlers into the dispatcher."""
    # Section entry point, triggered by the "essence" keyboard button.
    dp.register_message_handler(essence_start, filters.Regexp(f"^{emojis.key} суть нарушения"))
    dp.register_message_handler(show_example,
                                filters.Regexp(f"^{emojis.red_question_mark} показать пример"),
                                state=EssencePart.states)
    dp.register_message_handler(action_selected, state=EssencePart.waiting_for_user_action)
    # Inline-keyboard option callbacks while choosing from the template list.
    dp.register_callback_query_handler(
        option_chosen,
        filters.Text(startswith="option"),
        state=EssencePart.waiting_for_option_chosen
    )
    dp.register_callback_query_handler(finish_option_choosing,
                                       filters.Text(equals="complete options"),
                                       state=EssencePart.waiting_for_option_chosen)
| 44.149425 | 100 | 0.724291 | 109 | 0.026386 | 0 | 0 | 1,315 | 0.318325 | 2,447 | 0.592351 | 797 | 0.192931 |
6193bd05256f7e3ff92d7c5240b4a667fe8be22a | 4,523 | py | Python | SCRWebService/message_handling/message_forwarder.py | tomzo/integration-adaptors | d4f296d3e44475df6f69a78a27fac6ed5b67513b | [
"Apache-2.0"
] | null | null | null | SCRWebService/message_handling/message_forwarder.py | tomzo/integration-adaptors | d4f296d3e44475df6f69a78a27fac6ed5b67513b | [
"Apache-2.0"
] | 4 | 2021-03-31T19:46:30.000Z | 2021-03-31T19:55:03.000Z | SCRWebService/message_handling/message_forwarder.py | tomzo/integration-adaptors | d4f296d3e44475df6f69a78a27fac6ed5b67513b | [
"Apache-2.0"
] | 2 | 2020-04-02T11:22:17.000Z | 2021-04-11T07:24:48.000Z | """Module related to processing of an outbound message"""
from typing import Dict, Optional
from utilities import integration_adaptors_logger as log
from builder.pystache_message_builder import MessageGenerationError
from message_handling.message_sender import MessageSender
import xml.etree.ElementTree as ET
logger = log.IntegrationAdaptorsLogger('MSG-HANDLER')
class MessageSendingError(Exception):
"""Error raised during message sending"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class MessageForwarder(object):
"""Class to provide message forwarding functionality, in particular hl7 message population is performed."""
def __init__(self, interactions: dict, message_sender: MessageSender):
"""
Constructor for the message forwarder
:param interactions: A dictionary mapping human readable interaction names to the object that is responsible
for populating the associated message template
"""
self.interactions = interactions
self.message_sender = message_sender
async def forward_message_to_mhs(self, interaction_name: str,
message_contents: Dict,
message_id: Optional[str],
correlation_id: Optional[str]):
"""
Handles forwarding a given interaction to the MHS, including populating the appropriate message template
:param interaction_name: The human readable name associated with a particular interaction
:param message_contents: The dictionary parsed from the json body
:param correlation_id:
:param message_id:
:return: None
"""
templator = self._get_interaction_template_populator(interaction_name)
populated_message = self._populate_message_template(templator, message_contents)
response = await self._send_message_to_mhs(interaction_id=templator.interaction_id,
message=populated_message,
message_id=message_id,
correlation_id=correlation_id)
return templator.parse_response(response)
def _get_interaction_template_populator(self, interaction_name: str):
"""
Retrieves the template populater object for the given interaction_name
:param interaction_name: Human readable interaction id
:return: A template populator
"""
interaction_template_populator = self.interactions.get(interaction_name)
if not interaction_template_populator:
logger.error('001', 'Failed to find interaction templator for interaction name: {name}',
{'name': interaction_name})
raise MessageGenerationError(f'Failed to find interaction with interaction name: {interaction_name}')
return interaction_template_populator
def _populate_message_template(self, template_populator, supplier_message_parameters: Dict) -> str:
"""
Generates a hl7 message string from the parameters
:param template_populator:
:param supplier_message_parameters: The parameters to be populated into the message template
:return: hl7 message string with the populated values
"""
try:
return template_populator.populate_template(supplier_message_parameters)
except Exception as e:
logger.error('002', 'Message generation failed {exception}', {'exception': e})
raise MessageGenerationError(str(e))
async def _send_message_to_mhs(self, interaction_id: str,
message: str,
message_id: Optional[str],
correlation_id: Optional[str]):
"""
Using the message sender dependency, the generated message is forwarded to the mhs
:param interaction_id: The interaction id used as part of the header
:param message: hl7 message body
:return: The response from the mhs of sending the
"""
try:
return await self.message_sender.send_message_to_mhs(interaction_id, message, message_id, correlation_id)
except Exception as e:
logger.error('003', 'Exception raised during message sending: {exception}', {'exception': e})
raise MessageSendingError(str(e))
| 47.610526 | 117 | 0.661729 | 4,152 | 0.917975 | 0 | 0 | 0 | 0 | 2,033 | 0.44948 | 1,856 | 0.410347 |
619557a199ed9607989d1a9b61ecbecc26b2b6f6 | 809 | py | Python | HackerRank/Alphabet Rangoli/solution.py | nikku1234/Code-Practise | 94eb6680ea36efd10856c377000219285f77e5a4 | [
"Apache-2.0"
] | 9 | 2020-07-02T06:06:17.000Z | 2022-02-26T11:08:09.000Z | HackerRank/Alphabet Rangoli/solution.py | nikku1234/Code-Practise | 94eb6680ea36efd10856c377000219285f77e5a4 | [
"Apache-2.0"
] | 1 | 2021-11-04T17:26:36.000Z | 2021-11-04T17:26:36.000Z | HackerRank/Alphabet Rangoli/solution.py | nikku1234/Code-Practise | 94eb6680ea36efd10856c377000219285f77e5a4 | [
"Apache-2.0"
] | 8 | 2021-01-31T10:31:12.000Z | 2022-03-13T09:15:55.000Z | import string
def print_rangoli(size):
    """Print an alphabet rangoli of the given size.

    The pattern is built from the bottom half: row ``i`` (0-based) joins the
    letters ``alphabets[i:size]`` with dashes, mirrors them around the first
    letter into a palindrome, and centers the result with '-' padding to a
    width of ``4*size - 3``.  The bottom half's mirror (minus the middle row)
    is printed above it, yielding vertical symmetry.
    """
    # Dead commented-out Python-2-style draft and debug prints removed.
    alphabets = string.ascii_lowercase
    width = 4 * size - 3
    rows = []
    for i in range(size):
        half = "-".join(alphabets[i:size])
        rows.append((half[::-1] + half[1:]).center(width, "-"))
    # rows[:0:-1] is the reversed bottom half excluding the widest (middle) row.
    print('\n'.join(rows[:0:-1] + rows))
| 25.28125 | 121 | 0.483313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 539 | 0.666255 |
61986b8629c565625618497302e6f724f841c131 | 779 | py | Python | src/visualize/visualize_checkpoint.py | Immocat/ACTOR | c7237e82e333bf2c57f7d8e12f27d0831233befc | [
"MIT"
] | 164 | 2021-09-06T12:43:39.000Z | 2022-03-29T02:33:38.000Z | src/visualize/visualize_checkpoint.py | Immocat/ACTOR | c7237e82e333bf2c57f7d8e12f27d0831233befc | [
"MIT"
] | 14 | 2021-09-17T00:42:24.000Z | 2022-03-07T04:18:12.000Z | src/visualize/visualize_checkpoint.py | Immocat/ACTOR | c7237e82e333bf2c57f7d8e12f27d0831233befc | [
"MIT"
] | 27 | 2021-09-07T04:38:38.000Z | 2022-03-29T00:37:10.000Z | import os
import matplotlib.pyplot as plt
import torch
from src.utils.get_model_and_data import get_model_and_data
from src.parser.visualize import parser
from .visualize import viz_epoch
import src.utils.fixseed # noqa
plt.switch_backend('agg')
def main():
    """Load the requested checkpoint and render its epoch visualisations."""
    parameters, folder, checkpointname, epoch = parser()

    model, datasets = get_model_and_data(parameters)
    dataset = datasets["train"]

    print("Restore weights..")
    checkpoint_path = os.path.join(folder, checkpointname)
    state_dict = torch.load(checkpoint_path, map_location=parameters["device"])
    model.load_state_dict(state_dict)

    viz_epoch(model, dataset, epoch, parameters, folder=folder, writer=None)


if __name__ == '__main__':
    main()
| 24.34375 | 78 | 0.741977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.112965 |
619f55e5cd0fde19ef49d00d5c74c41a8e978000 | 938 | py | Python | APIs/Biquery/src/infra/models/datasets.py | clarencejlee/jdp | d3d31db0138ff06f2f5ec592d85317941af4f280 | [
"MIT"
] | null | null | null | APIs/Biquery/src/infra/models/datasets.py | clarencejlee/jdp | d3d31db0138ff06f2f5ec592d85317941af4f280 | [
"MIT"
] | null | null | null | APIs/Biquery/src/infra/models/datasets.py | clarencejlee/jdp | d3d31db0138ff06f2f5ec592d85317941af4f280 | [
"MIT"
] | null | null | null | from enum import Enum
from tortoise import fields
from tortoise.models import Model
class Status(str, Enum):
    """Import lifecycle state of a dataset record (stored as its string value)."""
    PENDING = "pending"
    IMPORTED = "imported"
class Datasets(Model):
    """Tortoise ORM model for a dataset imported from an external provider."""
    id = fields.CharField(36, pk=True)  # 36-char string key (UUID-sized) -- confirm source of ids
    provider = fields.CharField(10, null=False)
    provider_id = fields.CharField(255, null=False)  # presumably the dataset's id at the provider
    name = fields.CharField(255, null=False)
    size = fields.IntField(default=0, null=False)
    status = fields.CharEnumField(Status, default=Status.PENDING)
    created_at = fields.DatetimeField(auto_now_add=True)  # set on insert
    updated_at = fields.DatetimeField(auto_now=True)  # refreshed on every save
    def serialize(self):
        """Return a JSON-friendly dict with camelCase keys and epoch-ms timestamps."""
        return {
            'id': self.id,
            'provider': self.provider,
            'name': self.name,
            'size': self.size,
            'status': self.status.value,
            # datetime.timestamp() is seconds; * 1000 converts to milliseconds.
            'createdAt': self.created_at.timestamp() * 1000,
            'updatedAt': self.updated_at.timestamp() * 1000,
        }
61a07bb292fcae2ff0accde2ff05fef346e1cb96 | 6,046 | py | Python | src/python/pipelines_utils/file_utils.py | InformaticsMatters/pipelines-utils | 07182505500fe2e3ab1db29332f6a7ec98b0c1b6 | [
"Apache-2.0"
] | null | null | null | src/python/pipelines_utils/file_utils.py | InformaticsMatters/pipelines-utils | 07182505500fe2e3ab1db29332f6a7ec98b0c1b6 | [
"Apache-2.0"
] | 33 | 2018-01-23T10:20:41.000Z | 2019-07-09T14:25:33.000Z | src/python/pipelines_utils/file_utils.py | InformaticsMatters/pipelines-utils | 07182505500fe2e3ab1db29332f6a7ec98b0c1b6 | [
"Apache-2.0"
] | 1 | 2018-10-31T06:04:08.000Z | 2018-10-31T06:04:08.000Z | #!/usr/bin/env python
# Copyright 2018 Informatics Matters Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
from . import utils
# Files are normally located in sub-directories of the pipeline module path.
# For example a pipeline module 'pipeline_a.py' that expects to use a file
# or SDF picker would place its files in the directory
# 'pipelines/demo/pipeline_a'.
def pick(filename, directory=None):
    """Return the path to *filename*, or None if it does not exist.

    When *directory* is omitted it defaults to a sub-directory named after
    the calling module.

    :param filename: The file, whose path is required.
    :type filename: ``str``
    :param directory: An optional directory.
        If not provided it is calculated automatically.
    :type directory: ``str``
    :return: The full path to the file, or None if it does not exist
    :rtype: ``str``
    """
    if directory is None:
        directory = utils.get_undecorated_calling_module()
    cwd = os.getcwd()
    # Outside a container (cwd != '/output') the directory is made relative
    # by stripping the CWD prefix and the following '/'.
    # NOTE(review): this stripping also applies to caller-supplied
    # directories, not just the auto-derived module path -- confirm intended.
    if cwd not in ['/output']:
        directory = directory[len(cwd) + 1:]
    candidate = os.path.join(directory, filename)
    if os.path.isfile(candidate):
        return candidate
    return None
def pick_sdf(filename, directory=None):
    """Return the full path to the chosen SDF file.

    The supplied *filename* must not carry an SDF extension; '.sdf.gz' is
    tried first, then '.sdf'.  The returned path *includes* the matched
    extension (the previous docstring wrongly claimed it was excluded).
    None is returned when neither file exists.

    :param filename: The SDF file basename, whose path is required.
    :type filename: ``str``
    :param directory: An optional directory.
        If not provided it is calculated automatically.
    :type directory: ``str``
    :return: The full path to the matched file (with extension),
        or None if it does not exist
    :rtype: ``str``
    """
    if directory is None:
        directory = utils.get_undecorated_calling_module()
    # If the 'cwd' is not '/output' (which indicates we're in a Container)
    # then remove the CWD and the anticipated '/'
    # from the front of the module
    if os.getcwd() not in ['/output']:
        directory = directory[len(os.getcwd()) + 1:]
    file_path = os.path.join(directory, filename)
    # Prefer the compressed variant, falling back to the plain file.
    for extension in ('.sdf.gz', '.sdf'):
        candidate = file_path + extension
        if os.path.isfile(candidate):
            return candidate
    # Couldn't find a suitable SDF file
    return None
def pick_csv(filename, directory=None):
    """Return the full path to the chosen CSV file.

    The supplied *filename* must not carry a CSV extension; '.csv.gz' is
    tried first, then '.csv'.  The returned path *includes* the matched
    extension (the previous docstring wrongly claimed it was excluded).
    None is returned when neither file exists.

    :param filename: The CSV file basename, whose path is required.
    :type filename: ``str``
    :param directory: An optional directory.
        If not provided it is calculated automatically.
    :type directory: ``str``
    :return: The full path to the matched file (with extension),
        or None if it does not exist
    :rtype: ``str``
    """
    if directory is None:
        directory = utils.get_undecorated_calling_module()
    # If the 'cwd' is not '/output' (which indicates we're in a Container)
    # then remove the CWD and the anticipated '/'
    # from the front of the module
    if os.getcwd() not in ['/output']:
        directory = directory[len(os.getcwd()) + 1:]
    file_path = os.path.join(directory, filename)
    # Prefer the compressed variant, falling back to the plain file.
    for extension in ('.csv.gz', '.csv'):
        candidate = file_path + extension
        if os.path.isfile(candidate):
            return candidate
    # Couldn't find a suitable CSV file
    return None
def pick_smi(filename, directory=None):
    """Return the full path to the chosen SMI file.

    The supplied *filename* must not carry an SMI extension; '.smi.gz' is
    tried first, then '.smi'.  The returned path *includes* the matched
    extension (the previous docstring wrongly claimed it was excluded).
    None is returned when neither file exists.

    :param filename: The SMI file basename, whose path is required.
    :type filename: ``str``
    :param directory: An optional directory.
        If not provided it is calculated automatically.
    :type directory: ``str``
    :return: The full path to the matched file (with extension),
        or None if it does not exist
    :rtype: ``str``
    """
    if directory is None:
        directory = utils.get_undecorated_calling_module()
    # If the 'cwd' is not '/output' (which indicates we're in a Container)
    # then remove the CWD and the anticipated '/'
    # from the front of the module
    if os.getcwd() not in ['/output']:
        directory = directory[len(os.getcwd()) + 1:]
    file_path = os.path.join(directory, filename)
    # Prefer the compressed variant, falling back to the plain file.
    for extension in ('.smi.gz', '.smi'):
        candidate = file_path + extension
        if os.path.isfile(candidate):
            return candidate
    # Couldn't find a suitable SMI file
    return None
| 40.306667 | 78 | 0.66391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,238 | 0.700959 |
61a0f5abd8c986c58949f55434b22326da67e0f2 | 13,154 | py | Python | devlin2/modular_square/rtl/gen_reduction_lut.py | supranational/-vdf-fpga-round3-results | 5914821abc400cc21b97b93af01a675f3f009fd3 | [
"BSD-3-Clause"
] | 44 | 2019-08-01T20:52:10.000Z | 2021-09-15T02:08:41.000Z | devlin2/modular_square/rtl/gen_reduction_lut.py | supranational/-vdf-fpga-round3-results | 5914821abc400cc21b97b93af01a675f3f009fd3 | [
"BSD-3-Clause"
] | null | null | null | devlin2/modular_square/rtl/gen_reduction_lut.py | supranational/-vdf-fpga-round3-results | 5914821abc400cc21b97b93af01a675f3f009fd3 | [
"BSD-3-Clause"
] | 19 | 2019-07-25T02:02:02.000Z | 2022-01-27T15:53:30.000Z | #!/usr/bin/python3
################################################################################
# Copyright 2019 Supranational LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
import getopt
################################################################################
# Parameters to set
################################################################################
# Default configuration; every value below can be overridden from the
# command line (see the getopt handling that follows).
REDUNDANT_ELEMENTS = 2
NONREDUNDANT_ELEMENTS = 8     # modulus width = NONREDUNDANT_ELEMENTS * WORD_LEN bits
NUM_SEGMENTS = 4
WORD_LEN = 16                 # bits per coefficient word
EXTRA_ELEMENTS = 2
NUM_URAM = 0                  # LUT elements placed in URAM; the rest go to BRAM
# TODO - we probably don't need these hardcoded values anymore
# Default modulus: a 2048-bit value when the datapath is 128 x 16-bit words
# (presumably a competition/test modulus -- TODO confirm), otherwise a small
# test modulus.
if (NONREDUNDANT_ELEMENTS == 128):
    M = 6314466083072888893799357126131292332363298818330841375588990772701957128924885547308446055753206513618346628848948088663500368480396588171361987660521897267810162280557475393838308261759713218926668611776954526391570120690939973680089721274464666423319187806830552067951253070082020241246233982410737753705127344494169501180975241890667963858754856319805507273709904397119733614666701543905360152543373982524579313575317653646331989064651402133985265800341991903982192844710212464887459388853582070318084289023209710907032396934919962778995323320184064522476463966355937367009369212758092086293198727008292431243681
else:
    M = 302934307671667531413257853548643485645
# Parse command-line overrides for the generation parameters.
# Fix: the short-option string was missing 'u:', so the advertised
# '-u <num_uram>' flag always raised GetoptError; the usage text also
# named non-existent '-nr'/'-wl' flags instead of the real '-n'/'-w'.
usage = ('gen_reduction_lut.py -M <modulus> -r <num redundant> '
         '-n <num nonredundant> -w <word length> -u <num_uram>')
try:
    opts, args = getopt.getopt(sys.argv[1:], "hM:r:n:w:u:",
                               ["modulus=", "redundant=",
                                "nonredundant=", "wordlen=", "urams="])
except getopt.GetoptError:
    print(usage)
    sys.exit(2)

for opt, arg in opts:
    if opt == '-h':
        print(usage)
        sys.exit()
    elif opt in ("-M", "--modulus"):
        M = int(arg)
    elif opt in ("-r", "--redundant"):
        REDUNDANT_ELEMENTS = int(arg)
    elif opt in ("-n", "--nonredundant"):
        NONREDUNDANT_ELEMENTS = int(arg)
    elif opt in ("-w", "--wordlen"):
        WORD_LEN = int(arg)
    elif opt in ("-u", "--urams"):
        NUM_URAM = int(arg)
print ()
print ('Parameter Values')
print ('---------------------')
print ('REDUNDANT_ELEMENTS ', REDUNDANT_ELEMENTS)
print ('NONREDUNDANT_ELEMENTS', NONREDUNDANT_ELEMENTS)
print ('WORD_LEN ', WORD_LEN)
print ('NUM_SEGMENTS ', NUM_SEGMENTS)
print ('EXTRA_ELEMENTS ', EXTRA_ELEMENTS)
print ('M ', hex(M))
print ()
################################################################################
# Calculated parameters
################################################################################
# Derived sizes; these must stay consistent with the parameters emitted
# into the generated RTL below.
SEGMENT_ELEMENTS = NONREDUNDANT_ELEMENTS // NUM_SEGMENTS
LUT_NUM_ELEMENTS = REDUNDANT_ELEMENTS + 2 * SEGMENT_ELEMENTS + EXTRA_ELEMENTS
LOOK_UP_WIDTH = WORD_LEN // 2        # each table is addressed by half a word
LUT_SIZE = 2 ** LOOK_UP_WIDTH
LUT_WIDTH = WORD_LEN * NONREDUNDANT_ELEMENTS
# Sanitize URAM and BRAM counts: at least one element must remain in BRAM.
NUM_URAM = min(NUM_URAM, LUT_NUM_ELEMENTS - 1)
NUM_BRAM = LUT_NUM_ELEMENTS - NUM_URAM
################################################################################
# Compute the reduction tables
################################################################################
# One .dat file is written per LUT element.  Each file holds two tables of
# LUT_SIZE entries (V7V6 first, then V5V4): entry j of a table is
# (j * 2^(degree * WORD_LEN)) mod M, written as LUT_WIDTH bits of hex.
print ('Creating', LUT_NUM_ELEMENTS, 'files')
print ('reduction_lut_{0:03d}.dat'.format(0))
print (' ... ')
print ('reduction_lut_{0:03d}.dat'.format(LUT_NUM_ELEMENTS-1))

# Polynomial degree offset for V7V6 (loop-invariant, hoisted out of the loop).
offset = SEGMENT_ELEMENTS * 2
# Each output line is LUT_WIDTH bits, i.e. LUT_WIDTH/4 hex digits.
digits = LUT_WIDTH // 4
for i in range(LUT_NUM_ELEMENTS):
    # Fix: the original built the filename through a pointless
    # list()/''.join() round trip and never closed the file on error;
    # use the name directly and a context manager instead.
    with open('reduction_lut_{0:03d}.dat'.format(i), 'w') as f:
        # Base reduction values for this coefficient degree; three-argument
        # pow() computes the modular power without a huge intermediate.
        t_v7v6 = pow(2, (i + NONREDUNDANT_ELEMENTS + offset) * WORD_LEN, M)
        t_v5v4 = pow(2, (i + NONREDUNDANT_ELEMENTS) * WORD_LEN, M)
        for base in (t_v7v6, t_v5v4):
            for j in range(LUT_SIZE):
                f.write(hex((base * j) % M)[2:].zfill(digits))
                f.write('\n')
################################################################################
# Generate RTL to read in files
################################################################################
# This should not really be necessary
# Required since parameterizing RTL to read in data was failing synthesis
# Writes reduction_lut.sv: a SystemVerilog module that holds the tables
# generated above (BRAM copies initialised with $readmemh, optional URAM
# copies loaded at runtime through a DIN_LEN-wide write port) and muxes
# the selected entries onto lut_data.
f = open('reduction_lut.sv', 'w')
emit = \
'''/*******************************************************************************
  Copyright 2019 Supranational LLC
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at
  http://www.apache.org/licenses/LICENSE-2.0
  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
*******************************************************************************/
module reduction_lut
   #(
     parameter int REDUNDANT_ELEMENTS = 2,
     parameter int NONREDUNDANT_ELEMENTS = 8,
     parameter int NUM_SEGMENTS = 4,
     parameter int WORD_LEN = 16,
     parameter int BIT_LEN = 17,
     parameter int DIN_LEN = 8,
     parameter int NUM_ELEMENTS = REDUNDANT_ELEMENTS+
                                  NONREDUNDANT_ELEMENTS,
     parameter int LOOK_UP_WIDTH = int\'(WORD_LEN / 2),
     parameter int SEGMENT_ELEMENTS = int\'(NONREDUNDANT_ELEMENTS /
                                           NUM_SEGMENTS),
     parameter int EXTRA_ELEMENTS = 2,
     parameter int LUT_NUM_ELEMENTS = REDUNDANT_ELEMENTS+EXTRA_ELEMENTS+
                                      (SEGMENT_ELEMENTS*2)
    )
   (
    input logic clk,
    input logic [LOOK_UP_WIDTH:0] lut_addr[LUT_NUM_ELEMENTS],
    input logic shift_high,
    input logic shift_overflow,
    output logic [BIT_LEN-1:0] lut_data[NUM_ELEMENTS][LUT_NUM_ELEMENTS],
'''
f.write(emit)
# With no URAMs the write-port signals below are unused; wrap them in
# Verilator lint waivers.
if NUM_URAM == 0:
   f.write("/* verilator lint_off UNUSED */")
emit = \
'''
    input we,
    input [DIN_LEN-1:0] din,
    input din_valid
'''
f.write(emit)
if NUM_URAM == 0:
   f.write("/* verilator lint_on UNUSED */")
emit = \
'''
   );
   // There is twice as many entries due to low and high values
   localparam int NUM_LUT_ENTRIES = 2**(LOOK_UP_WIDTH+1);
   localparam int LUT_WIDTH = WORD_LEN * NONREDUNDANT_ELEMENTS;
   localparam int NUM_URAM = %(NUM_URAM)s;
   localparam int NUM_BRAM = LUT_NUM_ELEMENTS-NUM_URAM;
   localparam int XFERS_PER_URAM = (LUT_WIDTH*NUM_LUT_ENTRIES)/DIN_LEN;
   logic [LUT_WIDTH-1:0] lut_read_data[LUT_NUM_ELEMENTS];
'''
# Substitute the Python-side URAM count into the localparam block.
f.write(emit % {'NUM_URAM':NUM_URAM})
##########################################################################
# URAM Only
##########################################################################
if NUM_URAM > 0:
   f.write("   logic [LUT_WIDTH-1:0] lut_read_data_uram[NUM_URAM];")
   emit = \
'''
   logic [NUM_URAM-1:0] we_uram;
   genvar i;
   generate
      for(i = 0; i < NUM_URAM; i++) begin : urams
         uram_wide #(.DATA_LEN(NONREDUNDANT_ELEMENTS*WORD_LEN),
                     .ADDR_LEN(LOOK_UP_WIDTH+1),
                     .DIN_LEN(DIN_LEN)
                     )
         u1(.clk(clk),
            .we(we_uram[i] && we),
            .din(din),
            .din_valid(din_valid),
            .addr(lut_addr[i]),
            .dout(lut_read_data_uram[i]));
      end
   endgenerate
   // Enable writing data into the URAMs
   logic [$clog2(XFERS_PER_URAM):0] write_uram_xfers;
   always_ff @(posedge clk) begin
      if(!we) begin
         we_uram <= {{(NUM_URAM-1){1'b0}}, 1'b1};
         write_uram_xfers <= XFERS_PER_URAM[$clog2(XFERS_PER_URAM):0];
      end else if(|we_uram && din_valid) begin
         if(write_uram_xfers == 1) begin
            we_uram <= we_uram << 1;
            write_uram_xfers <= XFERS_PER_URAM[$clog2(XFERS_PER_URAM):0];
         end else begin
            write_uram_xfers <= write_uram_xfers - 1;
         end
      end
   end
'''
   f.write(emit)
##########################################################################
# BRAM Only
##########################################################################
if NUM_BRAM > 0:
   f.write("   logic [LUT_WIDTH-1:0] lut_read_data_bram[NUM_BRAM];")
emit = \
'''
   logic [BIT_LEN-1:0] lut_output[NUM_ELEMENTS][LUT_NUM_ELEMENTS];
   // Delay to align with data from memory
   logic shift_high_1d;
   logic shift_overflow_1d;
   always_ff @(posedge clk) begin
      shift_high_1d <= shift_high;
      shift_overflow_1d <= shift_overflow;
   end
'''
f.write(emit)
# One inferred block ROM per BRAM-resident element, initialised from the
# matching reduction_lut_NNN.dat written earlier (indices start after the
# URAM-resident elements).
block_str = '   (* rom_style = "block" *) logic [LUT_WIDTH-1:0] lut_{0:03d}[NUM_LUT_ENTRIES];\n'
for i in range (NUM_BRAM):
   f.write(block_str.format(i+NUM_URAM))
read_str = '      $readmemh("reduction_lut_{0:03d}.dat", lut_{0:03d});\n'
f.write('\n   initial begin\n')
for i in range (NUM_BRAM):
   f.write(read_str.format(i+NUM_URAM))
f.write('   end\n')
#assign_str = '      lut_read_data[{0:d}] <= lut_{0:03d}[lut_addr[{0:d}]][LUT_WIDTH-1:0];\n'
assign_str = '      lut_read_data_bram[{0:d}] <= lut_{1:03d}[lut_addr[{1:d}]];\n'
f.write('\n   always_ff @(posedge clk) begin\n')
for i in range (NUM_BRAM):
   f.write(assign_str.format(i, i+NUM_URAM))
f.write('   end\n')
##########################################################################
# Mixed URAM/BRAM
##########################################################################
# Merge the URAM and BRAM read paths into a single lut_read_data array,
# then expand each wide word into per-element BIT_LEN outputs.
emit = \
'''
   // Read data out of the memories
   always_comb begin
'''
f.write(emit)
emit = \
'''
      for (int k=0; k<NUM_URAM; k=k+1) begin
         lut_read_data[k] = lut_read_data_uram[k];
      end
'''
if NUM_URAM > 0:
   f.write(emit)
emit = \
'''
      for (int k=0; k<NUM_BRAM; k=k+1) begin
         lut_read_data[k+NUM_URAM] = lut_read_data_bram[k];
      end
'''
if NUM_BRAM > 0:
   f.write(emit)
emit = \
'''
   end
   always_comb begin
      for (int k=0; k<LUT_NUM_ELEMENTS; k=k+1) begin
         for (int l=NONREDUNDANT_ELEMENTS; l<NUM_ELEMENTS; l=l+1) begin
            lut_output[l][k] = '0;
         end
      end
      for (int k=0; k<LUT_NUM_ELEMENTS; k=k+1) begin
         for (int l=0; l<NONREDUNDANT_ELEMENTS+1; l=l+1) begin
            // TODO - should be unique, fails when in reset
            if (shift_high_1d) begin
               if (l < NONREDUNDANT_ELEMENTS) begin
                  lut_output[l][k][BIT_LEN-1:LOOK_UP_WIDTH] =
                     {{(BIT_LEN-WORD_LEN){1'b0}},
                      lut_read_data[k][(l*WORD_LEN)+:LOOK_UP_WIDTH]};
               end
               if (l == 0) begin
                  lut_output[l][k][LOOK_UP_WIDTH-1:0] = \'0;
               end
               else begin
                  lut_output[l][k][LOOK_UP_WIDTH-1:0] =
                     lut_read_data[k][((l-1)*WORD_LEN)+LOOK_UP_WIDTH+:LOOK_UP_WIDTH];
               end
            end
            else if (shift_overflow_1d) begin
               if (l == 0) begin
                  lut_output[l][k] = \'0;
               end
               else begin
                  lut_output[l][k] =
                     {{(BIT_LEN-WORD_LEN){1\'b0}},
                      lut_read_data[k][((l-1)*WORD_LEN)+:WORD_LEN]};
               end
            end
            else begin
               if (l < NONREDUNDANT_ELEMENTS) begin
                  lut_output[l][k] =
                     {{(BIT_LEN-WORD_LEN){1\'b0}},
                      lut_read_data[k][(l*WORD_LEN)+:WORD_LEN]};
               end
            end
         end
      end
   end
   // Need above loops in combo block for Verilator to process
   always_comb begin
      lut_data = lut_output;
   end
endmodule
'''
f.write(emit)
f.close()
| 34.615789 | 623 | 0.541508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,587 | 0.728828 |
61a1dc570a9218509b77bdf2225164ed8e7e0a90 | 23,422 | py | Python | StinoStarter.py | geekroo/Stino | c3f416c807857fb4c2180219d984c4fcc83678d6 | [
"MIT"
] | 1 | 2016-12-28T05:36:20.000Z | 2016-12-28T05:36:20.000Z | StinoStarter.py | MHaroonBaig/Stino | c3f416c807857fb4c2180219d984c4fcc83678d6 | [
"MIT"
] | null | null | null | StinoStarter.py | MHaroonBaig/Stino | c3f416c807857fb4c2180219d984c4fcc83678d6 | [
"MIT"
] | 1 | 2018-11-25T19:36:45.000Z | 2018-11-25T19:36:45.000Z | #-*- coding: utf-8 -*-
# StinoStarter.py
import os
import sublime
import sublime_plugin
st_version = int(sublime.version())
if st_version < 3000:
import app
else:
from . import app
class SketchListener(sublime_plugin.EventListener):
    """Keeps Stino's active-sketch state and settings in sync with the editor."""

    def on_activated(self, view):
        # Remember the previously active sketch so a sketch switch can be
        # detected further down.
        pre_active_sketch = app.constant.global_settings.get('active_sketch', '')
        if not app.sketch.isInEditor(view):
            return
        app.active_file.setView(view)
        active_sketch = app.active_file.getSketchName()
        app.constant.global_settings.set('active_sketch', active_sketch)
        if app.active_file.isSrcFile():
            app.active_serial_listener.start()
            # 'temp_global' marks that global settings were forced on while a
            # non-source view was active; restore per-sketch mode now.
            temp_global = app.constant.global_settings.get('temp_global', False)
            if temp_global:
                app.constant.global_settings.set('global_settings', False)
                app.constant.global_settings.set('temp_global', False)
            global_settings = app.constant.global_settings.get('global_settings', True)
            if not global_settings:
                # Per-sketch settings: reload only when the sketch changed.
                if not (active_sketch == pre_active_sketch):
                    folder = app.active_file.getFolder()
                    app.constant.sketch_settings.changeFolder(folder)
                    app.arduino_info.refresh()
                    app.main_menu.refresh()
        else:
            app.active_serial_listener.stop()
            global_settings = app.constant.global_settings.get('global_settings', True)
            if not global_settings:
                # Temporarily fall back to global settings while no source
                # file is active; 'temp_global' lets the next activation undo it.
                app.constant.global_settings.set('global_settings', True)
                app.constant.global_settings.set('temp_global', True)
                folder = app.constant.stino_root
                app.constant.sketch_settings.changeFolder(folder)
                app.arduino_info.refresh()
                app.main_menu.refresh()

    def on_close(self, view):
        # When a serial monitor view closes, stop its monitor and release the
        # port.  The port name is taken from the view title ('... - <port>').
        if app.serial_monitor.isMonitorView(view):
            name = view.name()
            serial_port = name.split('-')[1].strip()
            if serial_port in app.constant.serial_in_use_list:
                cur_serial_monitor = app.constant.serial_monitor_dict[serial_port]
                cur_serial_monitor.stop()
                app.constant.serial_in_use_list.remove(serial_port)
class ShowArduinoMenuCommand(sublime_plugin.WindowCommand):
    """Toggle visibility of the Arduino menu."""

    def run(self):
        settings = app.constant.global_settings
        settings.set('show_arduino_menu', not settings.get('show_arduino_menu', True))
        app.main_menu.refresh()

    def is_checked(self):
        return app.constant.global_settings.get('show_arduino_menu', True)
class NewSketchCommand(sublime_plugin.WindowCommand):
    """Prompt for a name and create a new sketch with it."""

    def run(self):
        prompt = app.i18n.translate('Name for New Sketch')
        self.window.show_input_panel(prompt, '', self.on_done, None, None)

    def on_done(self, input_text):
        if not input_text:
            return
        new_file = app.base.newSketch(input_text)
        if not new_file:
            app.output_console.printText('A sketch (or folder) named "%s" already exists. Could not create the sketch.\n' % input_text)
            return
        self.window.open_file(new_file)
        app.arduino_info.refresh()
        app.main_menu.refresh()
class OpenSketchCommand(sublime_plugin.WindowCommand):
    """Open the given sketch folder (delegates to app.sketch)."""

    def run(self, folder):
        app.sketch.openSketchFolder(folder)
class ImportLibraryCommand(sublime_plugin.WindowCommand):
    """Import the chosen library folder into the active sketch."""

    def run(self, folder):
        target_view = app.active_file.getView()
        # Save first so the import operates on the on-disk file state.
        self.window.active_view().run_command('save')
        app.sketch.importLibrary(target_view, folder)

    def is_enabled(self):
        # Only meaningful while a sketch source file is active.
        return bool(app.active_file.isSrcFile())
class ShowSketchFolderCommand(sublime_plugin.WindowCommand):
    """Open the active sketch's folder via a file:// URL."""

    def run(self):
        sketch_folder = app.active_file.getFolder()
        sublime.run_command('open_url', {'url': 'file://' + sketch_folder})

    def is_enabled(self):
        return bool(app.active_file.isSrcFile())
class SetExtraFlagCommand(sublime_plugin.WindowCommand):
    """Prompt for extra compilation flags and store them in sketch settings."""

    def run(self):
        extra_flag = app.constant.sketch_settings.get('extra_flag', '')
        caption = app.i18n.translate('Extra compilation flags:')
        self.window.show_input_panel(caption, extra_flag, self.on_done, None, None)

    def on_done(self, input_text):
        # Fix: the original referenced the undefined names 'stino' and
        # 'extra_flags', raising NameError whenever the panel was confirmed.
        app.constant.sketch_settings.set('extra_flag', input_text)
class CompileSketchCommand(sublime_plugin.WindowCommand):
    """Save the active sketch and compile it."""

    def run(self):
        self.window.active_view().run_command('save')
        project = app.sketch.Project(app.active_file.getFolder())
        args = app.compiler.Args(project, app.arduino_info)
        app.compiler.Compiler(app.arduino_info, project, args).run()

    def is_enabled(self):
        return bool(app.active_file.isSrcFile())
class UploadSketchCommand(sublime_plugin.WindowCommand):
    """Save, compile, then upload the active sketch."""

    def run(self):
        self.window.active_view().run_command('save')
        project = app.sketch.Project(app.active_file.getFolder())
        args = app.compiler.Args(project, app.arduino_info)
        compiler = app.compiler.Compiler(app.arduino_info, project, args)
        compiler.run()
        app.uploader.Uploader(args, compiler).run()

    def is_enabled(self):
        return bool(app.active_file.isSrcFile())
class UploadUsingProgrammerCommand(sublime_plugin.WindowCommand):
    """Save, compile, then upload the active sketch via an external programmer."""

    def run(self):
        self.window.active_view().run_command('save')
        project = app.sketch.Project(app.active_file.getFolder())
        args = app.compiler.Args(project, app.arduino_info)
        compiler = app.compiler.Compiler(app.arduino_info, project, args)
        compiler.run()
        app.uploader.Uploader(args, compiler, mode = 'programmer').run()

    def is_enabled(self):
        # Enabled only for source files when the selected platform declares
        # at least one programmer.
        if not app.active_file.isSrcFile():
            return False
        platform_list = app.arduino_info.getPlatformList()
        platform_id = app.constant.sketch_settings.get('platform', -1)
        if not (0 < platform_id < len(platform_list)):
            return False
        return bool(platform_list[platform_id].getProgrammerList())
class ChooseBoardCommand(sublime_plugin.WindowCommand):
    """Select the target board (radio menu item)."""

    def run(self, platform, board):
        selected_platform = app.arduino_info.getPlatformList()[platform]
        settings = app.constant.sketch_settings
        settings.set('platform', platform)
        settings.set('platform_name', selected_platform.getName())
        settings.set('board', board)
        app.main_menu.refresh()
        # Request a full compilation after the board change.
        settings.set('full_compilation', True)

    def is_checked(self, platform, board):
        settings = app.constant.sketch_settings
        return (platform == settings.get('platform', -1)
                and board == settings.get('board', -1))
class ChooseBoardOptionCommand(sublime_plugin.WindowCommand):
    """Select one item of a board option.

    Selections are stored under the 'board_option' setting, keyed by
    '<platform index>.<board index>', as a list holding the chosen item
    index for each option of that board.
    """

    def run(self, board_option, board_option_item):
        # True once a selection list long enough for this option already
        # exists for the current board.
        has_setted = False
        chosen_platform = app.constant.sketch_settings.get('platform')
        chosen_board = app.constant.sketch_settings.get('board')
        board_id = str(chosen_platform) + '.' + str(chosen_board)
        board_option_settings = app.constant.sketch_settings.get('board_option', {})
        if board_id in board_option_settings:
            cur_board_option_setting = board_option_settings[board_id]
            if board_option < len(cur_board_option_setting):
                has_setted = True
        if not has_setted:
            # First selection for this board: build a zero-initialised list
            # with one slot per available option.
            platform_list = app.arduino_info.getPlatformList()
            cur_platform = platform_list[chosen_platform]
            board_list = cur_platform.getBoardList()
            cur_board = board_list[chosen_board]
            board_option_list = cur_board.getOptionList()
            board_option_list_number = len(board_option_list)
            cur_board_option_setting = []
            for i in range(board_option_list_number):
                cur_board_option_setting.append(0)
        cur_board_option_setting[board_option] = board_option_item
        board_option_settings[board_id] = cur_board_option_setting
        app.constant.sketch_settings.set('board_option', board_option_settings)
        # Request a full compilation after the option change.
        app.constant.sketch_settings.set('full_compilation', True)

    def is_checked(self, board_option, board_option_item):
        # Checked when this item is the stored selection for the option.
        state = False
        chosen_platform = app.constant.sketch_settings.get('platform', -1)
        chosen_board = app.constant.sketch_settings.get('board', -1)
        board_id = str(chosen_platform) + '.' + str(chosen_board)
        board_option_settings = app.constant.sketch_settings.get('board_option', {})
        if board_id in board_option_settings:
            cur_board_option_setting = board_option_settings[board_id]
            if board_option < len(cur_board_option_setting):
                chosen_board_option_item = cur_board_option_setting[board_option]
                if board_option_item == chosen_board_option_item:
                    state = True
        return state
class ChooseProgrammerCommand(sublime_plugin.WindowCommand):
    """Select the programmer to use for the given platform."""

    def run(self, platform, programmer):
        chosen = app.constant.sketch_settings.get('programmer', {})
        # Platform ids are stored as string keys.
        chosen[str(platform)] = programmer
        app.constant.sketch_settings.set('programmer', chosen)

    def is_checked(self, platform, programmer):
        chosen = app.constant.sketch_settings.get('programmer', {})
        key = str(platform)
        return key in chosen and chosen[key] == programmer
class BurnBootloaderCommand(sublime_plugin.WindowCommand):
    """Burn the bootloader using the current project's settings."""

    def run(self):
        project = app.sketch.Project(app.active_file.getFolder())
        args = app.compiler.Args(project, app.arduino_info)
        app.uploader.Bootloader(project, args).burn()

    def is_enabled(self):
        return bool(app.active_file.isSrcFile())
class ChooseSerialPortCommand(sublime_plugin.WindowCommand):
    """Select the serial port (radio menu item)."""

    def run(self, serial_port):
        app.constant.sketch_settings.set('serial_port', serial_port)

    def is_checked(self, serial_port):
        return serial_port == app.constant.sketch_settings.get('serial_port', -1)
class StartSerialMonitorCommand(sublime_plugin.WindowCommand):
    """Start (or resume) the serial monitor on the selected port."""

    def run(self):
        serial_port_id = app.constant.sketch_settings.get('serial_port', 0)
        serial_port_list = app.serial.getSerialPortList()
        serial_port = serial_port_list[serial_port_id]
        if serial_port in app.constant.serial_in_use_list:
            # Reuse the monitor that already owns this port.
            cur_serial_monitor = app.constant.serial_monitor_dict[serial_port]
        else:
            cur_serial_monitor = app.serial_monitor.SerialMonitor(serial_port)
            app.constant.serial_in_use_list.append(serial_port)
            app.constant.serial_monitor_dict[serial_port] = cur_serial_monitor
        cur_serial_monitor.start()
        # Immediately open the input panel for sending text to the port.
        self.window.run_command('send_serial_text')

    def is_enabled(self):
        # Enabled when a port is available and its monitor is not already
        # running.
        state = False
        serial_port_list = app.serial.getSerialPortList()
        if serial_port_list:
            serial_port_id = app.constant.sketch_settings.get('serial_port', 0)
            serial_port = serial_port_list[serial_port_id]
            if serial_port in app.constant.serial_in_use_list:
                cur_serial_monitor = app.constant.serial_monitor_dict[serial_port]
                if not cur_serial_monitor.isRunning():
                    state = True
            else:
                state = True
        return state
class StopSerialMonitorCommand(sublime_plugin.WindowCommand):
    """Stop the serial monitor attached to the selected port."""

    def run(self):
        port_index = app.constant.sketch_settings.get('serial_port', 0)
        port = app.serial.getSerialPortList()[port_index]
        app.constant.serial_monitor_dict[port].stop()

    def is_enabled(self):
        # Enabled only while the selected port's monitor is running.
        ports = app.serial.getSerialPortList()
        if not ports:
            return False
        port = ports[app.constant.sketch_settings.get('serial_port', 0)]
        if port not in app.constant.serial_in_use_list:
            return False
        return bool(app.constant.serial_monitor_dict[port].isRunning())
class SendSerialTextCommand(sublime_plugin.WindowCommand):
    """Prompt for text and transmit it through the running serial monitor."""

    def run(self):
        self.caption = 'Send'
        self.window.show_input_panel(self.caption, '', self.on_done, None, None)

    def on_done(self, input_text):
        port_index = app.constant.sketch_settings.get('serial_port', 0)
        port = app.serial.getSerialPortList()[port_index]
        app.constant.serial_monitor_dict[port].send(input_text)
        # Re-open the panel so several lines can be sent in a row.
        self.window.show_input_panel(self.caption, '', self.on_done, None, None)

    def is_enabled(self):
        ports = app.serial.getSerialPortList()
        if not ports:
            return False
        port = ports[app.constant.sketch_settings.get('serial_port', 0)]
        if port not in app.constant.serial_in_use_list:
            return False
        return bool(app.constant.serial_monitor_dict[port].isRunning())
class ChooseLineEndingCommand(sublime_plugin.WindowCommand):
    """Select the 'line_ending' setting (radio menu item)."""

    def run(self, line_ending):
        app.constant.sketch_settings.set('line_ending', line_ending)

    def is_checked(self, line_ending):
        return line_ending == app.constant.sketch_settings.get('line_ending', 0)
class ChooseDisplayModeCommand(sublime_plugin.WindowCommand):
    """Select the 'display_mode' setting (radio menu item)."""

    def run(self, display_mode):
        app.constant.sketch_settings.set('display_mode', display_mode)

    def is_checked(self, display_mode):
        return display_mode == app.constant.sketch_settings.get('display_mode', 0)
class ChooseBaudrateCommand(sublime_plugin.WindowCommand):
    """Select the serial baudrate; locked while a monitor is running."""

    def run(self, baudrate):
        app.constant.sketch_settings.set('baudrate', baudrate)

    def is_checked(self, baudrate):
        return baudrate == app.constant.sketch_settings.get('baudrate', -1)

    def is_enabled(self):
        # The baudrate may not change while the selected port's monitor runs.
        ports = app.serial.getSerialPortList()
        if not ports:
            return True
        port = ports[app.constant.sketch_settings.get('serial_port', 0)]
        if port not in app.constant.serial_in_use_list:
            return True
        return not app.constant.serial_monitor_dict[port].isRunning()
class AutoFormatCommand(sublime_plugin.WindowCommand):
    """Re-indent the whole active source file."""

    def run(self):
        self.window.run_command('reindent', {'single_line': False})

    def is_enabled(self):
        return bool(app.active_file.isSrcFile())
class ArchiveSketchCommand(sublime_plugin.WindowCommand):
    """Browse to a destination folder and archive the active sketch as a zip.

    The folder browser is a chain of quick panels; at every non-root level,
    entry 0 selects the folder named in that entry's label.
    """

    def run(self):
        # Start browsing from the OS root folders.
        self.top_folder_list = app.fileutil.getOSRootList()
        self.folder_list = self.top_folder_list
        self.level = 0
        self.show_panel()

    def show_panel(self):
        folder_name_list = app.fileutil.getFolderNameList(self.folder_list)
        # set_timeout: a quick panel cannot be reopened from its own on_done.
        sublime.set_timeout(lambda: self.window.show_quick_panel(folder_name_list, self.on_done), 10)

    def on_done(self, index):
        # Fix: dropped the dead 'is_finished' local from the original.
        if index == -1:
            return
        if self.level != 0 and index == 0:
            # Entry 0 selects the current folder; its label embeds the path
            # in parentheses (split on '(' and drop the trailing ')').
            chosen_folder = self.folder_list[index].split('(')[1][:-1]
            source_folder = app.active_file.getFolder()
            zip_file = os.path.join(chosen_folder,
                                    app.active_file.getSketchName() + '.zip')
            return_code = app.tools.archiveSketch(source_folder, zip_file)
            if return_code == 0:
                app.output_console.printText(app.i18n.translate('Writing {0} done.\n', [zip_file]))
            else:
                app.output_console.printText(app.i18n.translate('Writing {0} failed.\n', [zip_file]))
        else:
            # Descend into the chosen folder and show the next level.
            (self.folder_list, self.level) = app.fileutil.enterNextLevel(
                index, self.folder_list, self.level, self.top_folder_list)
            self.show_panel()

    def is_enabled(self):
        return bool(app.active_file.isSrcFile())
class ChooseArduinoFolderCommand(sublime_plugin.WindowCommand):
    """Browse the filesystem until an Arduino application folder is found."""

    def run(self):
        self.top_folder_list = app.fileutil.getOSRootList()
        self.folder_list = self.top_folder_list
        self.level = 0
        self.show_panel()

    def show_panel(self):
        folder_name_list = app.fileutil.getFolderNameList(self.folder_list)
        # set_timeout: a quick panel cannot be reopened from its own on_done.
        sublime.set_timeout(lambda: self.window.show_quick_panel(folder_name_list, self.on_done), 10)

    def on_done(self, index):
        # Fix: dropped the dead 'is_finished' local from the original.
        if index == -1:
            return
        chosen_folder = self.folder_list[index]
        if app.base.isArduinoFolder(chosen_folder):
            app.output_console.printText(app.i18n.translate('Arduino Application is found at {0}.\n', [chosen_folder]))
            app.constant.sketch_settings.set('arduino_folder', chosen_folder)
            app.arduino_info.refresh()
            app.main_menu.refresh()
            app.output_console.printText('Arduino %s.\n' % app.arduino_info.getVersionText())
            # Request a full compilation against the new toolchain.
            app.constant.sketch_settings.set('full_compilation', True)
        else:
            (self.folder_list, self.level) = app.fileutil.enterNextLevel(
                index, self.folder_list, self.level, self.top_folder_list)
            self.show_panel()
class ChangeSketchbookFolderCommand(sublime_plugin.WindowCommand):
    """Browse to a folder and make it the sketchbook folder."""

    def run(self):
        self.top_folder_list = app.fileutil.getOSRootList()
        self.folder_list = self.top_folder_list
        self.level = 0
        self.show_panel()

    def show_panel(self):
        folder_name_list = app.fileutil.getFolderNameList(self.folder_list)
        # set_timeout: a quick panel cannot be reopened from its own on_done.
        sublime.set_timeout(lambda: self.window.show_quick_panel(folder_name_list, self.on_done), 10)

    def on_done(self, index):
        # Fix: dropped the dead 'is_finished' local from the original.
        if index == -1:
            return
        if self.level != 0 and index == 0:
            # Entry 0 selects the current folder; its label embeds the path
            # in parentheses (split on '(' and drop the trailing ')').
            chosen_folder = self.folder_list[index].split('(')[1][:-1]
            app.output_console.printText(app.i18n.translate('Sketchbook is changed to {0}.\n', [chosen_folder]))
            app.constant.global_settings.set('sketchbook_folder', chosen_folder)
            app.arduino_info.refresh()
            app.main_menu.refresh()
            app.constant.sketch_settings.set('full_compilation', True)
        else:
            (self.folder_list, self.level) = app.fileutil.enterNextLevel(
                index, self.folder_list, self.level, self.top_folder_list)
            self.show_panel()
class ChooseBuildFolderCommand(sublime_plugin.WindowCommand):
    """Browse to a folder and make it the build output folder."""

    def run(self):
        self.top_folder_list = app.fileutil.getOSRootList()
        self.folder_list = self.top_folder_list
        self.level = 0
        self.show_panel()

    def show_panel(self):
        folder_name_list = app.fileutil.getFolderNameList(self.folder_list)
        # set_timeout: a quick panel cannot be reopened from its own on_done.
        sublime.set_timeout(lambda: self.window.show_quick_panel(folder_name_list, self.on_done), 10)

    def on_done(self, index):
        # Fix: dropped the dead 'is_finished' local from the original.
        if index == -1:
            return
        if self.level != 0 and index == 0:
            # Entry 0 selects the current folder; its label embeds the path
            # in parentheses (split on '(' and drop the trailing ')').
            chosen_folder = self.folder_list[index].split('(')[1][:-1]
            app.output_console.printText(app.i18n.translate('Build folder is changed to {0}.\n', [chosen_folder]))
            app.constant.sketch_settings.set('build_folder', chosen_folder)
            app.constant.sketch_settings.set('full_compilation', True)
        else:
            (self.folder_list, self.level) = app.fileutil.enterNextLevel(
                index, self.folder_list, self.level, self.top_folder_list)
            self.show_panel()
class ChooseLanguageCommand(sublime_plugin.WindowCommand):
    """Select the UI language and refresh translations and menus."""

    def run(self, language):
        # Avoid a needless refresh when the selection did not change.
        if language == app.constant.global_settings.get('language', -1):
            return
        app.constant.global_settings.set('language', language)
        app.i18n.refresh()
        app.main_menu.refresh()

    def is_checked(self, language):
        return language == app.constant.global_settings.get('language', -1)
class SetGlobalSettingCommand(sublime_plugin.WindowCommand):
    """Toggle between global and per-sketch settings storage."""

    def run(self):
        if app.active_file.isSrcFile():
            # Flip the mode and point the sketch settings at the matching
            # folder: the Stino root for global, the sketch folder otherwise.
            global_settings = not app.constant.global_settings.get('global_settings', True)
            app.constant.global_settings.set('global_settings', global_settings)
            if global_settings:
                folder = app.constant.stino_root
            else:
                folder = app.active_file.getFolder()
            app.constant.sketch_settings.changeFolder(folder)
            app.arduino_info.refresh()
            app.main_menu.refresh()
        else:
            # No source file active: only toggle the pending 'temp_global'
            # flag, which SketchListener applies on the next activation.
            temp_global = not app.constant.global_settings.get('temp_global', False)
            app.constant.global_settings.set('temp_global', temp_global)

    def is_checked(self):
        state = app.constant.global_settings.get('global_settings', True)
        return state

    def is_enabled(self):
        # Only toggleable while a sketch source file is active.
        state = False
        if app.active_file.isSrcFile():
            state = True
        return state
class SetFullCompilationCommand(sublime_plugin.WindowCommand):
    """Toggle the 'full_compilation' setting."""

    def run(self):
        settings = app.constant.sketch_settings
        settings.set('full_compilation', not settings.get('full_compilation', True))

    def is_checked(self):
        return app.constant.sketch_settings.get('full_compilation', True)
class ShowCompilationOutputCommand(sublime_plugin.WindowCommand):
    """Toggle the 'show_compilation_output' setting."""

    def run(self):
        settings = app.constant.sketch_settings
        settings.set('show_compilation_output',
                     not settings.get('show_compilation_output', False))

    def is_checked(self):
        return app.constant.sketch_settings.get('show_compilation_output', False)
class ShowUploadOutputCommand(sublime_plugin.WindowCommand):
    """Toggle the 'show_upload_output' setting."""

    def run(self):
        settings = app.constant.sketch_settings
        settings.set('show_upload_output',
                     not settings.get('show_upload_output', False))

    def is_checked(self):
        return app.constant.sketch_settings.get('show_upload_output', False)
class VerifyCodeCommand(sublime_plugin.WindowCommand):
    """Toggle the 'verify_code' setting."""

    def run(self):
        settings = app.constant.sketch_settings
        settings.set('verify_code', not settings.get('verify_code', False))

    def is_checked(self):
        return app.constant.sketch_settings.get('verify_code', False)
class OpenRefCommand(sublime_plugin.WindowCommand):
    """Resolve a reference id to a URL and open it in the browser."""

    def run(self, url):
        sublime.run_command('open_url', {'url': app.base.getUrl(url)})
class FindInReferenceCommand(sublime_plugin.WindowCommand):
    """Open the reference page for every keyword selected in the active view."""

    def run(self):
        keyword_ref_dict = app.arduino_info.getKeywordRefDict()
        view = app.active_file.getView()
        refs = []
        # Collect unique references for the selected keywords, preserving
        # the selection order.
        for word in app.base.getSelectedWordList(view):
            if word not in keyword_ref_dict:
                continue
            ref = keyword_ref_dict[word]
            if ref not in refs:
                refs.append(ref)
        for ref in refs:
            sublime.run_command('open_url', {'url': app.base.getUrl(ref)})

    def is_enabled(self):
        return bool(app.active_file.isSrcFile())
class AboutStinoCommand(sublime_plugin.WindowCommand):
    """Menu command: open the Stino project page on GitHub."""
    def run(self):
        sublime.run_command('open_url', {'url': 'https://github.com/Robot-Will/Stino'})
class PanelOutputCommand(sublime_plugin.TextCommand):
    """Append text to the end of this view and scroll the new text into view."""

    def run(self, edit, text):
        end = self.view.size()
        self.view.insert(edit, end, text)
        self.view.show(end)
class InsertIncludeCommand(sublime_plugin.TextCommand):
    """Insert an '#include' line right after the last existing include,
    or at the top of the file when there is none."""

    def run(self, edit, include_text):
        whole_file = sublime.Region(0, self.view.size())
        src_text = self.view.substr(whole_file)
        includes = app.preprocess.genIncludeList(src_text)
        if includes:
            last_include = includes[-1]
            insert_at = src_text.index(last_include) + len(last_include)
        else:
            insert_at = 0
        self.view.insert(edit, insert_at, include_text)
61a22ded87c71674219b6b523e264a085c06ad4b | 1,358 | py | Python | the-lambda-trilogy/python/src/lambdas/the_lambda_lith/lambda_function.py | InfrastructureHQ/CDK-Patterns | 3531f2df028e87c1455b8afe95563d7552abb7af | [
"MIT"
] | null | null | null | the-lambda-trilogy/python/src/lambdas/the_lambda_lith/lambda_function.py | InfrastructureHQ/CDK-Patterns | 3531f2df028e87c1455b8afe95563d7552abb7af | [
"MIT"
] | null | null | null | the-lambda-trilogy/python/src/lambdas/the_lambda_lith/lambda_function.py | InfrastructureHQ/CDK-Patterns | 3531f2df028e87c1455b8afe95563d7552abb7af | [
"MIT"
def lambda_handler(event):
    """Monolithic calculator handler.

    Reads 'firstNum', 'secondNum' and 'operation' from the API Gateway
    query-string parameters and dispatches to the matching arithmetic
    helper.  Missing numbers default to 0; a missing or unknown operation
    yields the string "No Operation".
    """
    try:
        first_num = event["queryStringParameters"]["firstNum"]
    except KeyError:
        first_num = 0
    try:
        second_num = event["queryStringParameters"]["secondNum"]
    except KeyError:
        second_num = 0
    try:
        operation_type = event["queryStringParameters"]["operation"]
        if operation_type == "add":
            result = add(first_num, second_num)
        elif operation_type == "subtract":
            result = subtract(first_num, second_num)
        # BUG FIX: this branch previously re-tested "subtract", which made
        # the multiply helper unreachable and sent "multiply" requests to
        # the "No Operation" fallback.
        elif operation_type == "multiply":
            result = multiply(first_num, second_num)
        else:
            result = "No Operation"
    except KeyError:
        return "No Operation"
    return result


def add(first_num, second_num):
    """Return an API-Gateway-style response dict with the sum."""
    result = int(first_num) + int(second_num)
    print("The result of % s + % s = %s" % (first_num, second_num, result))
    return {"body": result, "statusCode": 200}


def subtract(first_num, second_num):
    """Return an API-Gateway-style response dict with the difference."""
    result = int(first_num) - int(second_num)
    print("The result of % s - % s = %s" % (first_num, second_num, result))
    return {"body": result, "statusCode": 200}


def multiply(first_num, second_num):
    """Return an API-Gateway-style response dict with the product."""
    result = int(first_num) * int(second_num)
    print("The result of % s * % s = %s" % (first_num, second_num, result))
    return {"body": result, "statusCode": 200}
| 28.893617 | 75 | 0.620766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 298 | 0.21944 |
61a276c4ae721f1e18e33a6cdf82a3165f5b364f | 3,994 | py | Python | dqo/relational/tests/test_augmentation.py | danield137/deep_query_optimzation | 01a25c966338007f15d14dea1b37e388e47bcfe3 | [
"MIT"
] | null | null | null | dqo/relational/tests/test_augmentation.py | danield137/deep_query_optimzation | 01a25c966338007f15d14dea1b37e388e47bcfe3 | [
"MIT"
] | null | null | null | dqo/relational/tests/test_augmentation.py | danield137/deep_query_optimzation | 01a25c966338007f15d14dea1b37e388e47bcfe3 | [
"MIT"
] | null | null | null | from dqo.db.tests.datasets import employees_db_w_meta
from dqo.relational import SQLParser
from dqo.relational import parse_tree
# Module-level fixture: an employees database definition with metadata.
# NOTE(review): not referenced by the tests visible below -- confirm other
# tests in this module rely on it before removing.
test_db = employees_db_w_meta()
def test_condition_permutation():
    """A single-predicate query yields exactly two permutations that render
    to different SQL text while staying semantically equivalent."""
    sql = """
    SELECT MIN(employees.salary)
    FROM employees
    WHERE employees.id > 200
    """
    tree = SQLParser.to_relational_tree(sql)
    perms = tree.permutations()
    assert len(perms) == 2

    rendered = [
        parse_tree(p, keep_order=True).to_sql(pretty=False, alias=False)
        for p in perms
    ]
    # Every pair of permutations must render to distinct SQL text.
    for i, left in enumerate(rendered):
        for right in rendered[i + 1:]:
            assert left != right

    # ...but each permutation keeps the same selections, projections and
    # relations as the first one.
    baseline = perms[0]
    for perm in perms[1:]:
        assert len(list(perm.get_selections())) == len(list(baseline.get_selections()))
        assert len(list(perm.get_projections())) == len(list(baseline.get_projections()))
        assert len(list(perm.relations.keys())) == len(list(baseline.relations.keys()))
def test_join_permutation():
    """A two-join query yields four permutations that render to different
    SQL text while staying semantically equivalent."""
    sql = """
    SELECT MIN(employees.salary)
    FROM employees, departments, companies
    WHERE employees.id = departments.id AND companies.id = departments.id
    """
    tree = SQLParser.to_relational_tree(sql)
    perms = tree.permutations()
    assert len(perms) == 4

    rendered = [
        parse_tree(p, keep_order=True).to_sql(pretty=False, alias=False)
        for p in perms
    ]
    # Every pair of permutations must render to distinct SQL text.
    for i, left in enumerate(rendered):
        for right in rendered[i + 1:]:
            assert left != right

    # ...but each permutation keeps the same selections, projections and
    # relations as the first one.
    baseline = perms[0]
    for perm in perms[1:]:
        assert len(list(perm.get_selections())) == len(list(baseline.get_selections()))
        assert len(list(perm.get_projections())) == len(list(baseline.get_projections()))
        assert len(list(perm.relations.keys())) == len(list(baseline.relations.keys()))
def test_conditions_permutation():
    """Multiple selection predicates permute into distinct-but-equivalent
    query renderings (the exact permutation count is not pinned down)."""
    sql = """
    SELECT MIN(employees.salary)
    FROM employees
    WHERE employees.id > 1 AND employees.salary > 100 AND employees.salary < 200
    """
    tree = SQLParser.to_relational_tree(sql)
    perms = tree.permutations()

    rendered = [
        parse_tree(p, keep_order=True).to_sql(pretty=False, alias=False)
        for p in perms
    ]
    # Every pair of permutations must render to distinct SQL text.
    for i, left in enumerate(rendered):
        for right in rendered[i + 1:]:
            assert left != right

    # ...but each permutation keeps the same selections, projections and
    # relations as the first one.
    baseline = perms[0]
    for perm in perms[1:]:
        assert len(list(perm.get_selections())) == len(list(baseline.get_selections()))
        assert len(list(perm.get_projections())) == len(list(baseline.get_projections()))
        assert len(list(perm.relations.keys())) == len(list(baseline.relations.keys()))
def test_join_and_selection_permutations():
    """A mixed join + selection query permutes into distinct-but-equivalent
    renderings (the exact permutation count is not pinned down)."""
    sql = """
    SELECT MIN(employees.salary)
    FROM employees, departments
    WHERE employees.id > 1 AND employees.dept_id = departments.id
    """
    tree = SQLParser.to_relational_tree(sql)
    perms = tree.permutations()

    rendered = [
        parse_tree(p, keep_order=True).to_sql(pretty=False, alias=False)
        for p in perms
    ]
    # Every pair of permutations must render to distinct SQL text.
    for i, left in enumerate(rendered):
        for right in rendered[i + 1:]:
            assert left != right

    # ...but each permutation keeps the same selections, projections and
    # relations as the first one.
    baseline = perms[0]
    for perm in perms[1:]:
        assert len(list(perm.get_selections())) == len(list(baseline.get_selections()))
        assert len(list(perm.get_projections())) == len(list(baseline.get_projections()))
        assert len(list(perm.relations.keys())) == len(list(baseline.relations.keys()))
| 34.431034 | 102 | 0.662494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 982 | 0.245869 |
61a4375cdc7350ac4c1ad459005657ca06a87ebe | 1,326 | py | Python | src/newt/db/_ook.py | bmjjr/db | 39d3833f4458fcd20d09f383711745842b5db4f2 | [
"MIT"
] | 153 | 2017-01-24T16:55:00.000Z | 2022-03-21T08:24:13.000Z | src/newt/db/_ook.py | bmjjr/db | 39d3833f4458fcd20d09f383711745842b5db4f2 | [
"MIT"
] | 14 | 2017-01-25T17:04:49.000Z | 2021-12-05T19:26:35.000Z | src/newt/db/_ook.py | bmjjr/db | 39d3833f4458fcd20d09f383711745842b5db4f2 | [
"MIT"
] | 16 | 2017-01-25T07:25:17.000Z | 2022-03-21T08:24:16.000Z | import relstorage.storage
import ZODB.Connection
# Monkey patches, ook
def _ex_cursor(self, name=None):
    """Return a cursor (optionally a named one) on the RelStorage load
    connection, re-raising any stored stale error first."""
    if self._stale_error is not None:
        raise self._stale_error
    with self._lock:
        # _before_load() brings the load connection up to date before a
        # cursor is handed out on it.
        self._before_load()
        return self._load_conn.cursor(name)
# Monkey patch: expose the cursor accessor on RelStorage as ex_cursor.
relstorage.storage.RelStorage.ex_cursor = _ex_cursor
def _ex_connect(self):
    """Open and return a new database connection via the storage adapter's
    connection manager."""
    return self._adapter.connmanager.open()
# Monkey patch: expose the connection opener on RelStorage as ex_connect.
relstorage.storage.RelStorage.ex_connect = _ex_connect
def _ex_get(self, oid, ghost_pickle):
    """Return the persistent object with oid 'oid'.

    Monkey-patched variant of ZODB's Connection.get that builds the ghost
    from a caller-supplied pickle instead of loading it from storage.
    """
    # BUG FIX: the original referenced ConnectionStateError without
    # importing it, so hitting a closed connection raised NameError
    # instead of the intended exception.  Imported at function scope to
    # keep this patch module self-contained.
    from ZODB.POSException import ConnectionStateError
    if self.opened is None:
        raise ConnectionStateError("The database connection is closed")
    obj = self._cache.get(oid, None)
    if obj is not None:
        return obj
    obj = self._added.get(oid, None)
    if obj is not None:
        return obj
    obj = self._pre_cache.get(oid, None)
    if obj is not None:
        return obj
    obj = self._reader.getGhost(ghost_pickle)  # New code

    # Avoid infinite loop if obj tries to load its state before
    # it is added to the cache and its state refers to it.
    # (This will typically be the case for non-ghostifyable objects,
    # like persistent caches.)
    self._pre_cache[oid] = obj
    self._cache.new_ghost(oid, obj)
    self._pre_cache.pop(oid)
    return obj
# Monkey patch: expose the pickle-based getter on Connection as ex_get.
ZODB.Connection.Connection.ex_get = _ex_get
| 28.212766 | 71 | 0.697587 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 321 | 0.242081 |
61a4a530246434eb38e7c667db777d5e633e159d | 13,214 | py | Python | template1.py | gregwa1953/FCM160 | b5c0a912e140e78891f587d2d271c23bff7333c2 | [
"MIT"
] | null | null | null | template1.py | gregwa1953/FCM160 | b5c0a912e140e78891f587d2d271c23bff7333c2 | [
"MIT"
] | null | null | null | template1.py | gregwa1953/FCM160 | b5c0a912e140e78891f587d2d271c23bff7333c2 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# ======================================================
# template1.py
# ------------------------------------------------------
# Created for Full Circle Magazine Issue #160
# Written by G.D. Walters
# Copyright (c) 2020 by G.D. Walters
# This source code is released under the MIT License
# ======================================================
import sys
import os
import platform
import datetime
import sqlite3
from PIL import ImageTk, Image
from dbutils import quote
from fpdf import fpdf
from fpdf import FPDF
from fpdf import Template
# ======================================================
# Paths to the local SQLite cookbook database and its image folder.
# NOTE(review): neither path is referenced later in this script -- the
# sample recipes are inlined as literals below; confirm before removing.
dbname = "./tests/database/cookbook.db"
imagepath = "./tests/database/recipeimages/"
# The recipe information for the two sample recipes are defined as follows. These have been extracted
# from the actual database tables and converted into lists...
# ======================================================
# Parallel per-recipe accumulators: index i of each list describes recipe i.
recipe_table_dat = []
ingredients_table_dat = []
instructions_table_dat = []
cats_table_dat = []
images_table_dat = []
# --- Recipe 0: Mongolian Beef and Spring Onions -----------------------------
# Row mirrors the 'recipe' table: (id, title, source URL, servings, total
# time, rating, ...).  NOTE(review): the meaning of the trailing integer
# columns is not visible here -- confirm against the cookbook schema.
rtd = (
    166, 'Mongolian Beef and Spring Onions',
    'https://www.allrecipes.com/recipe/201849/mongolian-beef-and-spring-onions/',
    '4 serving(s)', '30 minutes', 5, 1, None,
    'A soy-based Chinese-style beef dish. Best served over soft rice noodles or rice.',
    1)
recipe_table_dat.append(rtd)
# ingredients table: each row ends with the ingredient's display text.
ing = [(1802, 166, None, None, None, '2 teaspoons vegetable oil'),
       (1803, 166, None, None, None, '1 tablespoon finely chopped garlic'),
       (1804, 166, None, None, None, '1/2 teaspoon grated fresh ginger root'),
       (1805, 166, None, None, None, '1/2 cup soy sauce'),
       (1806, 166, None, None, None, '1/2 cup water'),
       (1807, 166, None, None, None, '2/3 cup dark brown sugar'),
       (1808, 166, None, None, None,
        '1 pound beef flank steak, sliced 1/4 inch thick on the diagonal'),
       (1809, 166, None, None, None, '1/4 cup cornstarch'),
       (1810, 166, None, None, None, '1 cup vegetable oil for frying'),
       (1811, 166, None, None, None,
        '2 bunches green onions, cut in 2-inch lengths')]
ingredients_table_dat.append(ing)
# instructions: one newline-separated text blob per recipe.
instructions_table_dat.append((
    157, 166,
    'Heat 2 teaspoons of vegetable oil in a saucepan over medium heat, and cook and stir the garlic and ginger until they release their fragrance, about 30 seconds. Pour in the soy sauce, water, and brown sugar. Raise the heat to medium-high, and stir 4 minutes, until the sugar has dissolved and the sauce boils and slightly thickens. Remove sauce from the heat, and set aside.\nPlace the sliced beef into a bowl, and stir the cornstarch into the beef, coating it thoroughly. Allow the beef and cornstarch to sit until most of the juices from the meat have been absorbed by the cornstarch, about 10 minutes.\nHeat the vegetable oil in a deep-sided skillet or wok to 375 degrees F (190 degrees C).\nShake excess cornstarch from the beef slices, and drop them into the hot oil, a few at a time. Stir briefly, and fry until the edges become crisp and start to brown, about 2 minutes. Remove the beef from the oil with a large slotted spoon, and allow to drain on paper towels to remove excess oil.\nPour the oil out of the skillet or wok, and return the pan to medium heat. Return the beef slices to the pan, stir briefly, and pour in the reserved sauce. Stir once or twice to combine, and add the green onions. Bring the mixture to a boil, and cook until the onions have softened and turned bright green, about 2 minutes.\n\n'
))
# Cats: (recipe id, category name) pairs.
cats_table_dat.append([(166, 'Asian'), (166, 'Beef'), (166, 'Main Dish')])
# Images: (image id, recipe id, relative image path).
images_table_dat.append((95, 166, './MongolianBeefandSpringOnions.png'))
# --- Recipe 1: Amish White Bread --------------------------------------------
# Amish White Bread
recipe_table_dat.append((
    154, 'Amish White Bread',
    'https://www.allrecipes.com/recipe/6788/amish-white-bread/',
    '24 serving(s)', '150 minutes', 4, 1, None,
    "I got this recipe from a friend. It is very easy, and doesn't take long to make.\n",
    1))
ingredients_table_dat.append([
    (1669, 154, None, None, None,
     '2 cups warm water (110 degrees F/45 degrees C)'),
    (1670, 154, None, None, None, '2/3 cup white sugar'),
    (1671, 154, None, None, None, '1 1/2 tablespoons active dry yeast'),
    (1672, 154, None, None, None, '1 1/2 teaspoons salt'),
    (1673, 154, None, None, None, '1/4 cup vegetable oil'),
    (1674, 154, None, None, None, '6 cups bread flour')
])
instructions_table_dat.append((
    145, 154,
    'In a large bowl, dissolve the sugar in warm water, and then stir in yeast. Allow to proof until yeast resembles a creamy foam.\nMix salt and oil into the yeast. Mix in flour one cup at a time. Knead dough on a lightly floured surface until smooth. Place in a well oiled bowl, and turn dough to coat. Cover with a damp cloth. Allow to rise until doubled in bulk, about 1 hour.\nPunch dough down. Knead for a few minutes, and divide in half. Shape into loaves, and place into two well oiled 9x5 inch loaf pans. Allow to rise for 30 minutes, or until dough has risen 1 inch above pans.\nBake at 350 degrees F (175 degrees C) for 30 minutes.\n\n\n'
))
# NOTE(review): 'Breads' appears twice in the source data; kept verbatim.
cats_table_dat.append([(154, 'Breads'), (154, 'Breads')])
images_table_dat.append((79, 154, './AmishWhiteBread.png'))
# "Crack" Chicken
recipe_table_dat.append((
366, 'Crack Chicken', 'https://www.dinneratthezoo.com/crack-chicken/', '6',
'4 hours 5 minutes', '4.5', 1, None,
"This crack chicken is creamy ranch flavored chicken that's cooked in the crock pot until tender. A super easy slow cooker recipe that only contains 3 ingredients.\n",
1))
ingredients_table_dat.append([
(4821, 366, None, None, None, '2 lbs boneless skinless chicken breasts'),
(4822, 366, None, None, None, '1 ounce packet ranch seasoning'),
(4823, 366, None, None, None, '16 ounces cream cheese cut into cubes'),
(4824, 366, None, None, None,
'cooked crumbled bacon and green onions for serving optional')
])
instructions_table_dat.append((
361, 366,
'Place the chicken breasts, ranch seasoning and cream cheese in a slow cooker.\nCook on HIGH for 4 hours or LOW for 6-8 hours.\nShred the chicken with two forks. Stir until everything is thoroughly combined.\nServe, topped with bacon and green onions if desired.\n'
))
cats_table_dat.append([(366, 'American'), (366, 'Chicken'), (366, 'Main Dish'),
(366, 'Sandwich')])
images_table_dat.append((319, 366, './CrackChicken.png'))
# End of recipe information definition
# Template...
def _element(name, etype, x1, y1, x2, y2, *, font='Arial', size=0,
             bold=0, italic=0, text='', priority=2, **extra):
    """Build one fpdf Template element dict, filling in the shared defaults.

    Keys that only some elements carry (e.g. 'multiline') are passed via
    **extra so the resulting dicts have exactly the original key sets.
    """
    spec = {
        'name': name,
        'type': etype,
        'x1': x1,
        'y1': y1,
        'x2': x2,
        'y2': y2,
        'font': font,
        'size': size,
        'bold': bold,
        'italic': italic,
        'underline': 0,
        'foreground': 0,
        'background': 0,
        'align': 'L',
        'text': text,
        'priority': priority,
    }
    spec.update(extra)
    return spec


# Page layout for the recipe PDF ('T' = text, 'I' = image, 'W' = wrapped text).
elements = [
    _element('header', 'T', 17.0, 8.0, 0, 0, size=8),
    _element('title', 'T', 17, 26, 0, 0, size=22, bold=1, italic=1),
    _element('recipeimage', 'I', 17, 25, 80, 89, font=None, text='image'),
    # NOTE(review): 'ingreidentshead' looks misspelled ('ingredients');
    # kept verbatim because renaming would change the template's element
    # names -- its heading comes from the default 'text' value anyway.
    _element('ingreidentshead', 'T', 17, 220, 0, 0, size=12, bold=1,
             italic=1, text='Ingredients:'),
    _element('ingredientitems', 'W', 17, 115, 90, 400, size=11,
             multiline=True),
    _element('instructionhead', 'T', 17, 360, 0, 0, size=12, bold=1,
             italic=1, text='Instructions:'),
    _element('instructions', 'W', 17, 185, 160, 400, size=11, priority=1,
             multiline=True),
    _element('description', 'T', 85, 28, 200, 35, size=12, bold=1, italic=1,
             multiline=True),
    _element('source', 'T', 85, 60, 200, 66, size=10, bold=1, italic=1,
             multiline=True),
    _element('servings', 'T', 26, 95, 84, 98, size=10, bold=1,
             multiline=False),
    _element('time', 'T', 86, 95, 145, 98, size=10, bold=1, multiline=False),
    _element('rating', 'T', 148, 95, 200, 98, size=10, bold=1,
             multiline=False),
]
def create_pdf(which):
    """Render recipe number `which` (index into the module-level data
    lists) to './<recipe title>.pdf' using the `elements` template."""
    recipetitle = recipe_table_dat[which][1]
    recipeid = recipe_table_dat[which][0]
    f = Template(format="Letter", elements=elements, title="Recipe Printout")
    print(f'Elements is a {type(elements)} structure')
    f.add_page()
    # Fill the template fields with the recipe's data; the Template
    # instance is accessed like a dict, keyed by element name.
    f["header"] = f"Greg's cookbook - {recipetitle} - Recipe ID {recipeid}"
    f["title"] = recipetitle  # 'Mongolian Beef and Spring Onions'
    f["recipeimage"] = images_table_dat[which][2]
    f["description"] = recipe_table_dat[which][8]
    f["source"] = recipe_table_dat[which][2]
    f['servings'] = f'Servings: {recipe_table_dat[which][3]}'
    f['time'] = f'Total Time: {recipe_table_dat[which][4]}'
    f['rating'] = f'Rating: {recipe_table_dat[which][5]}'
    # Join the display text (column 5) of every ingredient row into one
    # newline-separated block for the multiline template field.
    ings = ''
    for itm in range(len(ingredients_table_dat[which])):
        ings = ings + ingredients_table_dat[which][itm][5] + "\n"
    f["ingredientitems"] = ings
    f["instructions"] = instructions_table_dat[which][2]
    # BUG FIX: the original also evaluated f["ingredientshead"] and
    # f["instructionhead"] as bare expressions -- no-ops at best (and the
    # first element is actually named 'ingreidentshead').  The section
    # headings come from the elements' default 'text' values, so those
    # statements were removed.

    # Render the filled template to a PDF next to the script.
    filename = f'./{recipetitle}.pdf'
    f.render(filename)
    print(f'\n\n{"=" * 45}')
    print(' PDF has been generated')
    print(' Please open the PDF manually')
    print('=' * 45)
    print('\n\n')
def menu():
    """Print the recipe menu and read one selection from the user.

    Returns the chosen option as a string ('1', '2' or '3'), -1 for any
    invalid entry, and exits the process when '0' is entered.
    """
    print('Please select a recipe...')
    print('1 - Mongolian Beef and Spring Onions')
    print('2 - Amish White Bread')
    print('3 - "Crack" Chicken')
    choice = input('Please enter 1, 2, 3 or 0 to quit --> ')
    if choice == "0":
        print('Exiting program!')
        sys.exit(0)
    return choice if choice in ("1", "2", "3") else -1
def mainroutine():
    """Prompt the user in an endless loop and render each chosen recipe."""
    while True:
        choice = menu()
        if choice == -1:
            print('Invalid selection. Please try again')
            continue
        print(f'Requested recipe: {choice} \n')
        create_pdf(int(choice) - 1)
# Script entry point: run the interactive menu.
if __name__ == '__main__':
    mainroutine()
61a585a44deae31d51275f2f707d00d0ecb8a314 | 1,744 | py | Python | arimaModel.py | gehadnaser/fraud-detection_stockmarket | 96c70daf304faa415435fdc457eba832ff9737bc | [
"MIT"
] | null | null | null | arimaModel.py | gehadnaser/fraud-detection_stockmarket | 96c70daf304faa415435fdc457eba832ff9737bc | [
"MIT"
] | null | null | null | arimaModel.py | gehadnaser/fraud-detection_stockmarket | 96c70daf304faa415435fdc457eba832ff9737bc | [
"MIT"
] | null | null | null | import numpy as np, pandas as pd
import math
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import adfuller, kpss, acf
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.figsize': (9, 7), 'figure.dpi': 120})
# Import data
def Read(name):
df = pd.read_csv(name + '.csv')
# get the Volume colume length
row_count=len(df)-1
#divide the length into equal haves
half_rowcount=row_count/2
# round up the length in case of float
count = math.ceil(half_rowcount)
# Create Training and Test
train = df.Volume[:count]
test = df.Volume[count:]
# 1,1,1 ARIMA Model
model = ARIMA(df.Volume, order=(1, 1, 1))
model_fit = model.fit(disp=0)
#print(model_fit.summary())
# Build Model
model = ARIMA(train, order=(1, 1, 1))
fitted = model.fit(disp=-1)
print(fitted.summary())
# Forecast
fc, se, conf = fitted.forecast(count, alpha=0.05) # 95% conf
# Make as pandas series
fc_series = pd.Series(fc, index=test.index)
lower_series = pd.Series(conf[:, 0], index=test.index)
upper_series = pd.Series(conf[:, 1], index=test.index)
# Plot
plt.figure(figsize=(12, 5), dpi=100)
plt.plot(train, label='training')
plt.plot(test, label='actual')
plt.plot(fc_series, label='forecast')
plt.fill_between(lower_series.index, lower_series, upper_series,
color='k', alpha=.15)
plt.title('Forecast vs Actuals')
plt.legend(loc='upper left', fontsize=8)
plt.show()
result = adfuller(df['Volume'],autolag='AIC')
if result[1] > 0.05:
print("fraud ")
else:
print("not fraud")
Read('foodico')
| 28.590164 | 69 | 0.62156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 401 | 0.229931 |
61a6fedac82e9e4878bc3c795bf52464ab6e2eb9 | 4,388 | py | Python | code/preprocess/data_process.py | hms-dbmi/VarPPUD | 316a45f33c12dfecadb17fa41b699ef95096a623 | [
"Apache-2.0"
] | null | null | null | code/preprocess/data_process.py | hms-dbmi/VarPPUD | 316a45f33c12dfecadb17fa41b699ef95096a623 | [
"Apache-2.0"
] | null | null | null | code/preprocess/data_process.py | hms-dbmi/VarPPUD | 316a45f33c12dfecadb17fa41b699ef95096a623 | [
"Apache-2.0"
] | 1 | 2022-01-18T17:14:31.000Z | 2022-01-18T17:14:31.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 15:38:54 2020
@author: rayin
"""
# pic-sure api lib
import PicSureHpdsLib
import PicSureClient
# python_lib for pic-sure
# https://github.com/hms-dbmi/Access-to-Data-using-PIC-SURE-API/tree/master/NIH_Undiagnosed_Diseases_Network
from python_lib.HPDS_connection_manager import tokenManager
from python_lib.utils import get_multiIndex_variablesDict
# analysis
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work")
#loading raw input patient data extracted by PIC-SURE from UDN
raw_data_all = pd.read_csv("data/raw/raw_data_all.csv")
#inclusion criteria
#exclude the cases with missing values of candidate gene and variant interpretation
case_data_with_gene = []
case_data_without_gene = []
for i in range(0, len(raw_data_all)):
if pd.isna(raw_data_all[raw_data_all.columns.values[21]].iloc[i]) or pd.isna(raw_data_all[raw_data_all.columns.values[26]].iloc[i]):
case_data_without_gene.append(raw_data_all.iloc[i])
else:
case_data_with_gene.append(raw_data_all.iloc[i])
#reformat
case_data_with_gene = pd.DataFrame(case_data_with_gene).reset_index()
case_data_with_gene = case_data_with_gene.iloc[:, 1:39]
case_data_without_gene = pd.DataFrame(case_data_without_gene).reset_index()
case_data_without_gene = case_data_without_gene.iloc[:, 1:39]
#filter the samples by row, axis=0 delete row and by column, axis = 1 delete column
def data_filter(df):
row_list = []
row_count = df.shape[1]
for i in range(0, df.shape[0]):
if df.iloc[i].isna().sum() > row_count/(2/3):
print(i)
row_list.append(i)
df_delete_row = df.drop(labels=row_list, axis=0) #inplace=True
df_delete_row.reset_index(drop=True, inplace=True)
column_count = df_delete_row.shape[0]
column_list = []
for j in range(0, df_delete_row.shape[1]):
if df_delete_row[df_delete_row.columns.values[j]].isna().sum() > column_count/2:
column_list.append(j)
drop_column = []
for i in range(0, len(column_list)):
drop_column.append(df_delete_row.columns.values[column_list[i]])
df_filter = df_delete_row.drop(labels=drop_column, axis=1)
return(df_filter)
case_data_with_gene_filter = data_filter(case_data_with_gene)
#statistics and visualization
column_name = list(case_data_with_gene_filter.columns.values)
case_data_with_gene_filter[column_name[2]].describe()
#Variant interpretation. Remove the rejected and under investigation cases.
Counter(case_data_with_gene_filter[column_name[20]])
case_gene_filter_labeled = case_data_with_gene_filter[case_data_with_gene_filter['\\11_Candidate genes\\Status\\'] != 'rejected']
case_gene_filter_labeled = case_gene_filter_labeled[case_gene_filter_labeled['\\12_Candidate variants\\03 Interpretation\\'] != 'investigation_n']
#define 'benign', 'likely benign' and 'uncertain' as 'less pathogenic', 'likely pathogenic' and 'pathogenic' as pathogenic'.
case_gene_filter_labeled = case_gene_filter_labeled.replace('benign', 'less_pathogenic')
case_gene_filter_labeled = case_gene_filter_labeled.replace('likely_benign', 'less_pathogenic')
case_gene_filter_labeled = case_gene_filter_labeled.replace('variant_u_s', 'less_pathogenic')
#case_gene_filter_labeled = case_gene_filter_labeled.replace('investigation_n', 'less_pathogenic')
case_gene_filter_labeled = case_gene_filter_labeled.replace('likely_pathogenic', 'pathogenic')
case_gene_filter_labeled.to_csv("data/processed/case_gene_filter_labeled.csv") #521 cases
#Manually remove the cases with unknown or incorrect gene names ('Exon-level microarray', '22q11.2 FISH', '20p13 duplication', etc.) and
#6 cases are excluded (index (after index_reset): 4, 55, 334, 408, 422, 496)
#Loading cases after manual curation from file case_gene_update.csv'
case_gene_update = pd.read_csv('data/processed/case_gene_update.csv', index_col=0) #515 cases
column_name = list(case_gene_update.columns.values)
protein_var = case_gene_update['\\12_Candidate variants\\09 Protein\\']
#Manual curation to remove cases with missing candidate variants or complex variants (e.g., long deletion and duplication)
#Export a clean version named 'variant_clean.csv'
| 33.496183 | 146 | 0.76618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,772 | 0.403829 |
61a97b1267f108543615dbe34e597a0abe6754f0 | 672 | py | Python | scripts/energy.py | rinku-mishra/PPDyn | b9e255e673998b9dc9f9753efa44c0c036020bf4 | [
"MIT"
] | 2 | 2021-03-20T23:15:42.000Z | 2021-04-30T14:46:45.000Z | scripts/energy.py | rinku-mishra/PPDyn | b9e255e673998b9dc9f9753efa44c0c036020bf4 | [
"MIT"
] | 20 | 2021-03-25T10:59:29.000Z | 2021-06-15T21:46:09.000Z | scripts/energy.py | rinku-mishra/PPDyn | b9e255e673998b9dc9f9753efa44c0c036020bf4 | [
"MIT"
] | 2 | 2021-03-21T12:20:43.000Z | 2021-06-14T11:11:59.000Z | #!/usr/bin/env python3
import numpy as np
import h5py
import matplotlib.pyplot as plt
# import plotly.graph_objects as go
# ========= Configuration ===========
DIR = "../data"
file_name = "particle"  # alternatives: "rhoNeutral", "P"

# Open the HDF5 output file produced by the simulation.
h5 = h5py.File('../data/' + file_name + '.hdf5', 'r')

# Run parameters stored as attributes on the HDF5 root.
Lx = h5.attrs["Lx"]
Ly = h5.attrs["Ly"]
Lz = h5.attrs["Lz"]
N = h5.attrs["N"]
dp = h5.attrs["dp"]
Nt = h5.attrs["Nt"]

# Physical time axis: one sample per dump, spaced by dp.
data_num = np.arange(start=0, stop=Nt, step=1, dtype=int)
time = data_num * dp

# Per-particle energy (factor 3 as in the original normalisation),
# dropping the final sample.
energy = 3 * np.array(h5["/energy"][:-1]) / N

fig, ax = plt.subplots(figsize=(6, 6))
plt.plot(time[1:], energy[1:])
ax.set_xlabel("$timestep$")
ax.set_ylabel("$Energy$")
plt.show()
61a99ae0d6b78d1440e41c906d9b4fb49d1d3957 | 882 | py | Python | webvulnscan/request.py | hhucn/webvulnscan | efb812fd5483157528f37794acecafa35ed0d878 | [
"MIT"
] | 40 | 2015-01-15T14:52:51.000Z | 2022-03-25T08:52:48.000Z | webvulnscan/request.py | RaviRaaja/webvulnscan | efb812fd5483157528f37794acecafa35ed0d878 | [
"MIT"
] | 1 | 2016-07-21T09:51:15.000Z | 2016-10-02T17:45:37.000Z | webvulnscan/request.py | RaviRaaja/webvulnscan | efb812fd5483157528f37794acecafa35ed0d878 | [
"MIT"
] | 22 | 2015-01-23T04:21:21.000Z | 2021-08-22T03:36:21.000Z | import copy
import sys
from . import compat
from .compat import urlencode, parse_qs
class Request(compat.Request):
    """compat.Request that accepts a dict of form parameters as its body.

    The parameters are url-encoded (and, on Python 3, encoded to UTF-8
    bytes) before being handed to the underlying Request class.
    """

    def __init__(self, url, parameters=None, headers=None):
        self.parameters = parameters
        if parameters is None:
            data = None
        else:
            if sys.version_info >= (3, 0):
                data = urlencode(parameters).encode('utf-8')
            else:
                # Python 2: urlencode expects byte strings on both sides.
                encoded = dict(
                    (key.encode('utf-8'), value.encode('utf-8'))
                    for key, value in parameters.items())
                data = urlencode(encoded)
            assert isinstance(data, bytes)
        if headers is None:
            headers = {}
        compat.Request.__init__(self, url, data, headers)

    def copy(self):
        """Return a shallow copy of this request."""
        return copy.copy(self)

    @property
    def url(self):
        """The request's full URL, including the query string."""
        return self.get_full_url()
61aa135945511486eea03b3338aef406ccf3474a | 1,123 | py | Python | api/api.py | mshen63/DevUp | b0e0d9a4a442cd5b51fb60823ed0a0837c440442 | [
"MIT"
] | 2 | 2022-02-13T17:02:05.000Z | 2022-03-01T10:30:02.000Z | api/api.py | mshen63/DevUp | b0e0d9a4a442cd5b51fb60823ed0a0837c440442 | [
"MIT"
] | 62 | 2021-08-03T17:53:17.000Z | 2021-08-31T15:58:24.000Z | api/api.py | mshen63/DevUp | b0e0d9a4a442cd5b51fb60823ed0a0837c440442 | [
"MIT"
] | 4 | 2021-08-04T15:11:06.000Z | 2021-08-29T19:58:42.000Z | import os
from flask import Flask
from dotenv import load_dotenv
from flask_cors import CORS
from flask_mail import Mail
# Database and endpoints
from flask_migrate import Migrate
from models.models import db
from endpoints.exportEndpoints import exportEndpoints
load_dotenv()
# Serve the compiled front-end build from the web root alongside the API.
app = Flask(__name__, static_folder="../build", static_url_path="/")
app.register_blueprint(exportEndpoints)
CORS(app)
# Outgoing mail via Mailtrap's sandbox SMTP; the port comes from the env.
app.config["MAIL_SERVER"] = "smtp.mailtrap.io"
app.config["MAIL_PORT"] = os.getenv("MAIL_PORT")
app.config["MAIL_USE_TLS"] = True
app.config["MAIL_USE_SSL"] = False
mail = Mail(app)
# NOTE(review): hard-coded secret key -- acceptable only for local
# development; move to an environment variable before deploying.
app.secret_key = "development key"
# PostgreSQL connection string assembled from environment variables.
app.config[
    "SQLALCHEMY_DATABASE_URI"
] = "postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{table}".format(
    user=os.getenv("POSTGRES_USER"),
    passwd=os.getenv("POSTGRES_PASSWORD"),
    host=os.getenv("POSTGRES_HOST"),
    port=5432,
    table=os.getenv("POSTGRES_DB"),
)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(app)
migrate = Migrate(app, db)
@app.errorhandler(404)
def not_found(e):
    """Plain-text fallback for unknown routes (e is the 404 error)."""
    return "Not found"
@app.route("/")
def index():
return "Im here"
| 22.019608 | 73 | 0.737311 | 0 | 0 | 0 | 0 | 112 | 0.099733 | 0 | 0 | 338 | 0.30098 |
61ad6e3acd78f945d9279a347fd33983cfd3d024 | 5,812 | py | Python | core/run.py | mofengboy/Chain-of-all-beings | b7f9750391a702b83728118f5db533ecb4d38bf0 | [
"Apache-2.0"
] | 2 | 2022-03-26T15:20:40.000Z | 2022-03-26T15:24:02.000Z | core/run.py | mofengboy/Chain-of-all-beings | b7f9750391a702b83728118f5db533ecb4d38bf0 | [
"Apache-2.0"
] | null | null | null | core/run.py | mofengboy/Chain-of-all-beings | b7f9750391a702b83728118f5db533ecb4d38bf0 | [
"Apache-2.0"
] | 1 | 2022-03-26T15:20:45.000Z | 2022-03-26T15:20:45.000Z | import getopt
import sys
import os
import logging.config
import time
import yaml
sys.path.append("../")
sys.path.append(os.path.abspath("."))
from core.app import APP
from core.utils.ciphersuites import CipherSuites
from core.utils.system_time import STime
from core.config.cycle_Info import ElectionPeriodValue
def run(sk_string, pk_string, server_url):
    """Main-node event loop for the chain.

    Initializes logging and the application core, synchronizes chain data
    with peer main nodes, then runs a minute-based, three-phase consensus
    loop keyed off the wall-clock second (STime).

    Args:
        sk_string: user's private key string.
        pk_string: user's public key string.
        server_url: platform service URL.

    NOTE(review): ``phase3`` is initialized and reset but never set to True,
    so the third-phase guard only depends on phase1/phase2 — confirm intent.
    """
    # Logging: configure from the YAML file and obtain the main logger.
    with open('./config/log_config.yaml', 'r') as f:
        config = yaml.safe_load(f.read())
        logging.config.dictConfig(config)
    logger = logging.getLogger("main")
    # Clock check: system time must be within 1 second of the NTP server.
    if not STime.proofreadingTime():
        # Abort on excessive clock drift.
        logger.error("系统时间与NTP服务器时间不得超过1秒,请核对系统时间")
        exit()
    # Initialize the application core.
    app = APP(sk_string, pk_string, server_url)
    logger.info("全体初始化完成")
    # # DEBUG mode: add this node to the main-node list directly.
    # # DEBUG only; in production, main-node membership must be applied for.
    # app.mainNode.mainNodeList.addMainNode(node_info=app.mainNode.nodeInfo)
    # app.storageGenesisBlock()
    # #
    # Obtain the main-node list (read from seed/configuration file).
    while not app.loadMainNodeListBySeed():
        logger.error("无法获得任何主节点IP的地址,请检测网络或者配置文件")
        time.sleep(10)
    logger.info("配置文件读取完成")
    # Subscribe to peer main nodes.
    app.reSubscribe()
    # Synchronize chain data until a current epoch can be obtained.
    logger.info("开始同步")
    while not app.getCurrentEpochByOtherMainNode():
        app.synchronizedBlockOfBeings()
    app.synchronizedBlockOfTimes()
    app.synchronizedBlockOfGarbage()
    logger.info("同步完成")
    # While on the main-node list we only have read access here; when not on
    # the list, subscription data can still be accepted.
    phase1 = False
    phase2 = False
    phase3 = False
    # Start the thread that handles periodic events.
    app.dealPeriodicEvents()
    # Make sure the loop is entered within the first 30 seconds of a minute.
    while STime.getSecond() >= 30:
        logger.info("请稍等")
        time.sleep(1)
    while True:
        if app.mainNode.mainNodeList.userPKisExit(user_pk=app.user.getUserPKString()):
            # This node IS on the main-node list: run the three timed phases.
            if 0 <= STime.getSecond() < 30 and phase1 is False:
                app.startNewEpoch()
                phase1 = True
                logger.info("第一阶段完成:此时时间:" + str(STime.getSecond()))
            if 30 <= STime.getSecond() < 40 and phase1 is True and phase2 is False:
                app.startCheckAndApplyDeleteNode()
                phase2 = True
                logger.info("第二阶段完成:此时时间:" + str(STime.getSecond()))
            if 40 <= STime.getSecond() < 60 and phase1 is True and phase2 is True and phase3 is False:
                i = 0
                # Retry saving until it succeeds; past second 50, attempt
                # block recovery on each failed try.
                while not app.startCheckAndSave():
                    i += 1
                    logger.info("第" + str(i) + "次尝试")
                    time.sleep(0.1)
                    if STime.getSecond() >= 50:
                        logger.warning("当前周期未能成功收集所有区块")
                        app.blockRecoveryOfBeings()
                logger.info("第三阶段完成:此时时间:" + str(STime.getSecond()))
                app.addEpoch()
                logger.info("Epoch:" + str(app.getEpoch()))
                if app.getEpoch() % ElectionPeriodValue == 0:
                    # Enter the next election period.
                    logger.info("进入下一个选举周期")
                    logger.info("暂停半小时")
                    app.initVote()
                    app.saveAssetOfTimesAndGarbage()
                    time.sleep(60)
                # Re-verify the system clock against NTP.
                if not STime.proofreadingTime():
                    logger.warning("请校对系统时间,当前时间与NTP时间误差超过一秒")
                phase1 = False
                phase2 = False
                phase3 = False
            time.sleep(0.1)
        else:
            # This node is NOT on the main-node list: run a reduced loop and
            # advertise its info so it can be added by another main node.
            try:
                if 0 <= STime.getSecond() < 40 and phase1 is False:
                    logger.info("当前节点不是主节点,请在其他主节点处进行申请")
                    logger.info("节点信息如下:")
                    logger.info(app.mainNode.getNodeInfo())
                    logger.info("节点签名如下:")
                    logger.info(app.mainNode.getNodeSignature())
                    app.startNewEpoch()
                    phase1 = True
                    logger.info("第一阶段完成:此时时间:" + str(STime.getSecond()))
                if STime.getSecond() >= 40 and phase1 is True:
                    i = 0
                    while not app.startCheckAndSave():
                        i += 1
                        logger.info("第" + str(i) + "次尝试")
                        time.sleep(1)
                        if STime.getSecond() >= 50:
                            logger.warning("当前周期未能成功收集所有区块")
                            app.blockRecoveryOfBeings()
                    app.addEpoch()
                    logger.info("Epoch:" + str(app.getEpoch()))
                    if app.getEpoch() % ElectionPeriodValue == 0:
                        # Enter the next election period.
                        logger.info("进入下一个选举周期")
                        logger.info("暂停半小时")
                        app.initVote()
                        app.saveAssetOfTimesAndGarbage()
                        time.sleep(60)
                    # Re-verify the system clock against NTP.
                    if not STime.proofreadingTime():
                        logger.warning("请校对系统时间,当前时间与NTP时间误差超过一秒")
                    phase1 = False
            except Exception as error:
                logger.error(error, exc_info=True)
            time.sleep(1)
if __name__ == "__main__":
    # Command-line entry point: -s <private key> -p <public key> -u <server url>
    argv = sys.argv[1:]
    # Private key
    private_key_string = ""
    # Public key
    public_key_string = ""
    # Platform service URL
    url = ""
    try:
        opts, args = getopt.getopt(argv, "s:p:u:")  # short-option mode
    except Exception as err:
        print(err)
        exit()
    for opt, arg in opts:
        if opt == "-s":
            private_key_string = arg
        if opt == "-p":
            public_key_string = arg
        if opt == "-u":
            url = arg
    # The supplied key pair must match before the node is started.
    if not CipherSuites.verifyPublicAndPrivateKeys(private_key_string, public_key_string):
        print("公钥与私钥不匹配")
        exit()
    if url == "":
        print("server_url不能为空")
        exit()
    run(private_key_string, public_key_string, url)
| 31.759563 | 102 | 0.520303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,718 | 0.255427 |
61af5ec45b177f6c19b848d840e3989ad1c6c928 | 3,274 | py | Python | main.py | josehenriqueroveda/spraying-API | dffa2434b19ea75a372dedd7f90a88857b33a726 | [
"MIT"
] | null | null | null | main.py | josehenriqueroveda/spraying-API | dffa2434b19ea75a372dedd7f90a88857b33a726 | [
"MIT"
] | null | null | null | main.py | josehenriqueroveda/spraying-API | dffa2434b19ea75a372dedd7f90a88857b33a726 | [
"MIT"
] | null | null | null | from fastapi import FastAPI
from starlette.responses import RedirectResponse
from ratelimit import limits
import sys
import uvicorn
import requests
import json
import config
# Rate-limit window, in seconds, used by the @limits decorator below.
ONE_MINUTE = 60
app = FastAPI(title='Spraying conditions API',
              description='API for real-time analysis of climatic conditions generating the result whether they are suitable or not for agricultural spraying.')
@app.get("/")
async def docs():
    """Redirect the root path to the interactive API documentation."""
    return RedirectResponse(url='/docs')
@app.get("/spray/condition")
@limits(calls=30, period=ONE_MINUTE)
async def check_spray_condition(city: str):
    """Judge whether current weather in *city* is suitable for spraying.

    Fetches current conditions from OpenWeatherMap and applies simple
    agronomic thresholds on temperature, humidity and wind speed.

    Fixes over the original:
    - decorator order: ``@app.get`` is now outermost, so FastAPI registers
      the rate-limited wrapper (previously ``@limits`` wrapped the function
      *after* the raw function was registered and never applied);
    - the bare ``except:`` is narrowed to ``except Exception`` so
      KeyboardInterrupt/SystemExit are not swallowed.
    """
    try:
        response = requests.get(f'http://api.openweathermap.org/data/2.5/weather?q={city}&units=metric&lang=pt&appid={config.OWM_KEY}')
        wheather_info = json.loads(response.text)
        description = wheather_info['weather'][0]['main']
        temperature = int(wheather_info['main']['temp'])
        feels_like = int(wheather_info['main']['feels_like'])
        humidity = int(wheather_info['main']['humidity'])
        # m/s -> km/h (speed is truncated to int first, as before).
        wind = int(wheather_info['wind']['speed'])*3.6
        spray_condition = ''
        bad_conditions = ['Thunderstorm', 'Drizzle', 'Rain', 'Snow']
        if (description not in bad_conditions) and (10 < temperature < 30) and (10 < feels_like < 30) and (humidity > 50) and (3 < wind < 10):
            spray_condition = 'Good weather conditions for spraying'
        else:
            # Pick the most specific failure reason that applies.
            if description in bad_conditions:
                spray_condition = f'Bad weather conditions for spraying: {description}'
            elif (temperature > 30) and (feels_like > 30) and (humidity > 50) and (3 < wind < 10):
                spray_condition = f'Bad weather conditions: {temperature} °C is too hot for spraying'
            elif (temperature <= 10) and (feels_like <= 10) and (humidity > 50) and (3 < wind < 10):
                spray_condition = f'Bad weather conditions: {temperature} °C is too cold for spraying'
            elif (temperature < 30) and (feels_like < 30) and (humidity < 50) and (3 < wind < 10):
                spray_condition = f'Bad weather conditions: {humidity} % air humidity. It is below that recommended for spraying'
            elif (temperature < 30) and (feels_like < 30) and (humidity > 50) and (wind < 3):
                spray_condition = f'Bad weather conditions: The wind speed of {wind} km/h is very low and not recommended for spraying'
            elif (temperature < 30) and (feels_like < 30) and (humidity > 50) and (wind > 10):
                spray_condition = f'Bad weather conditions: The wind speed of {wind} km/h is above the recommended and can cause drift.'
            else:
                spray_condition = 'Bad weather conditions for spraying'
        result = ({'city': city,
                   'description': description,
                   'temperature': f'{temperature} °C',
                   'feels_like': f'{feels_like} °C',
                   'humidity': f'{humidity} %',
                   'wind': f'{wind} km/h',
                   'spray_condition': spray_condition})
        return result
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        raise
if __name__ == "__main__":
    # Local development server; bind on all interfaces.
    uvicorn.run(app, host="0.0.0.0", port=8000)
| 46.112676 | 160 | 0.624007 | 0 | 0 | 0 | 0 | 2,796 | 0.852959 | 2,716 | 0.828554 | 1,145 | 0.349298 |
61b0a336f45b9e9a987dc57de808aa5bdd651e6a | 4,519 | py | Python | task3_word2vec_lstm.py | tomelf/cnit-623 | edf25f0b216b2480f7b651d3b94c1377dff721c0 | [
"MIT"
] | 4 | 2020-08-28T21:33:48.000Z | 2021-01-16T22:43:36.000Z | task3_word2vec_lstm.py | tomelf/cnit-623 | edf25f0b216b2480f7b651d3b94c1377dff721c0 | [
"MIT"
] | null | null | null | task3_word2vec_lstm.py | tomelf/cnit-623 | edf25f0b216b2480f7b651d3b94c1377dff721c0 | [
"MIT"
] | 4 | 2021-01-16T22:42:52.000Z | 2021-01-16T22:43:45.000Z | import data_loader
import numpy as np
import pandas as pd
import pickle
import os
import nltk
import re
import timeit
from torch.autograd import Variable
import torch
from sklearn import preprocessing, svm
from sklearn.metrics import roc_auc_score, accuracy_score, classification_report
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.externals import joblib
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.model_selection import cross_val_score
from util.classification.lstm_nli import LSTMNLI, LSTMNLIPOS
def _write_evaluation(model, dataset, label_to_ix_list, output, banner, report_name):
    """Evaluate *model* on *dataset* and append a classification report to *output*.

    *dataset* is a list of ``[sequence, label_index]`` pairs with 1-based label
    indices; *label_to_ix_list* maps them back to language names.
    """
    output.write(banner + "\n")
    preds, actuals = [], []
    for sequence, actual in dataset:
        # Single forward pass per sample (the original called model.test()
        # twice, doubling inference cost for no effect).
        _, pred = torch.max(model.test(sequence), 1)
        preds.append(int(pred.data.cpu()))
        actuals.append(actual)
    # Map 1-based indices back to language names.
    preds = [label_to_ix_list[p - 1] for p in preds]
    actuals = [label_to_ix_list[p - 1] for p in actuals]
    output.write("{}:\n {}\n".format(report_name, classification_report(actuals, preds)))
    output.write("Accuracy: {}\n".format(accuracy_score(actuals, preds)))


def main():
    """Train (bi)LSTM native-language classifiers on word and POS sequences.

    For each feature type, fits the corresponding model on the training split
    and writes train/test classification reports to a results file.
    """
    meta_list, data_list = data_loader.load_data(load_train=True, load_dev=True, load_test=True)
    (train_meta, train_meta_corrected,
     dev_meta, dev_meta_corrected,
     test_meta, test_meta_corrected) = meta_list
    (train_data, train_data_corrected,
     dev_data, dev_data_corrected,
     test_data, test_data_corrected) = data_list
    # 1-based label indices; index 0 is left unused.
    label_to_ix_list = train_meta["native_language"].unique().tolist()
    label_to_ix = {l: i + 1 for i, l in enumerate(label_to_ix_list)}
    for model_type in ["wordseqs", "posseqs"]:
        # Feature column: word forms vs. universal POS tags.
        feature = "form" if model_type == "wordseqs" else "upostag"
        X_train = [[d[feature].tolist(), label_to_ix[train_meta["native_language"].iloc[i]]]
                   for i, d in enumerate(train_data)]
        X_test = [[d[feature].tolist(), label_to_ix[test_meta["native_language"].iloc[i]]]
                  for i, d in enumerate(test_data)]
        # NOTE: the dev split is currently unused (set_dev_data is disabled below).
        LABEL_DIM = len(label_to_ix)
        if model_type == "wordseqs":
            EMBEDDING_DIM = 300
            HIDDEN_DIM = 200
            model = LSTMNLI(EMBEDDING_DIM, HIDDEN_DIM, LABEL_DIM, bidirectional=False)
        else:
            # Build a POS vocabulary from the training sequences; "_" is the
            # padding/unknown tag.
            pos_to_ix = dict()
            for sent, _label in X_train:
                for pos in sent:
                    if pos not in pos_to_ix:
                        pos_to_ix[pos] = len(pos_to_ix)
            if "_" not in pos_to_ix:
                pos_to_ix["_"] = len(pos_to_ix)
            EMBEDDING_DIM = len(pos_to_ix)
            HIDDEN_DIM = len(pos_to_ix)
            model = LSTMNLIPOS(EMBEDDING_DIM, HIDDEN_DIM, LABEL_DIM, pos_to_ix, bidirectional=True)
        model.cuda()
        model.set_train_data(X_train)
        # model.set_dev_data(X_dev)
        model.train(epoch=1000, lr=0.5)
        # torch.save(model, 'model_{}.pt'.format(model_type))
        with open("task3_doc2vec_lstm_results_{}.txt".format(model_type), "w") as output:
            _write_evaluation(model, X_train, label_to_ix_list, output,
                              "Evaluate train performance", "Train report")
            _write_evaluation(model, X_test, label_to_ix_list, output,
                              "Evaluate test performance", "Testing report")
# Script entry point.
if __name__ == "__main__":
    main()
| 43.451923 | 132 | 0.623811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 504 | 0.111529 |
61b0ba8f2fe1cfda56083280555e02a787f5271a | 344 | py | Python | setup.py | trsav/romodel | b9e94edb0eeb89fe75be24e1eb303a077c02f58e | [
"MIT"
] | 27 | 2021-05-17T08:33:51.000Z | 2022-02-09T03:18:11.000Z | setup.py | trsav/romodel | b9e94edb0eeb89fe75be24e1eb303a077c02f58e | [
"MIT"
] | null | null | null | setup.py | trsav/romodel | b9e94edb0eeb89fe75be24e1eb303a077c02f58e | [
"MIT"
] | 7 | 2021-07-15T17:07:23.000Z | 2022-01-18T18:58:56.000Z | from setuptools import setup, find_packages
# Package metadata for romodel (Pyomo robust optimization toolbox).
setup(
    name='romodel',
    version='0.0.2',
    url='https://github.com/johwiebe/romodel.git',
    author='Johannes Wiebe',
    author_email='j.wiebe17@imperial.ac.uk',
    description='Pyomo robust optimization toolbox',
    packages=find_packages(),
    # Runtime dependencies; pyomo provides the modeling layer.
    install_requires=['pyomo', 'numpy'],
)
| 26.461538 | 52 | 0.69186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.430233 |
61b24b373be99e2cd4d1f337b0c0bc83758927a9 | 11,138 | py | Python | lambda_function.py | RAMCO-AMS/AlexaSkill | a3f54107f115b21680e9443b6e8dd63634fe7c81 | [
"MIT"
] | 1 | 2018-02-23T10:28:59.000Z | 2018-02-23T10:28:59.000Z | lambda_function.py | RAMCO-AMS/AlexaSkill | a3f54107f115b21680e9443b6e8dd63634fe7c81 | [
"MIT"
] | null | null | null | lambda_function.py | RAMCO-AMS/AlexaSkill | a3f54107f115b21680e9443b6e8dd63634fe7c81 | [
"MIT"
] | null | null | null | """
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Custom Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
import config
import pycurl
from urllib import urlencode
import json
from io import BytesIO
def lambda_handler(event, context):
    """Entry point: route the incoming Alexa request based on its type
    (LaunchRequest, IntentRequest, SessionEndedRequest).

    The JSON body of the request is provided in the ``event`` parameter.
    """
    print("event.session.application.applicationId=" +
          event['session']['application']['applicationId'])
    # To lock this skill to a single Alexa application, validate
    # event['session']['application']['applicationId'] against your skill's
    # ID here and raise ValueError("Invalid Application ID") on mismatch.
    request = event['request']
    session = event['session']
    if session['new']:
        on_session_started({'requestId': request['requestId']}, session)
    request_type = request['type']
    if request_type == "LaunchRequest":
        return on_launch(request, session)
    if request_type == "IntentRequest":
        return on_intent(request, session)
    if request_type == "SessionEndedRequest":
        return on_session_ended(request, session)
def on_session_started(session_started_request, session):
    """Log the start of a new session."""
    print("on_session_started requestId={}, sessionId={}".format(
        session_started_request['requestId'], session['sessionId']))
def on_launch(launch_request, session):
    """Handle a launch without a specific intent: return the welcome response."""
    print("on_launch requestId={}, sessionId={}".format(
        launch_request['requestId'], session['sessionId']))
    # Dispatch to the skill's launch behavior.
    return get_welcome_response()
def on_intent(intent_request, session):
    """Dispatch an IntentRequest to the matching intent handler.

    Raises ValueError for intents this skill does not implement.
    """
    print("on_intent requestId={}, sessionId={}".format(
        intent_request['requestId'], session['sessionId']))
    intent = intent_request['intent']
    intent_name = intent_request['intent']['name']
    # Guard-clause dispatch: first match returns immediately.
    if intent_name == "MeetingIntent":
        return get_meetings_from_api(intent, session)
    if intent_name == "ClassesIntent":
        return get_classes_from_api(intent, session)
    if intent_name == "ContributionsIntent":
        return get_contributions_from_api(intent, session)
    if intent_name == "NewMembersIntent":
        return get_members_from_api(intent, session)
    if intent_name == "HotlineIntent":
        return get_hotline_from_api(intent, session)
    if intent_name == "FinishIntent":
        return Finish_Intent(intent, session)
    if intent_name == "AMAZON.HelpIntent":
        return get_welcome_response()
    raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
    """Log the end of a session.

    Not called when the skill itself returns should_end_session=true.
    """
    print("on_session_ended requestId={}, sessionId={}".format(
        session_ended_request['requestId'], session['sessionId']))
    # add cleanup logic here
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
    """Build the initial welcome prompt for the skill.

    Session attributes could be initialized here if the skill needed state.
    """
    card_title = "Welcome"
    speech_output = "Welcome to the RAMCO Voice Assistant. Please tell me how can I help you. "
    # Spoken again if the user stays silent or is not understood.
    reprompt_text = "What type of information would you like? You can ask about RPAC, class registrations, meetings, and more."
    speechlet = build_speechlet_response(card_title, speech_output, reprompt_text, False)
    return build_response({}, speechlet)
def get_meetings_from_api(intent, session):
    """Answer the MeetingIntent with the meeting-registration count.

    Queries the RAMCO API for cobalt_meetingregistration entities and speaks
    how many were returned. The curl handle is now closed in a ``finally``
    block; the original leaked it when ``perform()`` raised.
    """
    session_attributes = {}
    # No reprompt: if the user does not respond, the session ends.
    reprompt_text = None
    data = BytesIO()
    c = pycurl.Curl()
    try:
        c.setopt(pycurl.URL, config.API_URL)
        c.setopt(pycurl.CAINFO, config.PEM_FILE)
        c.setopt(c.WRITEFUNCTION, data.write)
        payload = {'key': config.API_KEY,
                   'Operation': 'GetEntities',
                   'Entity': 'cobalt_meetingregistration',
                   'Attributes': 'cobalt_name',
                   'Filter': 'CreatedOn<ge>2015-11-01'}
        postfields = urlencode(payload)
        c.setopt(c.POSTFIELDS, postfields)
        c.perform()
    finally:
        c.close()
    dictionary = json.loads(data.getvalue())
    speech_output = "You currently have " + str(len(dictionary['Data'])) + " meeting registrations this month"
    should_end_session = False
    return build_response(session_attributes, build_speechlet_response(
        intent['name'], speech_output, reprompt_text, should_end_session))
def get_contributions_from_api(intent, session):
    """Answer the ContributionsIntent with an RPAC contribution summary.

    The curl handle is now closed in a ``finally`` block; the original
    leaked it when ``perform()`` raised.

    NOTE(review): the spoken counts ("47", "1545 dollars") are hardcoded and
    ignore the fetched data — looks like demo output; confirm whether
    ``len(dictionary['Data'])`` and a real amount should be spoken instead.
    """
    session_attributes = {}
    reprompt_text = None
    data = BytesIO()
    c = pycurl.Curl()
    try:
        c.setopt(pycurl.URL, config.API_URL)
        c.setopt(pycurl.CAINFO, config.PEM_FILE)
        c.setopt(c.WRITEFUNCTION, data.write)
        payload = {'key': config.API_KEY, 'Operation': 'GetEntities', 'Entity': 'cobalt_contribution', 'Attributes': 'cobalt_name', 'Filter': 'CreatedOn<ge>2015-09-01'}
        postfields = urlencode(payload)
        c.setopt(c.POSTFIELDS, postfields)
        c.perform()
    finally:
        c.close()
    dictionary = json.loads(data.getvalue())
    speech_output = "You currently have 47 R pack contributions this month totalling 1545 dollars"
    should_end_session = False
    return build_response(session_attributes, build_speechlet_response(
        intent['name'], speech_output, reprompt_text, should_end_session))
def get_classes_from_api(intent, session):
    """Answer the ClassesIntent with the class-registration count.

    Queries the RAMCO API for cobalt_classregistration entities. The curl
    handle is now closed in a ``finally`` block; the original leaked it
    when ``perform()`` raised.
    """
    session_attributes = {}
    reprompt_text = None
    data = BytesIO()
    c = pycurl.Curl()
    try:
        c.setopt(pycurl.URL, config.API_URL)
        c.setopt(pycurl.CAINFO, config.PEM_FILE)
        c.setopt(c.WRITEFUNCTION, data.write)
        payload = {'key': config.API_KEY, 'Operation': 'GetEntities', 'Entity': 'cobalt_classregistration', 'Attributes': 'createdby', 'Filter': 'CreatedOn<ge>2016-03-01'}
        postfields = urlencode(payload)
        c.setopt(c.POSTFIELDS, postfields)
        c.perform()
    finally:
        c.close()
    dictionary = json.loads(data.getvalue())
    speech_output = "You have " + str(len(dictionary['Data'])) + " class registrations for March"
    should_end_session = False
    return build_response(session_attributes, build_speechlet_response(
        intent['name'], speech_output, reprompt_text, should_end_session))
def get_members_from_api(intent, session):
    """Answer the NewMembersIntent with the recent new-member count.

    Queries the RAMCO API for cobalt_membership entities. The curl handle
    is now closed in a ``finally`` block; the original leaked it when
    ``perform()`` raised.
    """
    session_attributes = {}
    reprompt_text = None
    data = BytesIO()
    c = pycurl.Curl()
    try:
        c.setopt(pycurl.URL, config.API_URL)
        c.setopt(pycurl.CAINFO, config.PEM_FILE)
        c.setopt(c.WRITEFUNCTION, data.write)
        payload = {'key': config.API_KEY, 'Operation': 'GetEntities', 'Entity': 'cobalt_membership', 'Attributes': 'cobalt_name', 'Filter': 'CreatedOn<ge>2016-03-01'}
        postfields = urlencode(payload)
        c.setopt(c.POSTFIELDS, postfields)
        c.perform()
    finally:
        c.close()
    dictionary = json.loads(data.getvalue())
    speech_output = "Over the past thirty days, there have been " + str(len(dictionary['Data'])) + " new members joined the association."
    should_end_session = False
    return build_response(session_attributes, build_speechlet_response(
        intent['name'], speech_output, reprompt_text, should_end_session))
def get_hotline_from_api(intent, session):
    """Answer the HotlineIntent with the legal-hotline call count.

    Queries the RAMCO API for incident entities. The curl handle is now
    closed in a ``finally`` block; the original leaked it when
    ``perform()`` raised.
    """
    session_attributes = {}
    reprompt_text = None
    data = BytesIO()
    c = pycurl.Curl()
    try:
        c.setopt(pycurl.URL, config.API_URL)
        c.setopt(pycurl.CAINFO, config.PEM_FILE)
        c.setopt(c.WRITEFUNCTION, data.write)
        payload = {'key': config.API_KEY, 'Operation': 'GetEntities', 'Entity': 'incident', 'Attributes': 'createdby', 'Filter': 'CreatedOn<ge>2016-03-01'}
        postfields = urlencode(payload)
        c.setopt(c.POSTFIELDS, postfields)
        c.perform()
    finally:
        c.close()
    dictionary = json.loads(data.getvalue())
    # NOTE(review): the spoken phrase keeps the original wording, including
    # the "there are have been" typo, to preserve behavior.
    speech_output = "So far, there are have been " + str(len(dictionary['Data'])) + " legal hotline calls this month"
    should_end_session = False
    return build_response(session_attributes, build_speechlet_response(
        intent['name'], speech_output, reprompt_text, should_end_session))
def Finish_Intent(intent, session):
    """End the session with a goodbye message."""
    speechlet = build_speechlet_response(intent['name'], "Goodbye.", None, True)
    return build_response({}, speechlet)
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Assemble the speechlet portion of an Alexa response."""
    speech = {'type': 'PlainText', 'text': output}
    card = {
        'type': 'Simple',
        'title': 'SessionSpeechlet - ' + title,
        'content': 'SessionSpeechlet - ' + output,
    }
    reprompt = {'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}}
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': reprompt,
        'shouldEndSession': should_end_session,
    }
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet response in the top-level Alexa response envelope."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
| 37.755932 | 162 | 0.687915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,779 | 0.429072 |
61b28c4bd878652604bcfea27884f4991efa0ed4 | 5,456 | py | Python | preprocessing/HIPSCI/extract_seq_majiq_exon_cons.py | arnegebert/splicing | 3e19ce83a9f6d98bc6c2d8b653660d22e453ca77 | [
"MIT"
] | 1 | 2021-05-13T15:30:39.000Z | 2021-05-13T15:30:39.000Z | preprocessing/HIPSCI/extract_seq_majiq_exon_cons.py | arnegebert/splicing | 3e19ce83a9f6d98bc6c2d8b653660d22e453ca77 | [
"MIT"
] | null | null | null | preprocessing/HIPSCI/extract_seq_majiq_exon_cons.py | arnegebert/splicing | 3e19ce83a9f6d98bc6c2d8b653660d22e453ca77 | [
"MIT"
] | null | null | null | from collections import defaultdict, Counter
import numpy as np
import time
from utils import one_hot_encode_seq, reverse_complement, overlap
import matplotlib.pyplot as plt
# Wall-clock start, reported at the end of the script.
startt = time.time()
psis = []
data_path = '../../data'
save_to_cons = 'hipsci_majiq/exon/cons.npy'
# Window sizes (nt) around the exon start/end splice sites.
introns_bef_start = 70 # introns
exons_after_start = 70 # exons
exons_bef_end = 70 # exons
introns_after_end = 70 # introns
# want to load chromosome as one giant string
def load_chrom_seq(chrom):
    """Read chromosome *chrom* as a single string, dropping the FASTA header."""
    with open(f'../../data/chromosomes/chr{chrom}.fa') as fasta:
        sequence = fasta.read().replace('\n', '')
    # After newlines are stripped, the '>chrN' header occupies the first
    # 5 characters for single-digit chromosomes and 6 for double-digit ones.
    header_len = 5 if chrom < 10 else 6
    return sequence[header_len:]
# Accumulators for normalized intron/exon/intron lengths and encoded samples.
l1s, l2s, l3s = [], [], []
cons_exons = []
prev_astart, prev_aend = 0, 0
prev_dstart, prev_dend = 0, 0
prev_jstart, prev_jend = 0, 0
overlaps = 0
# Normalization statistics for exon/intron lengths.
exon_mean, exon_std, intron_mean, intron_std = 145.42, 198.0, 5340., 17000.
# NOTE(review): this outer loop re-scans the whole TSV for each splice-site
# offset ttt in [-4, 4) and keeps appending into the SAME cons_exons/psis
# lists — it looks like leftover splice-site-offset debugging; confirm
# before relying on the saved output.
for ttt in range(-4, 4):
    counts_start, counts_end = defaultdict(lambda: 0), defaultdict(lambda: 0)
    # NOTE(review): `counter` is never used.
    counter = 1000000000
    with open('../../majiq/builder/constitutive_junctions_sorted_stranded.tsv') as f:
        loaded_chrom = 1
        chrom_seq = load_chrom_seq(loaded_chrom)
        for i, l in enumerate(f):
            # Skip the header row.
            if i==0: continue
            if i % 1000 == 0: # ~ 357500 junctions
                print(f'Reading line {i}')
            geneid, chrom, jstart, jend, dstart, dend, astart, aend, strand = l.replace('\n','').split('\t')
            chrom, jstart, jend, dstart, dend, astart, aend, psi = int(chrom), int(jstart), int(jend), int(dstart), \
                                                                   int(dend), int(astart), int(aend), 1.0
            # The TSV is sorted by chromosome; load the next one on change.
            if chrom > loaded_chrom:
                loaded_chrom += 1
                chrom_seq = load_chrom_seq(loaded_chrom)
            if overlap(prev_jstart, prev_jend, jstart, jend):
                overlaps += 1
            # The exon bounded by the previous acceptor and this donor:
            # process it only when both junctions agree on its coordinates.
            if (prev_astart, prev_aend) == (dstart, dend):
                # if strand == '+': continue
                # NOTE(review): both strand branches compute identical
                # windows — confirm whether '-' was meant to differ.
                if strand == '+':
                    window_around_start = chrom_seq[dstart-introns_bef_start-3:dstart+exons_after_start-3]
                    window_around_end = chrom_seq[dend-exons_bef_end+2:dend+introns_after_end+2]
                elif strand == '-':
                    window_around_start = chrom_seq[dstart-introns_bef_start-3:dstart+exons_after_start-3]
                    window_around_end = chrom_seq[dend-exons_bef_end+2:dend+introns_after_end+2]
                extr1, extr2 = chrom_seq[dstart+ttt:dstart+ttt+2], chrom_seq[dend+ttt:dend+ttt+2]
                if strand == '-':
                    window_around_start, window_around_end = reverse_complement(window_around_end[::-1]), \
                                                             reverse_complement(window_around_start[::-1])
                    extr1, extr2 = reverse_complement(extr2[::-1]), reverse_complement(extr1[::-1])
                extr3, extr4 = window_around_start[70:72], window_around_end[68:70]
                # + strand works for both, - strand works for none
                # 100% AG for -3 offset for + strand, 0 offset - strand <--- start
                # 99% GT at 0 offset only + strand, -3 offset for - strand <--- end
                start, end = one_hot_encode_seq(window_around_start), one_hot_encode_seq(window_around_end)
                start, end = np.array(start), np.array(end)
                # Lengths: upstream intron, exon, downstream intron.
                l1, l2, l3 = dstart-prev_dend, dend-dstart, astart-dend
                l1, l2, l3 = (l1-intron_mean)/intron_std, (l2-exon_mean)/exon_std, (l3-intron_mean)/intron_std
                lens_and_psi_vector = np.array([l1, l2, l3, psi])
                l1s.append(l1)
                l2s.append(l2)
                l3s.append(l3)
                start_and_end = np.concatenate((start, end))
                # Sample layout: one-hot windows followed by a (1, 4) row of
                # [l1, l2, l3, psi].
                sample = np.concatenate((start_and_end,lens_and_psi_vector.reshape(1,4))).astype(np.float32)
                cons_exons.append(sample)
                # GT-AG and GC-AG
                # print(extr1)
                # Dinucleotide bookkeeping; `target` is set beyond the file
                # length, so the early-break report below never triggers.
                target = 100000000
                if i < target:
                    counts_start[extr3.upper()] += 1
                    counts_end[extr4.upper()] += 1
                if target == i:
                    print('-' * 40)
                    for (k, v) in counts_start.items():
                        percent = v / target
                        if percent > 0.05:
                            print(f'{k} :{percent:.2f}')
                    print('-' * 40)
                    break
            psis.append(psi)
            prev_astart, prev_aend = astart, aend
            prev_dstart, prev_dend = dstart, dend
            prev_jstart, prev_jend = jstart, jend
# Summaries, diagnostics and the saved dataset.
cons_exons = np.array(cons_exons)
psis = np.array(psis)
l1s, l2s, l3s = np.array(l1s), np.array(l2s), np.array(l3s)
l1avg, l2avg, l3avg = np.mean(l1s), np.mean(l2s), np.mean(l3s)
l1median, l2median, l3median = np.median(l1s), np.median(l2s), np.median(l3s)
plt.hist(l1s)
plt.xlabel('normalized L1 value')
plt.ylabel('number of data points')
plt.title('Constitutive exons MAJIQ')
# plt.show()
print(f'Most common nucleotides right after exon start: {Counter(counts_start).most_common(1)}')
print(f'Most common nucleotides right before exon end: {Counter(counts_end).most_common(1)}')
print(f'L1avg: {l1avg}, l2avg: {l2avg}, l3avg: {l3avg}')
print(f'L1 median: {l1median}, l2 median: {l2median}, l3 median: {l3median}')
print(f'Number of cons exon: {len(cons_exons)}')
print(f'Number of overlapping (supposedly constitutive) junctions: {overlaps}')
np.save(f'{data_path}/{save_to_cons}', cons_exons)
print(f'Runtime {time.time()-startt}')
print(f'Runtime {time.time()-startt}') | 41.969231 | 113 | 0.61272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,104 | 0.202346 |
61b2a1604d4866a4eaf485c5fbc8ad47fa3ccd88 | 8,228 | py | Python | stream/migrations/0001_init_models.py | freejooo/vigilio | d21bf4f9d39e5dcde5d7c21476d8650e914c3c66 | [
"MIT"
] | 137 | 2021-03-26T18:19:45.000Z | 2022-03-06T07:48:23.000Z | stream/migrations/0001_init_models.py | rrosajp/vigilio | d21bf4f9d39e5dcde5d7c21476d8650e914c3c66 | [
"MIT"
] | 11 | 2021-03-28T00:07:00.000Z | 2021-05-04T12:54:58.000Z | stream/migrations/0001_init_models.py | rrosajp/vigilio | d21bf4f9d39e5dcde5d7c21476d8650e914c3c66 | [
"MIT"
] | 16 | 2021-03-27T23:58:53.000Z | 2022-03-20T14:52:13.000Z | # Generated by Django 3.1.5 on 2021-03-17 21:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the ``stream`` app.

    Creates the movie catalogue (Movie, MovieDBCategory, MovieSubtitle,
    MovieContent) and the per-user tables (UserMovieHistory, MyList), then
    wires up Movie's many-to-many relations.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="Movie",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("imdb_id", models.CharField(max_length=10)),
                ("title", models.CharField(blank=True, max_length=120)),
                ("description", models.TextField(blank=True, null=True)),
                ("moviedb_popularity", models.FloatField(blank=True, null=True)),
                ("poster_path_big", models.CharField(blank=True, max_length=255, null=True)),
                ("poster_path_small", models.CharField(blank=True, max_length=255, null=True)),
                ("backdrop_path_big", models.CharField(blank=True, max_length=255, null=True)),
                ("backdrop_path_small", models.CharField(blank=True, max_length=255, null=True)),
                ("duration", models.IntegerField(default=0)),
                ("media_info_raw", models.JSONField(blank=True, default=dict)),
                ("imdb_score", models.FloatField(default=0.0)),
                ("original_language", models.CharField(blank=True, max_length=2, null=True)),
                ("release_date", models.DateField(blank=True, null=True)),
                ("is_adult", models.BooleanField(default=False)),
                ("is_ready", models.BooleanField(default=False)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name="MovieDBCategory",
            fields=[
                ("moviedb_id", models.IntegerField(primary_key=True, serialize=False)),
                ("name", models.CharField(max_length=20)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name="MovieSubtitle",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("full_path", models.CharField(max_length=255)),
                ("relative_path", models.CharField(max_length=255)),
                ("file_name", models.CharField(max_length=255)),
                ("suffix", models.CharField(max_length=7)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name="UserMovieHistory",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("current_second", models.IntegerField(default=0)),
                ("remaining_seconds", models.IntegerField(default=0)),
                ("is_watched", models.BooleanField(default=False)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("movie", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name="history", to="stream.movie")),
                ("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name="MyList",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("movie", models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name="my_list", to="stream.movie")),
                ("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name="MovieContent",
            fields=[
                ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
                ("torrent_source", models.TextField(null=True)),
                ("full_path", models.CharField(blank=True, max_length=255, null=True)),
                ("relative_path", models.CharField(blank=True, max_length=255, null=True)),
                ("main_folder", models.CharField(blank=True, max_length=255, null=True)),
                ("file_name", models.CharField(blank=True, max_length=255, null=True)),
                ("file_extension", models.CharField(blank=True, max_length=255, null=True)),
                ("source_file_name", models.CharField(blank=True, max_length=255, null=True)),
                ("source_file_extension", models.CharField(blank=True, max_length=255, null=True)),
                ("resolution_width", models.IntegerField(default=0)),
                ("resolution_height", models.IntegerField(default=0)),
                ("raw_info", models.TextField(blank=True, null=True)),
                ("is_ready", models.BooleanField(default=False)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("movie_subtitle", models.ManyToManyField(blank=True, to="stream.MovieSubtitle")),
            ],
        ),
        migrations.AddField(
            model_name="movie",
            name="movie_content",
            field=models.ManyToManyField(to="stream.MovieContent"),
        ),
        migrations.AddField(
            model_name="movie",
            name="moviedb_category",
            field=models.ManyToManyField(blank=True, to="stream.MovieDBCategory"),
        ),
    ]
| 38.448598 | 87 | 0.444336 | 8,069 | 0.980676 | 0 | 0 | 0 | 0 | 0 | 0 | 1,006 | 0.122265 |
61b2c8e21cc921ae9578df5b257930ae8f0874f7 | 2,958 | py | Python | iris/iris_classifier.py | xuanthuong/tensorflow-course | 6b97ccf92e2586979a50468bd753ff46d00bdfc3 | [
"MIT"
] | null | null | null | iris/iris_classifier.py | xuanthuong/tensorflow-course | 6b97ccf92e2586979a50468bd753ff46d00bdfc3 | [
"MIT"
] | null | null | null | iris/iris_classifier.py | xuanthuong/tensorflow-course | 6b97ccf92e2586979a50468bd753ff46d00bdfc3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Iris classification example, pratice on using high-level API
Algorithms: Neutral Network
Reference: https://www.tensorflow.org/get_started/tflearn
Date: Jun 14, 2017
@author: Thuong Tran
@Library: tensorflow - high-level API with tf.contrib.learn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# import urllib # only python 2
import urllib.request # python 3
import tensorflow as tf
import numpy as np
IRIS_TRAINING = "./iris_dataset/iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "./iris_dataset/iris_test.csv"
IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
def main():
  """Train and evaluate a 3-layer DNN classifier on the Iris dataset.

  Downloads the training/test CSVs on first run, fits a
  tf.contrib.learn DNNClassifier, prints test accuracy, and predicts
  the class of two new, unlabeled samples.
  """
  # If the training and test sets aren't stored locally, download them.
  if not os.path.exists(IRIS_TRAINING):
    raw = urllib.request.urlopen(IRIS_TRAINING_URL).read()
    with open(IRIS_TRAINING, 'wb') as f:
      f.write(raw)
  if not os.path.exists(IRIS_TEST):
    raw = urllib.request.urlopen(IRIS_TEST_URL).read()
    with open(IRIS_TEST, 'wb') as f:
      f.write(raw)
  # Load datasets (features float32, integer class targets)
  training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
      filename = IRIS_TRAINING,
      target_dtype = np.int,
      features_dtype = np.float32)
  test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
      filename = IRIS_TEST,
      target_dtype = np.int,
      features_dtype = np.float32)
  # Specify that all features have real-value data (4 features per flower)
  feature_columns = [tf.contrib.layers.real_valued_column("", dimension = 4)]
  # Build 3 layer DNN with 10, 20, 10 units respectively
  classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                              hidden_units = [10, 20, 10],
                                              n_classes = 3,
                                              model_dir = "./tmp/iris_models")
  def get_train_inputs():
    # input_fn: whole training set as constant tensors
    x = tf.constant(training_set.data)
    y = tf.constant(training_set.target)
    return x, y
  # Fit model
  classifier.fit(input_fn = get_train_inputs, steps = 2000)
  # Equivalent to two successive 1000-step calls:
  # classifier.fit(x = training_set.data, y = training_set.target, steps = 1000)
  # classifier.fit(x = training_set.data, y = training_set.target, steps = 1000)
  def get_test_inputs():
    # input_fn: whole test set as constant tensors
    x = tf.constant(test_set.data)
    y = tf.constant(test_set.target)
    return x, y
  # Evaluate accuracy on the held-out test set (one pass)
  accuracy_score = classifier.evaluate(input_fn = get_test_inputs, steps = 1)["accuracy"]
  print("\nTest Accuracy: {0:f}\n".format(accuracy_score))
  # Predict for new examples
  def new_samples():
    # Two unlabeled flower measurements (4 features each)
    return np.array(
      [[6.4, 3.2, 4.5, 1.5],
       [5.8, 3.1, 5.0, 1.7]], dtype = np.float32)
  predictions = list(classifier.predict(input_fn = new_samples))
  print("New samples, Class predictions: {}\n".format(predictions))
if __name__ == "__main__":
main() | 32.866667 | 91 | 0.684246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,029 | 0.34787 |
61b4452039e1df0b8c34d097d279b8a63e6014fc | 1,728 | py | Python | src/main.py | NaBotProject/translate-discord-bot | 41d050a89803c47bf09e5ad0f5119a9ca038d61c | [
"MIT"
] | 1 | 2021-12-12T07:56:34.000Z | 2021-12-12T07:56:34.000Z | src/main.py | NaBotProject/translate-discord-bot | 41d050a89803c47bf09e5ad0f5119a9ca038d61c | [
"MIT"
] | null | null | null | src/main.py | NaBotProject/translate-discord-bot | 41d050a89803c47bf09e5ad0f5119a9ca038d61c | [
"MIT"
] | null | null | null | import discord
import os
import openpyxl
from deep_translator import GoogleTranslator
client = discord.Client()
TOKEN = os.getenv('TOKEN')
@client.event
async def on_ready():
    """Log the bot's identity once the Discord gateway connection is up."""
    print('We have logged in as ' + str(client.user))
@client.event
async def on_message(message):
    """Translate messages of the form::

        <source_lang> <target_lang>
        <text to translate>

    '$help' replies with usage instructions.  Messages sent by the bot
    itself are ignored.  As before, the source language is the first
    word of the first line and the target language is the last word.

    Fixes over the previous version:
      * the ``startswith('')`` branch was always True (matched every
        message) -- replaced by an explicit malformed-input guard;
      * messages without a newline raised IndexError -- now ignored;
      * the hand-rolled character loops are replaced by str.split,
        which reproduces the same first/last-word behaviour.
    """
    if message.author == client.user:
        return

    if message.content.startswith('$help'):
        text = "In the first line, you have to write the language that is your input word and the language that you want to be your output like this: \n en fr \n In the second line, you must write the sentence you want to translate like this: \n hi world"
        await message.channel.send(text)
        return

    # A translation request needs at least two lines: languages, then text.
    if '\n' not in message.content:
        return  # previously crashed with IndexError on single-line messages

    first, second_line = message.content.split('\n', 1)
    words = first.split(' ')
    firstlang = words[0]    # source language code (first word of line 1)
    secondlang = words[-1]  # target language code (last word of line 1)
    translated = GoogleTranslator(source=firstlang, target=secondlang).translate(second_line)
    await message.channel.send(translated)
client.run(TOKEN)
# print(translated)
| 27.428571 | 254 | 0.545718 | 0 | 0 | 0 | 0 | 1,523 | 0.880856 | 1,493 | 0.863505 | 371 | 0.214575 |
61b4dd53e7ab50a4c34eeefce040ae4930eeb60c | 14,042 | py | Python | createSampledata.py | sjoplin/Ace-Analytics | 38e39696de594bf6696a6be02066ac6e4ca2b7af | [
"MIT"
] | 1 | 2019-01-09T15:19:44.000Z | 2019-01-09T15:19:44.000Z | createSampledata.py | sjoplin/Moneyball | 38e39696de594bf6696a6be02066ac6e4ca2b7af | [
"MIT"
] | 7 | 2020-02-04T22:11:54.000Z | 2022-03-11T23:37:24.000Z | createSampledata.py | sjoplin/Ace-Analytics | 38e39696de594bf6696a6be02066ac6e4ca2b7af | [
"MIT"
] | null | null | null | # Import all libraries needed for the tutorial
import pandas as pd
import re
import requests
from bs4 import BeautifulSoup
from makePDFs import printPDFs
from time import sleep
#parses the rawtext into panda format
#add playerdata back
def generateStats(playerdata, teamName):
    """Parse play-by-play text from ./interdata/scraperaw.txt into
    batted-ball events plus per-player cumulative counters.

    Each raw line is expected to start with a player name, followed by a
    verb describing the at-bat outcome ("grounded", "singled", ...) and,
    usually, a hit direction.  Events are collected into a pandas
    DataFrame (Names / Results / Area) and a dict of per-player counters
    [strikeouts, walks, stolen bases, bunts]; both are handed to printPDFs.

    :param playerdata: DataFrame with a 'Names' column (team roster);
        also forwarded to printPDFs.
    :param teamName: team label forwarded to printPDFs.
    """
    # Keys of all potential outcomes, directions and trajectory --
    # basically looking for these words in the raw text.
    roster = playerdata["Names"].tolist()
    potOutcome = ["grounded", "flied", "lined", "double", "popped", "singled",
                  "doubled", "tripled", "homered", "fouled"]
    direction = ['p', '3b.', 'catcher', 'shortstop', 'pitcher', '1b', 'first',
                 '2b', 'c', 'second', '3b', 'third', 'ss',
                 'lf', 'left', 'cf', 'center', 'rf', 'right', 'middle', 'short']
    # Index-aligned output columns for the DataFrame built at the end.
    names = [];
    results = [];
    area = [];
    # playerDict maps "player" -> [# strikeouts, # walks, # stolen bases, # bunts]
    playerDict = {};
    f = open('./interdata/scraperaw.txt')
    line = f.readline()
    count = 0
    temp = ''
    while line:
        # split each line into words, stripping trailing commas/periods
        words = line.split(' ')
        newlines = []
        for each in words:
            each = each.strip(',')
            each = each.strip('.\n')
            newlines.append(each)
        words = newlines
        # marker to see if the player name has been added to the array
        num1 = 0
        tempName = words[0].lower()
        print("THIS IS ROSTER:")
        print(roster)
        # Re-assemble "last, initial" style names that are split across
        # the first two tokens of the line.
        if len(words[1]) <= 2:
            tempName = tempName + ', ' + words[1].lower()
        elif (len(tempName) <= 2):
            tempName = words[1].lower() + ', ' + tempName
        if (',') in tempName:
            temp = tempName.split(',')
            tempName = temp[0]
        # Substring-match the extracted surname against the roster; on a
        # hit, switch to the roster's canonical spelling.
        tempName2 = tempName
        for i in range(len(roster)):
            print("THIS IS TEMPNAME")
            print(tempName)
            each = roster[i].lower()
            index = each.find(tempName2)
            print("THIS IS INDEX")
            print(index)
            if (index != -1):
                tempName = roster[i]
                break
        # Update the cumulative counters
        # {"player": [# strikeouts, # walks, # stolen bases, # bunts]}.
        if ('struck' in words):
            # add the player name if not in dictionary, initialize all
            # values to 0, then bump the appropriate counter
            if (tempName not in playerDict.keys()):
                playerDict[tempName] = [0, 0, 0, 0]
                playerDict[tempName][0] = 1
            else:
                playerDict[tempName][0] = playerDict[tempName][0] + 1
        elif ('walked' in words):
            # add 1 to the walk count
            if (tempName not in playerDict.keys()):
                playerDict[tempName] = [0, 0, 0, 0]
                playerDict[tempName][1] = 1
            else:
                playerDict[tempName][1] = playerDict[tempName][1] + 1
        elif ('stole' in words):
            if (tempName not in playerDict.keys()):
                playerDict[tempName] = [0, 0, 0, 0]
                playerDict[tempName][2] = 1
            else:
                playerDict[tempName][2] = playerDict[tempName][2] + 1
        elif ('bunt' in words):
            if (tempName not in playerDict.keys()):
                playerDict[tempName] = [0, 0, 0, 0]
                playerDict[tempName][3] = 1
            else:
                playerDict[tempName][3] = playerDict[tempName][3] + 1
        # Bunts are handled first: record the player with result 'bunt'
        # and try to find a direction for the bunt.
        if ('bunt' in words):
            dirFlag = True
            names.append(tempName)
            results.append('bunt')
            for each in words:
                if dirFlag:
                    if (each == 'down'):
                        # "down the <line>" style direction
                        for each in words:
                            if each in direction:
                                area.append('down ' + each)
                                dirFlag = False
                    if (each == 'through'):
                        for other in words:
                            if (other in direction):
                                if (other == 'rf' or other == 'right' or other == 'left' or other == 'lf'):
                                    area.append('through ' + other)
                                    dirFlag = False
                    else:
                        if (dirFlag):
                            if (each in direction):
                                area.append(each)
                                dirFlag = False
            if dirFlag:
                # no direction word found: default bunts to the pitcher
                area.append('pitcher')
                dirFlag = False
        else:
            # Non-bunt events: find the outcome verb, then scan again for
            # the direction phrase that goes with it.
            for each in words:
                resFlag = True
                dirFlag = True
                if (each in potOutcome) and (resFlag):
                    result = each
                    for each in words:
                        if dirFlag and resFlag:
                            if ('center' in words):
                                # e.g. "right center" / "left center" gaps
                                for other in words:
                                    if (other in direction) and (other != 'center'):
                                        if (other == 'rf' or other == 'right' or other == 'left' or other == 'lf'):
                                            area.append(other + ' center')
                                            dirFlag = False
                                            names.append(tempName)
                                            results.append(result)
                                            resFlag = False
                            if (each == 'down'):
                                for each in words:
                                    if each in direction:
                                        area.append('down ' + each)
                                        dirFlag = False
                                        names.append(tempName)
                                        results.append(result)
                                        resFlag = False
                            if (each == 'through'):
                                for other in words:
                                    if (other in direction):
                                        if (other == 'rf' or other == 'right' or other == 'left' or other == 'lf'):
                                            area.append('through ' + other)
                                            dirFlag = False
                                            names.append(tempName)
                                            results.append(result)
                                            resFlag = False
                            else:
                                if (dirFlag):
                                    if (each in direction):
                                        area.append(each)
                                        dirFlag = False
                                        names.append(tempName)
                                        results.append(result)
                                        resFlag = False
        # Ensure every player that produced an event also has a counter row,
        # even if they never struck out / walked / stole / bunted.
        if ((tempName not in playerDict.keys()) and (tempName in names)):
            playerDict[tempName] = [0, 0, 0, 0]
        line = f.readline()
    f.close()
    # Assemble the event table and hand everything to the PDF generator.
    s = pd.Series(names)
    p = pd.Series(results)
    a = pd.Series(area)
    data = pd.DataFrame({'Names': s, 'Results': p, 'Area': a})
    pd.set_option('display.max_rows', 220)
    print(data)
    print(playerDict)
    printPDFs(data, playerdata, playerDict, teamName)
def getAllPlayers(url):
    """Fetch a team page and follow its roster link to scrape player stats.

    :param url: stats.ncaa.org team page URL.
    :return: the DataFrame produced by getPlayerStats for the roster page.
    """
    quote_page = url
    # query the website and return the html to the variable page
    hdr = {
        'Moneyball': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}
    session = requests.Session()
    req = session.get(quote_page, headers=hdr)
    soup = BeautifulSoup(req.content, 'html.parser')
    print('got players\n' + str(soup))
    tester = soup.findAll('a')
    # now we have the url for the team roster
    # NOTE(review): the hard-coded anchor index [9] and character slice
    # [9:33] assume a fixed NCAA page layout -- verify if scraping breaks.
    teamRoster = 'http://stats.ncaa.org' + str(tester[9])[9:33]
    session.close()
    # be polite to the server between requests
    sleep(1)
    return (getPlayerStats(teamRoster))
def getPlayerStats(url):
    """Scrape season batting stats for every player on an NCAA roster page.

    Builds one stats.ncaa.org player URL per roster entry, fetches each
    player page, and extracts the current-season batting line.  Sleeps 1s
    between requests to be polite to the server.

    :param url: stats.ncaa.org team roster page URL.
    :return: pandas DataFrame with columns 'Names' (player name) and
        'Stats' (list of stat strings plus derived WOBA and SBA values).
    """
    quote_page = url
    # query the website and return the html to the variable page
    hdr = {
        'Moneyball': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}
    session = requests.Session()
    req = session.get(quote_page, headers=hdr)
    soup = BeautifulSoup(req.content, 'html.parser')
    name_boxes = soup.findAll('a')
    i = 0
    playerurls = []
    playernames = []
    for box in name_boxes:
        print('Another One')
        # first 13 anchors are useless (site navigation precedes the roster)
        if (i >= 13):
            # grabbing the raw HTML string from name_boxes
            raw_html = str(name_boxes[i])
            # using regex to find game_sport_year_ctl_id, org_id, and
            # stats_player_seq to create the unique player urls
            game_sport_year_ctl_id = re.findall(r'\bgame_sport_year_ctl_id=\d*', raw_html)
            org_id = re.findall(r'\borg_id=\d*', raw_html)
            stats_player_seq = re.findall(r'\bstats_player_seq=\d*', raw_html)
            final_player_url = 'http://stats.ncaa.org/player/index?' + game_sport_year_ctl_id[0] + '&' + org_id[0] + '&' + stats_player_seq[0]
            playerurls.append (final_player_url)
            print('url being appended: \n' + str(final_player_url))
            # print('Full name box: \n' + str(name_boxes[i]))
            # need the player name to match up on PDFs later;
            # regex pulls the anchor text from raw_html
            rex = re.compile(r'<a.*?>(.*?)</a>',re.S|re.M)
            match = rex.match(raw_html)
            if match:
                name = match.groups()[0].strip()
            else:
                name = "No Player Name"
            print('Player Being appended: ' + name)
            # playernames.append(str(name_boxes[i])[97:-4])
            playernames.append(name)
        i += 1
    allStatsForEveryone = []
    j=0
    session.close()
    sleep(1)
    # visit each player page in turn
    for player in playerurls:
        quote_page2 = player
        hdr = {
            'Moneyball': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}
        session = requests.Session()
        req = session.get(quote_page2, headers=hdr)
        soup2 = BeautifulSoup(req.content, 'html.parser')
        print('Got player URL')
        # rows that house the data
        dataFields = soup2.findAll('tr', attrs={'class': 'text'})
        # last row holds this season's stats
        dataStr = str(dataFields[len(dataFields) - 1])
        stats = []
        # extract each individual stat from that row
        # NOTE(review): relies on fixed character offsets inside each <div>;
        # this breaks if the NCAA markup changes.
        for k in range(len(dataStr) - 8):
            if dataStr[k:k + 5] == '<div>':
                stats.append("".join(dataStr[k + 25:k + 40].split()))
        # column positions of the stats we keep (17 of them)
        listOfStats = [2, 3, 4, 5, 6, 7, 8, 9, 11, 13, 14, 15, 16, 17, 18, 23, 24]
        statNames = ['BA (2)', 'OBPct(3)', 'SLGPct(4)', 'AB(5)', 'R(6)', 'H(7)', '2B(8)', '3B(9)', 'HR(11)', 'BB(13)', 'HBP(14)', 'RBI(15)','SF(16)', 'SH(17)', 'K(18)', 'SB(23)', 'CS(24)']
        finalStats = []
        # keep only the stats we want
        for jk in range(len(stats)):
            if jk in listOfStats:
                toAdd = stats[jk]
                # if the stat cell is blank/garbled, substitute '0'
                if len(toAdd) < 6:
                    finalStats.append(toAdd)
                else:
                    finalStats.append('0')
        j += 1
        # crude wOBA proxy: (SLG + 2 * OBP) / 3
        woba = round(float(float(finalStats[2]) + float(finalStats[1]) * 2) / 3, 3)
        # steal attempts = SB + CS
        stealAttempts = (int(finalStats[13]) + int(finalStats[12]))
        # firsbase = int(listOfStats[4]) - int(listOfStats[5]) - int(listOfStats[6]) - int(listOfStats[7])
        finalStats.append(woba)
        finalStats.append(stealAttempts)
        # finalStats.append(firsbase)
        statNames.append('WOBA')
        statNames.append('SBA')
        # statNames.append('1b')
        allStatsForEveryone.append(finalStats)
        session.close()
        sleep(1)
    pnames = pd.Series(playernames)
    sdata = pd.Series(allStatsForEveryone)
    data = pd.DataFrame({'Names': pnames, 'Stats': sdata})
    return (data)
# if __name__ == "__main__":
# generateStats()
# if __name__ == "__main__":
# generateStats()
# <a href="/player/index?game_sport_year_ctl_id=13430&stats_player_seq=1648871">Cooper, Mikayla</a>
# print(firstTable.get("href"))
| 36.855643 | 188 | 0.491668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,292 | 0.305654 |
61b92037ab7683a5d5164e4e2f3fc2e47b6106da | 1,172 | py | Python | datasets.py | mokosaur/iris-recognition | 88a99a8b5a4a2c8377b8a92a76750d871373269f | [
"MIT"
] | 17 | 2017-11-11T11:55:13.000Z | 2021-06-11T13:51:32.000Z | datasets.py | pyxsqbs/iris-recognition | 88a99a8b5a4a2c8377b8a92a76750d871373269f | [
"MIT"
] | 5 | 2018-04-21T08:52:21.000Z | 2020-01-11T18:59:13.000Z | datasets.py | pyxsqbs/iris-recognition | 88a99a8b5a4a2c8377b8a92a76750d871373269f | [
"MIT"
] | 10 | 2018-03-07T09:10:19.000Z | 2021-08-31T09:27:48.000Z | import os
import numpy as np
def load_utiris(root=os.path.join('UTIRIS V.1', 'Infrared Images')):
    """Fetches NIR images from the UTIRIS dataset.

    Retrieves image paths and labels for each NIR (.bmp) image under
    *root*.  The directory must already exist; otherwise download the
    dataset from the official page (https://utiris.wordpress.com/).

    Fix: the original hard-coded Windows '\\' separators (including an
    invalid '\\{' escape in a format string), which broke on POSIX
    systems; paths are now built with os.path.join.  *root* is a new,
    backward-compatible parameter (defaults to the original location).

    :param root: dataset directory containing one sub-directory per eye.
    :return: dict with two keys: 'data' contains all image paths,
        'target' contains the labels - each eye gets its unique number.
    """
    data = []
    target = []
    target_i = 0
    for dirpath, dirnames, filenames in os.walk(root):
        index_used = False
        for f in filenames:
            if f.endswith('.bmp'):
                data.append(os.path.join(dirpath, f))
                target.append(target_i)
                index_used = True
        # all .bmp files inside one directory belong to the same eye,
        # so advance the label only once per directory that had images
        if index_used:
            target_i += 1
    return {'data': np.array(data),
            'target': np.array(target)}
# Example usage
if __name__ == '__main__':
import cv2
data = load_utiris()['data']
image = cv2.imread(data[0])
cv2.imshow('test', image)
cv2.waitKey(0)
| 30.051282 | 120 | 0.613481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.457338 |
61b9f66c78b8200c4c7baa5db2be449bedc33128 | 634 | py | Python | crescent/resources/s3/bucket/routing_rule.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | 1 | 2020-03-26T19:20:03.000Z | 2020-03-26T19:20:03.000Z | crescent/resources/s3/bucket/routing_rule.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | null | null | null | crescent/resources/s3/bucket/routing_rule.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | null | null | null | from crescent.core import Model
from .redirect_rule import RedirectRule
from .routing_rule_condition import RoutingRuleCondition
from .constants import ModelRequiredProperties
class RoutingRule(Model):
    """Builder for an S3 website-configuration routing rule.

    Holds a RedirectRule and an optional RoutingRuleCondition; required
    properties are declared via ModelRequiredProperties.ROUTING_RULE.
    """

    def __init__(self):
        super(RoutingRule, self).__init__(required_properties=ModelRequiredProperties.ROUTING_RULE)

    def RedirectRule(self, redirect_rule: RedirectRule):
        """Set the RedirectRule property of this routing rule."""
        return self._set_field("RedirectRule", redirect_rule)

    def RoutingRuleCondition(self, routing_rule_condition: RoutingRuleCondition):
        """Set the RoutingRuleCondition property of this routing rule."""
        return self._set_field("RoutingRuleCondition", routing_rule_condition)
| 39.625 | 99 | 0.817035 | 455 | 0.717666 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
61ba3d50811169b0b9f01a141d232b991d2868ad | 457 | py | Python | arranging.py | mallimuondu/Algorithims | 9e2ff15ab87c56a38aba18d0e31999e12c9a317e | [
"MIT"
] | null | null | null | arranging.py | mallimuondu/Algorithims | 9e2ff15ab87c56a38aba18d0e31999e12c9a317e | [
"MIT"
] | null | null | null | arranging.py | mallimuondu/Algorithims | 9e2ff15ab87c56a38aba18d0e31999e12c9a317e | [
"MIT"
] | null | null | null | input1 = int(input("Enter the first number: "))
input2 = int(input("Enter the second number: "))
input3 = int(input("Enter the third number: "))
input4 = int(input("Enter the fourth number: "))
input5 = int(input("Enter the fifth number: "))
tuple_num = []
tuple_num.append(input1)
tuple_num.append(input2)
tuple_num.append(input3)
tuple_num.append(input4)
tuple_num.append(input5)
print(tuple_num)
tuple_num.sort()
for a in tuple_num:
print(a * a) | 30.466667 | 49 | 0.722101 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.299781 |
61bcb1723cde31df785e9875b137882312c8d564 | 1,086 | py | Python | tests/test_validation_settings.py | IndicoDataSolutions/finetune-transformer-lm | 3534658e5de281e5634c8481b0fb37635b0cb3af | [
"MIT"
] | null | null | null | tests/test_validation_settings.py | IndicoDataSolutions/finetune-transformer-lm | 3534658e5de281e5634c8481b0fb37635b0cb3af | [
"MIT"
] | null | null | null | tests/test_validation_settings.py | IndicoDataSolutions/finetune-transformer-lm | 3534658e5de281e5634c8481b0fb37635b0cb3af | [
"MIT"
] | null | null | null | import unittest
from finetune.util.input_utils import validation_settings
class TestValidationSettings(unittest.TestCase):

    def test_validation_settings(self):
        """
        Ensure LM only training does not error out
        """
        # val_size == 0 disables validation entirely (interval irrelevant).
        size, interval = validation_settings(dataset_size=30, batch_size=4,
                                             val_size=0, val_interval=None,
                                             keep_best_model=False)
        self.assertEqual(size, 0)

        # (dataset_size, batch_size) -> expected (val_size, val_interval)
        expectations = [
            (80, 4, 4, 4),
            (80, 2, 4, 8),
            (400, 4, 20, 20),
        ]
        for dataset_size, batch_size, want_size, want_interval in expectations:
            size, interval = validation_settings(dataset_size=dataset_size,
                                                 batch_size=batch_size,
                                                 val_size=0.05,
                                                 val_interval=None,
                                                 keep_best_model=False)
            self.assertEqual(size, want_size)
            self.assertEqual(interval, want_interval)
| 43.44 | 141 | 0.736648 | 1,009 | 0.929098 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.060773 |
61bd467498fdc058f0a60a3407be859056102776 | 1,743 | py | Python | 96. combination_sum.py | chandravenky/puzzles | 17ec86bbad43862830ba7059a448a33b232b4088 | [
"MIT"
] | null | null | null | 96. combination_sum.py | chandravenky/puzzles | 17ec86bbad43862830ba7059a448a33b232b4088 | [
"MIT"
] | null | null | null | 96. combination_sum.py | chandravenky/puzzles | 17ec86bbad43862830ba7059a448a33b232b4088 | [
"MIT"
] | 1 | 2022-03-13T02:04:46.000Z | 2022-03-13T02:04:46.000Z | import copy
def combination_sum(candidates, target):
    """Return all unique combinations of *candidates* that sum to *target*.

    Each candidate may be reused an unlimited number of times; combinations
    are emitted in non-descending index order, e.g.
    combination_sum([2, 3, 6, 7], 7) == [[2, 2, 3], [7]].

    Fix: the previous version appended candidates[first] instead of
    candidates[i] inside the loop, re-exploring the same prefixes many
    times and relying on an O(len(output)) `not in output` scan to drop
    duplicates.  The canonical backtracking scheme below never generates
    a duplicate in the first place.
    """
    output = []
    n = len(candidates)

    def backtrack(start, curr, remaining):
        # remaining == target - sum(curr): avoids re-summing per call
        if remaining == 0:
            output.append(curr[:])
            return
        if remaining < 0:
            return
        for i in range(start, n):
            curr.append(candidates[i])
            backtrack(i, curr, remaining - candidates[i])
            curr.pop()

    backtrack(0, [], target)
    return output
def combination_sum_test():
    """Smoke-test combination_sum against three known cases.

    :return: tuple of three booleans, one per case, True when the computed
        combinations match the expected output exactly.
    """
    cases = [
        ([2, 3, 6, 7], 7, [[2, 2, 3], [7]]),
        ([2, 3, 5], 8, [[2, 2, 2, 2], [2, 3, 3], [3, 5]]),
        ([2], 1, []),
    ]
    return tuple(expected == combination_sum(candidates, target)
                 for candidates, target, expected in cases)
class Solution(object):
    def combinationSum(self, candidates, target):
        """
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]

        Canonical backtracking: the start index advances together with the
        chosen element, so no duplicate combination is ever generated.
        (The previous version appended candidates[first] on every loop
        iteration and filtered duplicates with a linear `not in output`
        scan, doing exponentially more work for the same result.)
        """
        output = []
        n = len(candidates)

        def backtrack(start, curr, remaining):
            # remaining == target - sum(curr): avoids re-summing per call
            if remaining == 0:
                output.append(curr[:])
                return
            if remaining < 0:
                return
            for i in range(start, n):
                curr.append(candidates[i])
                backtrack(i, curr, remaining - candidates[i])
                curr.pop()

        backtrack(0, [], target)
        return output
| 24.9 | 224 | 0.570281 | 763 | 0.437751 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.076305 |
61c22efab1014a2172a1411d32a305d07c0df66e | 6,086 | py | Python | claimreview/claimreview/parser.py | MartinoMensio/claimreview-scraper-other | f1105ef33d706610e8952b389e435590ff85439b | [
"MIT"
] | 2 | 2019-03-05T15:24:44.000Z | 2020-05-27T06:05:48.000Z | claimreview/claimreview/parser.py | MartinoMensio/claimreview-scraper-other | f1105ef33d706610e8952b389e435590ff85439b | [
"MIT"
] | null | null | null | claimreview/claimreview/parser.py | MartinoMensio/claimreview-scraper-other | f1105ef33d706610e8952b389e435590ff85439b | [
"MIT"
] | 1 | 2019-03-05T15:25:00.000Z | 2019-03-05T15:25:00.000Z | import json
import microdata
import dateutil.parser
class ClaimReviewParser(object):
    """Extract schema.org ClaimReview records from a scraped fact-check page.

    ClaimReview items are collected from two sources -- JSON-LD
    ``<script type="application/ld+json">`` blocks and HTML microdata --
    and normalised into plain dictionaries.
    """

    # Canonical spellings for fact-checker names that appear inconsistently.
    name_fixes = {
        'Politifact': 'PolitiFact'
    }

    def parse(self, response, language='en'):
        """Return all ClaimReview dicts found in *response*, each tagged
        with *language*.

        *response* is a scrapy-style response (exposes .css / .text).
        Handles three JSON-LD layouts: a top-level ClaimReview, a WebPage
        whose mainEntity.review is a ClaimReview, and an @graph list.
        """
        items = self.get_microdata_items(response)
        scripts = response.css(
            'script[type="application/ld+json"]::text').extract()
        for script in scripts:
            item = json.loads(script)
            # '@type' may be a string or a list of types
            if '@type' in item and (item['@type'] == 'ClaimReview' or 'ClaimReview' in item['@type']):
                items.append(self.convert_claimreview(item))
            elif '@type' in item and item['@type'] == 'WebPage' and 'mainEntity' in item and 'review' in item['mainEntity'] and item['mainEntity']['review'].get('@type') == 'ClaimReview':
                items.append(self.convert_claimreview(
                    item['mainEntity']['review']))
            elif '@graph' in item and isinstance(item['@graph'], list):
                reviews = [obj for obj in item['@graph']
                           if obj.get('@type') == 'ClaimReview']
                for review in reviews:
                    items.append(self.convert_claimreview(review))
        for item in items:
            item['language'] = language
        return items

    def convert_claimreview(self, item):
        """Normalise one JSON-LD ClaimReview object into a plain dict.

        Dates are parsed to datetime (or None); author / sameAs fields are
        coerced to lists.
        """
        rr = item.get('reviewRating')
        ir = item.get('itemReviewed')
        url = item.get('url')
        rv = rr.get('ratingValue')
        return dict(
            type=item.get('@type'),
            datePublished=self.parse_date(item.get('datePublished')),
            dateModified=self.parse_date(item.get('dateModified')),
            url=url,
            author=[
                dict(
                    type=a.get('@type'),
                    name=self.fix_name(a.get('name')),
                    url=a.get('url'),
                    twitter=a.get('twitter'),
                    sameAs=[str(s) for s in self.listify(a.get('sameAs'))]
                ) for a in self.listify(item.get('author'))
            ],
            claimReviewed=item.get('claimReviewed'),
            reviewRating=dict(
                type=rr.get('@type'),
                ratingValue=rv,
                bestRating=rr.get('bestRating'),
                worstRating=rr.get('worstRating'),
                alternateName=rr.get('alternateName')
            ),
            itemReviewed=dict(
                type=ir.get('@type'),
                author=[
                    dict(
                        type=a.get('@type'),
                        name=a.get('name'),
                        url=a.get('url'),
                        twitter=a.get('twitter'),
                        sameAs=[str(s)
                                for s in self.listify(a.get('sameAs'))]
                    ) for a in self.listify(ir.get('author'))
                ],
                datePublished=self.parse_date(ir.get('datePublished')),
                sameAs=[str(s) for s in self.listify(ir.get('sameAs'))]
            ),
        )

    def get_microdata_items(self, response):
        """Extract ClaimReview items embedded as HTML microdata and
        normalise them to the same dict shape as convert_claimreview
        (plus 'image' and 'keywords' fields)."""
        items = microdata.get_items(response.text)
        result = []
        for item in items:
            # only keep items whose most specific itemtype is a ClaimReview
            if 'ClaimReview' in str(item.itemtype[-1]):
                rr = item.get('reviewRating')
                img = item.get('image')
                ir = item.get('itemReviewed')
                url = str(item.get('url'))
                result.append(
                    dict(
                        type=str(item.itemtype[-1]),
                        datePublished=self.parse_date(
                            item.get('datePublished')),
                        dateModified=self.parse_date(item.get('dateModified')),
                        url=url,
                        author=self.microdata_authors_from(item),
                        image=dict(
                            type=str(img.itemtype[-1]),
                            url=str(img.get('url')),
                            width=img.get('width'),
                            height=img.get('height')
                        ) if img else None,
                        claimReviewed=item.get('claimReviewed'),
                        reviewRating=dict(
                            type=str(rr.itemtype[-1]),
                            ratingValue=rr.get('ratingValue'),
                            bestRating=rr.get('bestRating'),
                            worstRating=rr.get('worstRating'),
                            # some sites publish the verdict under 'name'
                            alternateName=rr.get(
                                'alternateName') or rr.get('name')
                        ),
                        itemReviewed=dict(
                            type=str(ir.itemtype[-1]),
                            author=self.microdata_authors_from(ir),
                            datePublished=self.parse_date(
                                ir.get('datePublished')),
                            sameAs=[str(s) for s in ir.get_all('sameAs')]
                        ),
                        keywords=str(item.get('keywords')) if item.get(
                            'keywords') else None,
                    )
                )
        return result

    def microdata_authors_from(self, item):
        """Normalise the author entries of a microdata item to dicts."""
        return [
            dict(
                type=str(a.itemtype[-1]),
                name=self.fix_name(a.get('name')),
                url=str(a.get('url')) if a.get('url') else None,
                twitter=str(a.get('twitter')) if a.get('twitter') else None,
                sameAs=[str(s) for s in a.get_all('sameAs')]
            ) for a in item.get_all('author')
        ]

    def listify(self, obj):
        """Wrap *obj* in a list unless it already is one (None -> [])."""
        if obj == None:
            return []
        elif isinstance(obj, list):
            return obj
        else:
            return [obj]

    def parse_date(self, dateString):
        """Parse a date string with dateutil; return None when the value
        is missing or unparseable."""
        if dateString is not None:
            try:
                return dateutil.parser.parse(dateString)
            except ValueError:
                return None
        else:
            return None

    def fix_name(self, name):
        """Map known alternate spellings of fact-checker names to their
        canonical form (pass-through for unknown names)."""
        return self.name_fixes.get(name, name)
| 37.801242 | 187 | 0.459086 | 6,031 | 0.990963 | 0 | 0 | 0 | 0 | 0 | 0 | 815 | 0.133914 |
61c26d6718f096ebcf35473494cb3a2610963d4a | 1,097 | py | Python | map/migrations/0001_initial.py | benjaoming/django-denmark.org | f708ce95720fd3452913a003ccf8179f869826c4 | [
"MIT"
] | null | null | null | map/migrations/0001_initial.py | benjaoming/django-denmark.org | f708ce95720fd3452913a003ccf8179f869826c4 | [
"MIT"
] | 1 | 2021-05-03T09:27:59.000Z | 2021-05-03T09:27:59.000Z | map/migrations/0001_initial.py | Nadiahansen15/django-denmark.org | d3be44764367a7c1059d7a0e896fbbc56ce1771f | [
"MIT"
] | null | null | null | # Generated by Django 2.0.3 on 2018-03-16 00:17
from django.conf import settings
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the MapEntry model
    # (a geographic point owned by a user, optionally named) with a
    # unique (owner, name) constraint.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='MapEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # WGS84 point (srid=4326)
                ('location', django.contrib.gis.db.models.fields.PointField(srid=4326)),
                ('name', models.CharField(blank=True, help_text="Leave blank if it's yourself", max_length=256, null=True, verbose_name='Name of place')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='mapentry',
            unique_together={('owner', 'name')},
        ),
    ]
| 34.28125 | 154 | 0.640839 | 895 | 0.815861 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.142206 |
61c2fff5167df3eebef23e41dd213eb551eb1dca | 34 | py | Python | UNET_EM_DATASET/UNET_EM_DATASET_TERNARY/utility.py | hossein1387/U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation | f919935b975461052d5196372a837356379f973a | [
"MIT"
] | 69 | 2019-07-28T06:53:55.000Z | 2022-03-22T01:07:28.000Z | UNET_EM_DATASET/UNET_EM_DATASET_a8_0_w0_8/utility.py | hossein1387/Fixed-Point-U-Net-Quantization-for-Medical-Image-Segmentation | 4835a3d08b38617b952023f19bf59679126f5f59 | [
"MIT"
] | 6 | 2019-08-02T07:47:09.000Z | 2021-06-11T14:35:53.000Z | UNET_EM_DATASET/UNET_EM_DATASET_a8_0_w0_8/utility.py | hossein1387/Fixed-Point-U-Net-Quantization-for-Medical-Image-Segmentation | 4835a3d08b38617b952023f19bf59679126f5f59 | [
"MIT"
] | 20 | 2019-08-10T07:08:56.000Z | 2021-10-09T17:02:30.000Z | ../UNET_EM_DATASET_BASE/utility.py | 34 | 34 | 0.852941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
61c38035d99d5204568c03e276fa50692ae313a4 | 13,602 | py | Python | frozen_model/gap_model_triplet.py | Yorko/gender-unbiased_BERT-based_pronoun_resolution | 67d8c6b3fce94bbeb75bbc644a3111b168e7c25b | [
"Apache-2.0"
] | 47 | 2019-05-21T06:30:36.000Z | 2022-02-18T08:35:13.000Z | frozen_model/gap_model_triplet.py | Yorko/gender-unbiased_BERT-based_pronoun_resolution | 67d8c6b3fce94bbeb75bbc644a3111b168e7c25b | [
"Apache-2.0"
] | 1 | 2022-01-12T17:40:19.000Z | 2022-01-13T10:50:17.000Z | frozen_model/gap_model_triplet.py | Yorko/gender-unbiased_BERT-based_pronoun_resolution | 67d8c6b3fce94bbeb75bbc644a3111b168e7c25b | [
"Apache-2.0"
] | 6 | 2019-08-12T16:10:52.000Z | 2021-11-15T08:44:31.000Z | import pandas as pd
from tqdm import tqdm
import json
import time
import os
from keras import backend, models, layers, initializers, regularizers, constraints, optimizers
from keras import callbacks as kc
from sklearn.model_selection import cross_val_score, KFold, train_test_split, GroupKFold
from sklearn.metrics import log_loss
import numpy as np
import tensorflow as tf
import random as rn
from keras import backend as K
from tensorflow import set_random_seed
# Fix the TensorFlow and NumPy global seeds at import time; each
# GAPModelTriplet instance additionally re-seeds via init_seed() below.
set_random_seed(17)
np.random.seed(17)
class GAPModelTriplet(object):
def __init__(
self,
n_layers=10,
embeddings_file={'train': 'input/emb10_64_cased_train.json', 'test': 'input/emb10_64_cased_test.json'}):
self.layer_indexes = [-(l + 1) for l in range(n_layers)]
self.embeddings_file = embeddings_file
self.buckets = [1, 2, 3, 4, 5, 8, 16, 32, 64]
self.init_seed(1122)
    def init_seed(self, seed):
        """Seed every RNG involved (Python hash, random, NumPy, TensorFlow)
        and install a single-threaded TF session into Keras so runs are
        reproducible.

        :param seed: integer seed applied to all generators; also stored
            on the instance for later use (e.g. layer initializers).
        """
        self.seed = seed
        os.environ['PYTHONHASHSEED'] = '0'
        np.random.seed(seed)
        rn.seed(seed)
        # single-threaded execution removes nondeterministic op scheduling
        session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
        tf.set_random_seed(seed)
        sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
        K.set_session(sess)
    def load_embeddings(self, filename, data, idx_map, idx_samples):
        """Load per-sample embedding triplets from a JSON-lines dump.

        Two passes over *filename*:
          1. lines with segment == 0 create one triplet slot
             ``[emb, zeros, zeros]`` per sample in data['emb'];
          2. lines with segment 1 or 2 fill the remaining two slots of
             the matching triplet (matched via *idx_map*).
        Segment semantics are presumably pronoun / candidate A / candidate
        B -- TODO confirm against the embedding-dump producer.

        :param filename: JSON-lines file; each line has keys 'idx',
            'df_idx', 'segment' and 'embeddings' (list of layers with
            'values').
        :param data: dict with keys 'emb', 'ids', 'df_ids'; appended to
            in place.
        :param idx_map: maps a line's 'idx' to its position in data['emb'];
            filled in place.
        :param idx_samples: maps a dataframe row id to the list of sample
            positions created for it; filled in place.
        :return: (data, idx_map, idx_samples), all mutated in place.
        """
        with open(filename) as f:
            for line in tqdm(f):
                sample = json.loads(line)
                if sample['segment'] > 0:
                    continue
                # concatenate all layer vectors into one flat embedding
                layers = []
                for layer in sample['embeddings']:
                    layers += layer['values']
                idx = sample['idx']
                idx_map[idx] = len(data['emb'])
                df_idx = sample['df_idx']
                if df_idx not in idx_samples:
                    idx_samples[df_idx] = []
                idx_samples[df_idx].append(len(data['emb']))
                # y = [0, 0, 0]
                # y[sample['label']] = 1
                # data[name]['y'].append(y)
                data['ids'].append(idx)
                data['df_ids'].append(df_idx)
                layers = np.array(layers)
                # placeholder zeros for segments 1 and 2, filled below
                data['emb'].append([layers, np.zeros(layers.shape), np.zeros(layers.shape)])
        with open(filename) as f:
            for line in tqdm(f):
                sample = json.loads(line)
                if sample['segment'] == 0:
                    continue
                layers = []
                for layer in sample['embeddings']:
                    layers += layer['values']
                idx = sample['idx']
                segment = sample['segment']
                data['emb'][idx_map[idx]][segment] = np.array(layers)
        return data, idx_map, idx_samples
def load_feats(self, filename, ids_filename, data, idx_samples):
feature_idx_map = []
with open(ids_filename) as f:
f.readline()
for line in f:
feature_idx_map.append(line.strip().split('\t')[0])
with open(filename) as f:
f.readline()
for i, line in enumerate(f):
arr = line.strip().split('\t')
feats = [float(x) for x in arr[1:]]
idx = feature_idx_map[int(arr[0])]
for sample_id in idx_samples[idx]:
data[sample_id] = feats
return data
def load_labels(self, filename, data, idx_samples):
df = pd.read_csv(filename, sep='\t')
df['label'] = 0
if 'A-coref' in df.columns:
df.loc[df['A-coref'] == True, 'label'] = 1
df.loc[df['B-coref'] == True, 'label'] = 2
for i, row in df.iterrows():
idx = row.ID
for sample_id in idx_samples[idx]:
data[sample_id][row['label']] = 1
return data
def get_model(self, input_shapes):
feat_shape = input_shapes[0]
emb_shapes = input_shapes[1:]
def build_emb_model(input_shape, emb_num=0, dense_layer_size=128, dropout_rate=0.9):
X_input = layers.Input([input_shape])
X = layers.Dense(dense_layer_size, name='emb_dense_{}'.format(emb_num),
kernel_initializer=initializers.glorot_uniform(seed=self.seed))(X_input)
X = layers.BatchNormalization(name='emb_bn_{}'.format(emb_num))(X)
X = layers.Activation('relu')(X)
X = layers.Dropout(dropout_rate, seed=self.seed)(X)
# Create model
model = models.Model(inputs=X_input, outputs=X, name='emb_model_{}'.format(emb_num))
return model
emb_models = []
for i, emb_shape in enumerate(emb_shapes):
triplet_input = layers.Input([emb_shape])
triplet_model_shape = int(emb_shape / 3)
triplet_model = build_emb_model(triplet_model_shape, emb_num=i, dense_layer_size=112)
P = layers.Lambda(lambda x: x[:, :triplet_model_shape])(triplet_input)
A = layers.Lambda(lambda x: x[:, triplet_model_shape: triplet_model_shape * 2])(triplet_input)
B = layers.Lambda(lambda x: x[:, triplet_model_shape * 2: triplet_model_shape * 3])(triplet_input)
A_out = triplet_model(layers.Subtract()([P, A]))
B_out = triplet_model(layers.Subtract()([P, B]))
triplet_out = layers.concatenate([A_out, B_out], axis=-1)
merged_model = models.Model(inputs=triplet_input, outputs=triplet_out, name='triplet_model_{}'.format(i))
emb_models.append(merged_model)
emb_num = len(emb_models)
emb_models += [build_emb_model(emb_shape, emb_num=i + emb_num, dense_layer_size=112) for i, emb_shape in
enumerate(emb_shapes)]
def build_feat_model(input_shape, dense_layer_size=128, dropout_rate=0.8):
X_input = layers.Input([input_shape])
X = layers.Dense(dense_layer_size, name='feat_dense_{}'.format(0),
kernel_initializer=initializers.glorot_normal(seed=self.seed))(X_input)
X = layers.Activation('relu')(X)
X = layers.Dropout(dropout_rate, seed=self.seed)(X)
# Create model
model = models.Model(inputs=X_input, outputs=X, name='feat_model')
return model
feat_model = build_feat_model(feat_shape, dense_layer_size=128)
lambd = 0.02 # L2 regularization
# Combine all models into one model
merged_out = layers.concatenate([feat_model.output] + [emb_model.output for emb_model in emb_models])
merged_out = layers.Dense(3, name='merged_output', kernel_regularizer=regularizers.l2(lambd),
kernel_initializer=initializers.glorot_uniform(seed=self.seed))(merged_out)
merged_out = layers.BatchNormalization(name='merged_bn')(merged_out)
merged_out = layers.Activation('softmax')(merged_out)
combined_model = models.Model([feat_model.input] + [emb_model.input for emb_model in emb_models],
outputs=merged_out, name='merged_model')
# print(combined_model.summary())
return combined_model
def train_model(self, embeddings, features, input_files):
learning_rate = 0.02
decay = 0.03
n_fold = 5
batch_size = 64
epochs = 10000
patience = 50
# n_test = 100
test_ids = {}
test_ids_list = []
Y_test = []
for model_i, embeddings_files in enumerate(embeddings):
for name in ['test', 'train']:
print('Processing {} datasets'.format(name))
for file_i, embeddings_file in enumerate(embeddings_files[name]):
if file_i == 0:
data, idx_map, idx_samples = self.load_embeddings(
embeddings_file,
data={'emb': [], 'ids': [], 'df_ids': []},
idx_map={},
idx_samples={})
else:
data, idx_map, idx_samples = self.load_embeddings(embeddings_file, data, idx_map, idx_samples)
for i, emb in enumerate(data['emb']):
data['emb'][i] = np.concatenate(emb)
if model_i == 0:
feats = [[] for _ in range(len(data['emb']))]
labels = [[0, 0, 0] for _ in range(len(data['emb']))]
if name == 'train':
X_emb_train = [np.array(data['emb'])]
else:
X_emb_test = [np.array(data['emb'])]
# Load features
for features_i, filename in enumerate(features[name]):
feats = self.load_feats(filename, features['{}_ids'.format(name)][features_i], feats,
idx_samples)
if name == 'train':
X_feats_train = np.array(feats)
else:
X_feats_test = np.array(feats)
# Load labels
for filename in input_files[name]:
labels = self.load_labels(filename, labels, idx_samples)
if name == 'train':
Y_train = np.array(labels)
else:
for data_i, idx in enumerate(data['df_ids']):
if idx not in test_ids:
test_ids_list.append(idx)
test_ids[idx] = len(test_ids)
Y_test.append(labels[data_i])
Y_test = np.array(Y_test)
else:
if name == 'train':
X_emb_train.append(np.array(data['emb']))
else:
X_emb_test.append(np.array(data['emb']))
print('Train shape:', [x.shape for x in X_emb_train], X_feats_train.shape)
print('Test shape:', [x.shape for x in X_emb_test], X_feats_train.shape)
# Normalise feats
need_normalisation = True
if need_normalisation:
all_feats = np.concatenate([X_feats_train, X_feats_test])
all_max = np.max(all_feats, axis=0)
X_feats_train /= all_max
X_feats_test /= all_max
model_shapes = [X_feats_train.shape[1]] + [x.shape[1] for x in X_emb_train]
X_test = [X_feats_test] + X_emb_test + X_emb_test
Y_test = np.array(Y_test)
prediction = np.zeros((len(test_ids), 3)) # testing predictions
prediction_cnt = np.zeros((len(test_ids), 3)) # testing predictions counts
# for seed in [1, 6033, 100007]:
for seed in [1122]:
# for seed in [1, ]:
# self.init_seed(seed)
# Training and cross-validation
# folds = GroupKFold(n_splits=n_fold)
folds = KFold(n_splits=n_fold, shuffle=True, random_state=seed)
scores = []
# for fold_n, (train_index, valid_index) in enumerate(folds.split(X_emb_train, groups=groups)):
for fold_n, (train_index, valid_index) in enumerate(folds.split(Y_train)):
# split training and validation data
print('Fold', fold_n, 'started at', time.ctime())
X_tr = [X_feats_train[train_index]] + [x[train_index] for x in X_emb_train] + [x[train_index] for x in
X_emb_train]
X_val = [X_feats_train[valid_index]] + [x[valid_index] for x in X_emb_train] + [x[valid_index] for x in
X_emb_train]
Y_tr, Y_val = Y_train[train_index], Y_train[valid_index]
# Define the model, re-initializing for each fold
classif_model = self.get_model(model_shapes)
classif_model.compile(optimizer=optimizers.Adam(lr=learning_rate, decay=decay),
loss="categorical_crossentropy")
callbacks = [kc.EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True)]
# train the model
classif_model.fit(x=X_tr, y=Y_tr, epochs=epochs, batch_size=batch_size, callbacks=callbacks,
validation_data=(X_val, Y_val), verbose=0)
# make predictions on validation and test data
pred_valid = classif_model.predict(x=X_val, verbose=0)
pred = classif_model.predict(x=X_test, verbose=0)
print('Stopped at {}, score {}'.format(callbacks[0].stopped_epoch, log_loss(Y_val, pred_valid)))
scores.append(log_loss(Y_val, pred_valid))
for i, idx in enumerate(test_ids_list):
prediction[test_ids[idx]] += pred[i]
prediction_cnt[test_ids[idx]] += np.ones(3)
# Print CV scores, as well as score on the test data
print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores)))
print(scores)
print("Test score (original dirty labels):", log_loss(Y_test, prediction / prediction_cnt))
prediction /= prediction_cnt
# Write the prediction to file for submission
np.savetxt('test_prediction.txt', prediction)
| 44.306189 | 119 | 0.551978 | 13,096 | 0.9628 | 0 | 0 | 0 | 0 | 0 | 0 | 1,540 | 0.113219 |
61c48854a1a1f81599731dfa93278a647742bfa8 | 430 | py | Python | plotly/graph_objs/scattermapbox/__init__.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/graph_objs/scattermapbox/__init__.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/graph_objs/scattermapbox/__init__.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | from ._unselected import Unselected
from plotly.graph_objs.scattermapbox import unselected
from ._textfont import Textfont
from ._stream import Stream
from ._selected import Selected
from plotly.graph_objs.scattermapbox import selected
from ._marker import Marker
from plotly.graph_objs.scattermapbox import marker
from ._line import Line
from ._hoverlabel import Hoverlabel
from plotly.graph_objs.scattermapbox import hoverlabel
| 35.833333 | 54 | 0.862791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
61c555edfec0e4473d4111b7bd23857c81fc59d2 | 140 | py | Python | Javatar.py | evandrocoan/Javatar | b38d4f9d852565d6dcecb236386628b4e56d9d09 | [
"MIT"
] | 142 | 2015-01-11T19:43:17.000Z | 2021-11-15T11:44:56.000Z | Javatar.py | evandroforks/Javatar | b38d4f9d852565d6dcecb236386628b4e56d9d09 | [
"MIT"
] | 46 | 2015-01-02T20:29:37.000Z | 2018-09-15T05:12:52.000Z | Javatar.py | evandroforks/Javatar | b38d4f9d852565d6dcecb236386628b4e56d9d09 | [
"MIT"
] | 25 | 2015-01-16T01:33:39.000Z | 2022-01-07T11:12:43.000Z | from .commands import *
from .core.event_handler import *
from .utils import (
Constant
)
def plugin_loaded():
    # Sublime Text API hook, invoked once the plugin host has finished
    # loading this package; all startup work is delegated to Constant.
    Constant.startup()
| 14 | 33 | 0.707143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
61c5fab62fcc113bff6d3faa8d1850fad0a74177 | 1,584 | py | Python | sample/posts/filters.py | pine2104/docker_uwsgi_nginx_django | 47b1618048131fa7c654b9144a4b3372b9662825 | [
"MIT"
] | null | null | null | sample/posts/filters.py | pine2104/docker_uwsgi_nginx_django | 47b1618048131fa7c654b9144a4b3372b9662825 | [
"MIT"
] | null | null | null | sample/posts/filters.py | pine2104/docker_uwsgi_nginx_django | 47b1618048131fa7c654b9144a4b3372b9662825 | [
"MIT"
] | null | null | null | from .models import Post, Jcpaper
import django_filters
from django import forms
from django_filters.widgets import RangeWidget
class PostFilter(django_filters.FilterSet):
    """Filter set for Post list views: substring search on title/content
    plus a date range on date_posted."""
    # Case-insensitive substring match, rendered as a Bootstrap text input.
    title = django_filters.CharFilter(
        lookup_expr='icontains',
        widget=forms.TextInput(attrs={'class': 'form-control'})
    )
    content = django_filters.CharFilter(
        lookup_expr='icontains',
        widget=forms.TextInput(attrs={'class': 'form-control'})
    )
    # NOTE(review): DateFromToRangeFilter builds its own __gte/__lte/__range
    # lookups from the two inputs; lookup_expr='gte' here looks ignored —
    # confirm against the installed django-filter version.
    date_posted = django_filters.DateFromToRangeFilter(
        field_name="date_posted",
        lookup_expr='gte',
        widget=RangeWidget(attrs={'type': 'date'})
    )

    class Meta:
        model = Post
        # category and author get default exact-match filters from the model.
        fields = ['title', 'content', 'category', 'author', 'date_posted']
class JCFilter(django_filters.FilterSet):
    """Filter set for journal-club papers; mirrors PostFilter with an extra
    journal search and an hwl_recommend boolean toggle."""
    title = django_filters.CharFilter(
        lookup_expr='icontains',
        widget=forms.TextInput(attrs={'class': 'form-control'})
    )
    journal = django_filters.CharFilter(
        lookup_expr='icontains',
        widget=forms.TextInput(attrs={'class': 'form-control'})
    )
    hwl_recommend = django_filters.BooleanFilter()
    content = django_filters.CharFilter(
        lookup_expr='icontains',
        widget=forms.TextInput(attrs={'class': 'form-control'})
    )
    # NOTE(review): lookup_expr='gte' likely ignored by DateFromToRangeFilter
    # (see PostFilter.date_posted).
    date_posted = django_filters.DateFromToRangeFilter(
        field_name="date_posted",
        lookup_expr='gte',
        widget=RangeWidget(attrs={'type': 'date'})
    )

    class Meta:
        model = Jcpaper
        fields = ['title', 'journal', 'hwl_recommend', 'content', 'presenter', 'date_posted']
61c62c3d85b347faa604f4c73ff8c372c3a358fc | 11,293 | py | Python | src/odinapi/utils/datamodel.py | Odin-SMR/odin-api | dea75016a6f9e61be0dd64a698b03ded5a6f0f10 | [
"MIT"
] | null | null | null | src/odinapi/utils/datamodel.py | Odin-SMR/odin-api | dea75016a6f9e61be0dd64a698b03ded5a6f0f10 | [
"MIT"
] | 85 | 2020-04-01T06:24:29.000Z | 2022-03-28T04:28:56.000Z | src/odinapi/utils/datamodel.py | Odin-SMR/odin-api | dea75016a6f9e61be0dd64a698b03ded5a6f0f10 | [
"MIT"
] | null | null | null | import attr
from typing import List, Any, Dict, Union
import datetime as dt
from enum import Enum, unique, auto
from dateutil.relativedelta import relativedelta
import numpy as np # type: ignore
# ISO-8601 UTC timestamp format used throughout the level-2 file headers.
DATEFMT = "%Y-%m-%dT%H:%M:%SZ"
# Global attributes shared by every Odin/SMR level-2 product file.
COMMON_FILE_HEADER_DATA = {
    "creator_name": 'Donal Murtagh',
    "creator_url": 'odin.rss.chalmers.se',
    "creator_email": 'donal.murtagh@chalmers.se',
    "address": '412 96 Gothenburg, Sweden',
    "institution": 'Chalmers University of Technology',
    "platform": 'Odin',
    "sensor": 'SMR',
    "version_l1b": "8",
    "version_l2": "3.0.0"
}
# The three kinds of level-2 records a parameter can belong to.
@unique
class L2Type(Enum):
    l2 = auto()      # retrieved product data
    l2i = auto()     # inversion metadata
    l2anc = auto()   # ancillary (geolocation/time) data
# Ancillary variables; the member value is the file-attribute description.
@unique
class L2ancDesc(Enum):
    LST = "Mean local solar time for the scan."
    Orbit = "Odin/SMR orbit number."
    SZA1D = (
        "Mean solar zenith angle of the observations used in the retrieval "
        "process.")
    SZA = (
        "Approximate solar zenith angle corresponding to each retrieval"
        " value.")
    Theta = "Estimate of the potential temperature profile."

    @property
    def l2type(self) -> L2Type:
        # Every ancillary description belongs to the l2anc record type.
        return L2Type.l2anc
# Retrieved-product variables; the member value is the description written
# to the output file.
@unique
class L2Desc(Enum):
    Altitude = "Altitude of retrieved values."
    Apriori = "A priori profile used in the inversion algorithm."
    AVK = "Averaging kernel matrix."
    ErrorNoise = (
        "Error due to measurement thermal noise (square root of the "
        "diagonal elements of the corresponding error matrix).")
    ErrorTotal = (
        "Total retrieval error, corresponding to the error due to thermal"
        " noise and all interfering smoothing errors (square root of the"
        " diagonal elements of the corresponding error matrix).")
    InvMode = "Inversion mode."
    Lat1D = "A scalar representative latitude of the retrieval."
    Latitude = "Approximate latitude of each retrieval value."
    Lon1D = "A scalar representative longitude of the retrieval."
    Longitude = "Approximate longitude of each retrieval value."
    MeasResponse = (
        "Measurement response, defined as the row sum of the averaging"
        " kernel matrix.")
    Pressure = "Pressure grid of the retrieved profile."
    Profile = "Retrieved temperature or volume mixing ratio profile."
    Quality = "Quality flag."
    ScanID = "Satellite time word scan identifier."
    Temperature = (
        "Estimate of the temperature profile (corresponding to the"
        " ZPT input data).")
    Time = "Mean time of the scan."
    VMR = "Volume mixing ratio or retrieved profile."

    @property
    def l2type(self) -> L2Type:
        # Every product description belongs to the l2 record type.
        return L2Type.l2
# Inversion-metadata variables; the member value is the file description.
@unique
class L2iDesc(Enum):
    GenerationTime = "Processing date."
    Residual = (
        "The difference between the spectra matching retrieved state and used "
        "measurement spectra"
    )
    MinLmFactor = (
        "The minimum value of the Levenberg - Marquardt factor during "
        "the OEM iterations"
    )
    FreqMode = "Odin/SMR observation frequency mode."

    @property
    def l2type(self) -> L2Type:
        # Every inversion-metadata description belongs to the l2i record type.
        return L2Type.l2i
# NetCDF storage dtypes used when writing variables.
@unique
class DType(Enum):
    i8 = "i8"
    f4 = "f4"
    double = "double"
# Variable dimensionality: scalar per scan, profile, or matrix per scan.
@unique
class Dimension(Enum):
    d1 = ["time"]
    d2 = ["time", "level"]
    d3 = ["time", "level", "level"]
# Physical units written to the file; `product` is a placeholder resolved
# per product by Parameter.get_unit().
@unique
class Unit(Enum):
    time = "days since 1858-11-17 00:00"
    altitude = "m"
    lat = "degrees north"
    lon = "degrees east"
    hours = "hours"
    unitless = "-"
    pressure = "Pa"
    temperature = "K"
    degrees = "degrees"
    koverk = "K/K"
    poverp = "%/%"
    product = "product"
@attr.s
class Parameter:
    """One output variable of a level-2 file: its description enum, unit,
    storage dtype and dimensionality."""
    description = attr.ib(type=Union[L2Desc, L2ancDesc, L2iDesc])
    unit = attr.ib(type=Unit)
    dtype = attr.ib(type=DType)
    dimension = attr.ib(type=Dimension)

    @property
    def name(self) -> str:
        """Variable name, taken from the description enum member."""
        return self.description.name

    @property
    def l2type(self) -> L2Type:
        """Record type (l2 / l2i / l2anc) this parameter belongs to."""
        return self.description.l2type

    def get_description(self, istemperature: bool) -> str:
        """Human-readable description; `Profile` is specialised per product."""
        if self.description is not L2Desc.Profile:
            return self.description.value
        if istemperature:
            return "Retrieved temperature profile."
        return "Retrieved volume mixing ratio."

    def get_unit(self, istemperature: bool) -> Unit:
        """Concrete unit to write; product-dependent placeholders resolve here."""
        if self.description is L2Desc.AVK:
            return Unit.koverk if istemperature else Unit.poverp
        if self.unit is not Unit.product:
            return self.unit
        return Unit.temperature if istemperature else Unit.unitless
# Container describing the full set of variables in one level-2 file.
@attr.s
class L2File:
    parameters = attr.ib(type=List[Parameter])
# Quality-screening thresholds applied to a scan (see L2i.isvalid()).
@attr.s
class Filter:
    residual = attr.ib(type=float)
    minlmfactor = attr.ib(type=float)
# Ancillary data for one scan; field names mirror the L2ancDesc members.
@attr.s
class L2anc:
    LST = attr.ib(type=float)
    Orbit = attr.ib(type=int)
    SZA1D = attr.ib(type=float)
    SZA = attr.ib(type=List[float])
    Theta = attr.ib(type=List[float])
# Retrieved product data for one scan; field names mirror the L2Desc members.
@attr.s
class L2:
    InvMode = attr.ib(type=str)
    ScanID = attr.ib(type=int)
    Time = attr.ib(type=dt.datetime)
    Lat1D = attr.ib(type=float)
    Lon1D = attr.ib(type=float)
    Quality = attr.ib(type=float)
    Altitude = attr.ib(type=List[float])
    Pressure = attr.ib(type=List[float])
    Profile = attr.ib(type=List[float])
    Latitude = attr.ib(type=List[float])
    Longitude = attr.ib(type=List[float])
    Temperature = attr.ib(type=List[float])
    ErrorTotal = attr.ib(type=List[float])
    ErrorNoise = attr.ib(type=List[float])
    MeasResponse = attr.ib(type=List[float])
    Apriori = attr.ib(type=List[float])
    VMR = attr.ib(type=List[float])
    AVK = attr.ib(type=List[List[float]])
@attr.s
class L2i:
    """Inversion metadata for one scan plus its quality screening."""
    GenerationTime = attr.ib(type=dt.datetime)
    Residual = attr.ib(type=float)
    MinLmFactor = attr.ib(type=float)
    FreqMode = attr.ib(type=int)

    @property
    def filter(self) -> Filter:
        """Screening thresholds; modes 8, 13 and 19 allow a larger LM factor."""
        lm_limit = 10. if self.FreqMode in [8., 13., 19.] else 2.
        return Filter(residual=1.5, minlmfactor=lm_limit)

    def isvalid(self) -> bool:
        """True when residual and LM factor are finite and within limits."""
        if not (np.isfinite(self.Residual) and np.isfinite(self.MinLmFactor)):
            return False
        limits = self.filter
        return (
            self.Residual <= limits.residual
            and self.MinLmFactor <= limits.minlmfactor
        )
# Bundles the three record types for one scan; attrs validators assert that
# each record exposes every attribute named by its description enum.
@attr.s
class L2Full:
    l2i = attr.ib(type=L2i)
    l2anc = attr.ib(type=L2anc)
    l2 = attr.ib(type=L2)

    # validators connect this class to L2iDesc, L2ancDesc, L2Desc, and Parameter
    @l2i.validator
    def _check_includes_all_l2idesc_attributes(self, attribute, value):
        assert all([hasattr(self.l2i, v.name) for v in L2iDesc])

    @l2anc.validator
    def _check_includes_all_l2ancdesc_attributes(self, attribute, value):
        assert all([hasattr(self.l2anc, v.name) for v in L2ancDesc])

    @l2.validator
    def _check_includes_all_l2desc_attributes(self, attribute, value):
        assert all([hasattr(self.l2, v.name) for v in L2Desc])

    def get_data(self, parameter: Parameter):
        # Route the lookup to the record that owns this parameter.
        if parameter.l2type is L2Type.l2i:
            return getattr(self.l2i, parameter.name)
        elif parameter.l2type is L2Type.l2anc:
            return getattr(self.l2anc, parameter.name)
        return getattr(self.l2, parameter.name)
def get_file_header_data(
        freqmode: int,
        invmode: str,
        product: str,
        time_coverage_start: dt.datetime,
        time_coverage_end: dt.datetime
) -> Dict[str, str]:
    """Assemble the global-attribute dict for one level-2 product file.

    Product-specific attributes come first, then the project-wide
    COMMON_FILE_HEADER_DATA entries are merged in (preserving key order).
    """
    header = {
        "observation_frequency_mode": str(freqmode),
        "inversion_mode": invmode,
        "level2_product_name": product,
        "date_created": dt.datetime.utcnow().strftime(DATEFMT),
        "time_coverage_start": time_coverage_start.strftime(DATEFMT),
        "time_coverage_end": time_coverage_end.strftime(DATEFMT),
    }
    header.update(COMMON_FILE_HEADER_DATA)
    return header
def to_l2(l2: Dict[str, Any], product: str) -> L2:
profile = (
l2["Temperature"] if is_temperature(product)
else l2["VMR"]
)
return L2(
InvMode=l2["InvMode"],
ScanID=l2["ScanID"],
Time=dt.datetime(1858, 11, 17) + relativedelta(days=l2["MJD"]),
Lat1D=l2["Lat1D"],
Lon1D=l2["Lon1D"],
Quality=l2["Quality"],
Altitude=l2["Altitude"],
Pressure=l2["Pressure"],
Profile=profile,
Latitude=l2["Latitude"],
Longitude=l2["Longitude"],
Temperature=l2["Temperature"],
ErrorTotal=l2["ErrorTotal"],
ErrorNoise=l2["ErrorNoise"],
MeasResponse=l2["MeasResponse"],
Apriori=l2["Apriori"],
VMR=l2["VMR"],
AVK=l2["AVK"],
)
def to_l2anc(l2: Dict[str, Any]) -> L2anc:
return L2anc(
LST=l2["LST"],
Orbit=l2["Orbit"],
SZA1D=l2["SZA1D"],
SZA=l2["SZA"],
Theta=l2["Theta"],
)
def to_l2i(l2: Dict[str, Any]) -> L2i:
return L2i(
GenerationTime=dt.datetime.strptime(l2["GenerationTime"], DATEFMT),
Residual=l2["Residual"],
MinLmFactor=l2["MinLmFactor"],
FreqMode=l2["FreqMode"],
)
def generate_filename(
        project: str, product: str, date_start: dt.datetime) -> str:
    """Build the monthly level-2 file name for a project/product pair."""
    # Collapse " / ", " - " and remaining spaces in the product name to "-".
    slug = product.replace(" / ", "-").replace(" - ", "-").replace(" ", "-")
    return "Odin-SMR_L2_{}_{}_{}-{:02}.nc".format(
        project, slug, date_start.year, date_start.month)
def is_temperature(product: str) -> bool:
    """True when the product name refers to a temperature product."""
    return product.find("Temperature") >= 0
# Full variable catalogue of a level-2 file: one Parameter per output
# variable (description enum, unit, storage dtype, dimensionality).
L2FILE = L2File([
    Parameter(
        L2iDesc.GenerationTime, Unit.time, DType.f4, Dimension.d1
    ),
    Parameter(
        L2Desc.Altitude, Unit.altitude, DType.f4, Dimension.d2
    ),
    Parameter(
        L2Desc.Apriori, Unit.product, DType.f4, Dimension.d2
    ),
    Parameter(
        L2Desc.AVK, Unit.product, DType.f4, Dimension.d3
    ),
    Parameter(
        L2Desc.ErrorNoise, Unit.product, DType.f4, Dimension.d2
    ),
    Parameter(
        L2Desc.ErrorTotal, Unit.product, DType.f4, Dimension.d2
    ),
    Parameter(
        L2Desc.Lat1D, Unit.lat, DType.f4, Dimension.d1
    ),
    Parameter(
        L2Desc.Latitude, Unit.lat, DType.f4, Dimension.d2
    ),
    Parameter(
        L2Desc.Lon1D, Unit.lon, DType.f4, Dimension.d1
    ),
    Parameter(
        L2Desc.Longitude, Unit.lon, DType.f4, Dimension.d2
    ),
    Parameter(
        L2ancDesc.LST, Unit.hours, DType.f4, Dimension.d1
    ),
    Parameter(
        L2Desc.MeasResponse, Unit.unitless, DType.f4, Dimension.d2
    ),
    Parameter(
        L2ancDesc.Orbit, Unit.unitless, DType.f4, Dimension.d1
    ),
    Parameter(
        L2Desc.Pressure, Unit.pressure, DType.f4, Dimension.d2
    ),
    Parameter(
        L2Desc.Profile, Unit.product, DType.f4, Dimension.d2
    ),
    Parameter(
        L2Desc.ScanID, Unit.unitless, DType.i8, Dimension.d1
    ),
    Parameter(
        L2ancDesc.SZA1D, Unit.degrees, DType.f4, Dimension.d1
    ),
    Parameter(
        L2ancDesc.SZA, Unit.degrees, DType.f4, Dimension.d2
    ),
    Parameter(
        L2Desc.Temperature, Unit.temperature, DType.f4, Dimension.d2
    ),
    Parameter(
        L2ancDesc.Theta, Unit.temperature, DType.f4, Dimension.d2
    ),
    Parameter(
        L2Desc.Time, Unit.time, DType.double, Dimension.d1
    )
])
| 27.815271 | 79 | 0.622864 | 6,501 | 0.575666 | 0 | 0 | 6,613 | 0.585584 | 0 | 0 | 2,542 | 0.225095 |
61c791ad15060de57be0b1ac1c15f970b14dce6c | 2,549 | py | Python | regparser/layer/external_citations.py | cfpb/regulations-parser | 9b6e1ab2dbec93a915eb6da9a2d88c723b9ac424 | [
"CC0-1.0"
] | 36 | 2015-01-05T21:17:36.000Z | 2020-04-28T21:02:55.000Z | regparser/layer/external_citations.py | ascott1/regulations-parser | 1d653ec2d78c9cbfd3b0c651788e5ab14dcc76ca | [
"CC0-1.0"
] | 49 | 2015-01-28T15:54:25.000Z | 2018-08-20T20:20:08.000Z | regparser/layer/external_citations.py | ascott1/regulations-parser | 1d653ec2d78c9cbfd3b0c651788e5ab14dcc76ca | [
"CC0-1.0"
] | 23 | 2015-01-28T15:34:18.000Z | 2021-02-20T10:53:34.000Z | # vim: set encoding=utf-8
from collections import defaultdict
from regparser.grammar import external_citations as grammar
from layer import Layer
class ExternalCitationParser(Layer):
    """Layer generator that finds citations to documents outside the
    regulation being parsed (CFR, USC, Public Law, Statutes at Large)."""
    # The different types of citations
    CODE_OF_FEDERAL_REGULATIONS = 'CFR'
    UNITED_STATES_CODE = 'USC'
    PUBLIC_LAW = 'PUBLIC_LAW'
    STATUTES_AT_LARGE = 'STATUTES_AT_LARGE'

    def citation_type(self, citation):
        """ Based on the citation parsed, return the type of the citation. """
        # NOTE(review): falls through and implicitly returns None for a
        # citation matching none of the branches — confirm callers tolerate it.
        if citation[1] == 'CFR':
            return ExternalCitationParser.CODE_OF_FEDERAL_REGULATIONS
        elif citation[1] == 'U.S.C.' or 'Act' in citation:
            return ExternalCitationParser.UNITED_STATES_CODE
        elif 'Public' in citation and 'Law' in citation:
            return ExternalCitationParser.PUBLIC_LAW
        elif 'Stat.' in citation:
            return ExternalCitationParser.STATUTES_AT_LARGE

    def reformat_citation(self, citation):
        """ Strip out unnecessary elements from the citation reference, so that
        the various types of citations are presented consistently. """
        # NOTE(review): self.act_citation is not defined in this class or the
        # visible imports — presumably set elsewhere; verify it exists before
        # an 'Act' citation is processed.
        if 'Act' in citation:
            citation = self.act_citation
        return [c for c in citation if c not in [
            'U.S.C.', 'CFR', 'part', '.', 'Public', 'Law', '-']]

    def parse(self, text, parts=None):
        """ Parse the provided text, pulling out all the citations. """
        parser = grammar.regtext_external_citation
        # cm maps a citation key to all of its [start, end] offsets in text.
        cm = defaultdict(list)
        citation_strings = {}
        for citation, start, end in parser.scanString(text):
            # Citations of the form XX CFR YY should be ignored if they are of
            # the title/part being parsed (as they aren't external citations)
            if (citation[0] != self.cfr_title or citation[1] != 'CFR'
                    or citation[2] != parts[0]):
                index = "-".join(citation)
                cm[index].append([start, end])
                citation_strings[index] = citation.asList()

        def build_layer_element(k, offsets):
            # One layer entry per distinct citation, with all its offsets.
            layer_element = {
                'offsets': offsets,
                'citation': self.reformat_citation(citation_strings[k]),
                'citation_type': self.citation_type(citation_strings[k])
            }
            return layer_element
        return [build_layer_element(k, offsets) for k, offsets in cm.items()]

    def process(self, node):
        # Layer entry point: returns the citation list, or None when empty.
        citations_list = self.parse(node.text, parts=node.label)
        if citations_list:
            return citations_list
| 40.460317 | 79 | 0.630836 | 2,399 | 0.941153 | 0 | 0 | 0 | 0 | 0 | 0 | 629 | 0.246763 |
61c7e61fa0692b457a20ea1dbff37e3628444c4b | 2,521 | py | Python | Server/app.py | Ali-Elganzory/High-Ping | 9d983eb2f56779d0893dd387ffeecf9b80007944 | [
"MIT"
] | 3 | 2020-06-28T02:18:22.000Z | 2021-01-07T19:15:13.000Z | Server/app.py | Ali-Elganzory/High-Ping | 9d983eb2f56779d0893dd387ffeecf9b80007944 | [
"MIT"
] | null | null | null | Server/app.py | Ali-Elganzory/High-Ping | 9d983eb2f56779d0893dd387ffeecf9b80007944 | [
"MIT"
] | null | null | null | import eventlet
import socketio
sio = socketio.Server(async_mode='eventlet')
app = socketio.WSGIApp(sio)
# In-memory server state:
# rooms: room name -> {"instructor_id", "instructor_name", "clients": {sid: name}}
rooms = {}
# instructors: instructor sid -> name of the room they host
instructors = {}
# clients_in_rooms: client sid -> {"room": room name, "name": display name}
clients_in_rooms = {}
@sio.event
def connect(sid, environ):
    # Log every new socket connection; no state is created until the client
    # creates or enters a room.
    print("[Server] ", sid, "connected")
@sio.event
def create_room(sid, name, room):
    """Create a room hosted by this client; fails if the name is taken."""
    if room in rooms:
        print("[Server] ", sid, "failed to create", room)
        return {'created': False}
    else:
        # Register the caller as the room's instructor and join the room.
        instructors[sid] = room
        rooms[room] = {"instructor_id": sid, "instructor_name": name, "clients": {}}
        sio.enter_room(sid, room)
        print("[Server] ", sid, "created", room)
        return {'created': True}
@sio.event
def enter_room(sid, name, room):
    """Join an existing room as a client; notifies the other members."""
    if room in rooms:
        rooms[room]["clients"][sid] = name
        clients_in_rooms[sid] = {"room": room, "name": name}
        sio.enter_room(sid, room)
        sio.emit('someone entered', f"{name} entered the room", room=room)
        print("[Server] ", sid, "entered", room)
        return {'entered': True, "instructor_name": rooms[room]["instructor_name"]}
    else:
        print("[Server] ", sid, "failed to enter", room)
        return {'entered': False}
@sio.event
def screen_update(sid, update):
    # Relay a screen frame to everyone in the sender's hosted room.
    # NOTE(review): assumes the sender is an instructor; a non-instructor
    # emitting this event would raise KeyError — confirm clients never do.
    sio.emit('screen_update', update, room=instructors[sid])
@sio.event
def leave_room(sid, room):
    """Explicit leave: instructors close their room, clients just leave.

    NOTE(review): the `room` argument is ignored — the room is looked up
    from server state instead.
    """
    if sid in instructors:
        # Host leaving closes the whole room: notify, then tear down state.
        sio.emit('room_closed', f"Host left; thus, {instructors[sid]} is closed", room=instructors[sid])
        sio.close_room(instructors[sid])
        rooms.pop(instructors[sid])
        instructors.pop(sid)
    elif sid in clients_in_rooms:
        sio.emit('someone left', f"{clients_in_rooms[sid]['name']} left the room", room=clients_in_rooms[sid]['room'])
        sio.leave_room(sid, clients_in_rooms[sid]["room"])
        rooms[clients_in_rooms[sid]["room"]]["clients"].pop(sid)
        clients_in_rooms.pop(sid)
@sio.event
def disconnect(sid):
    """Socket dropped: same cleanup as leave_room, without sio.leave_room
    (the connection is already gone). Duplicates leave_room's logic with a
    slightly different rooms.pop/close_room order — NOTE(review): consider
    unifying the two paths."""
    if sid in instructors:
        sio.emit('room_closed', f"Host left; thus, {instructors[sid]} is closed", room=instructors[sid])
        rooms.pop(instructors[sid])
        sio.close_room(instructors[sid])
        instructors.pop(sid)
    elif sid in clients_in_rooms:
        sio.emit('someone left', f"{clients_in_rooms[sid]['name']} left the room", room=clients_in_rooms[sid]['room'])
        rooms[clients_in_rooms[sid]["room"]]["clients"].pop(sid)
        clients_in_rooms.pop(sid)
    print("[Server] ", sid, "disconnected")
if __name__ == "__main__":
    # Serve the Socket.IO WSGI app on all interfaces, port 5000.
    eventlet.wsgi.server(eventlet.listen(('', 5000)), app, debug=True)
| 31.5125 | 118 | 0.638239 | 0 | 0 | 0 | 0 | 2,246 | 0.890916 | 0 | 0 | 650 | 0.257834 |
61c8c91a42b220ed80fd252d6e8e255457b5045e | 2,141 | py | Python | bot.py | Shikib/twiliobot | 0077dfc2c1451243839bda4d05e6692d3d70391c | [
"MIT"
] | null | null | null | bot.py | Shikib/twiliobot | 0077dfc2c1451243839bda4d05e6692d3d70391c | [
"MIT"
] | null | null | null | bot.py | Shikib/twiliobot | 0077dfc2c1451243839bda4d05e6692d3d70391c | [
"MIT"
] | null | null | null | from telegram import Updater
from twilio.rest import TwilioRestClient
# Placeholder credentials: substitute real values before running.
TELEGRAM_TOKEN = "INSERT_YOUR_TOKEN_HERE"
TWILIO_ACCOUNT_SID = "INSERT_ACCOUNT_SID_HERE"
TWILIO_AUTH_TOKEN = "INSERT_AUTH_TOKEN_HERE"

# initialize telegram updater and dispatcher
updater = Updater(token=TELEGRAM_TOKEN)
dispatcher = updater.dispatcher

# initialize twilio client
client = TwilioRestClient(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
def call(bot, chat_id, cmd_text):
    """Place a phone call via Twilio and confirm back on Telegram.

    cmd_text format: "<phone number> [message ...]". Any text after the
    number is ignored for calls — the demo TwiML URL plays Twilio's
    canned message.
    """
    # set source_number to be a number associated with the account
    source_number = "+18668675309"
    phone_number = cmd_text.split()[0]
    # Use TWILIO's API to call based on the cmd_text.
    # Fixed: `from` is a Python keyword and was a SyntaxError here; the
    # Twilio helper library names this parameter `from_`.
    call = client.calls.create(
        url="http://demo.twilio.com/docs/voice.xml",
        to=phone_number,
        from_=source_number)
    # Send a confirmation message to the issuer of the command
    msg = "Sent a call to the phone number with the default message."
    bot.sendMessage(chat_id=chat_id, text=msg)
def text(bot, chat_id, cmd_text):
    """Send an SMS via Twilio and confirm back on Telegram.

    cmd_text format: "<phone number> [message ...]"; defaults to "Yes"
    when no message is given.
    """
    # set source_number to be a number associated with the account
    source_number = "+18668675309"
    # cmd_text should be in format [Phone Number] [Message]
    cmd_words = cmd_text.split()
    phone_number = cmd_words[0]
    # Fixed: join the words into a single string (the original passed a list).
    message_text = " ".join(cmd_words[1:]) if len(cmd_words) > 1 else "Yes"
    # Fixed: SMS goes through `client.messages.create` (the original called
    # `client.calls.create`), and `from` is a keyword — Twilio uses `from_`.
    message = client.messages.create(
        to=phone_number,
        from_=source_number,
        body=message_text)
    # Send a confirmation message to the issuer of the command
    msg = "Sent a text to the phone number with your message."
    bot.sendMessage(chat_id=chat_id, text=msg)
def main(bot, update):
    """Dispatch an incoming Telegram message.

    Recognised commands: "!call <number>" and "!text <number> [message]".
    Everything else (including empty messages) is ignored.
    """
    # Fixed: the message text lives at update.message.text, not
    # update.message_text.
    message_text = update.message.text
    chat_id = update.message.chat_id
    words = message_text.split()
    if not words:
        # Ignore empty messages instead of raising IndexError on words[0].
        return
    cmd = words[0]
    cmd_text = ' '.join(words[1:]) if len(words) > 1 else ''
    if cmd == '!call':
        call(bot, chat_id, cmd_text)
    elif cmd == '!text':
        text(bot, chat_id, cmd_text)
# Route every incoming Telegram message through main() and start long polling.
dispatcher.addTelegramMessageHandler(main)
updater.start_polling()
| 31.028986 | 67 | 0.726763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 796 | 0.371789 |
4ede741a2bc6f6afaf8e5917cde1bda2865c5415 | 899 | py | Python | Python/construct-the-lexicographically-largest-valid-sequence.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/construct-the-lexicographically-largest-valid-sequence.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/construct-the-lexicographically-largest-valid-sequence.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | # Time: O(n!)
# Space: O(n)
class Solution(object):
    def constructDistancedSequence(self, n):
        """Build the lexicographically largest valid distanced sequence.

        The sequence has length 2*n-1; 1 appears once and each k in 2..n
        appears twice, exactly k positions apart. Greedy backtracking:
        always try the largest unused value at the first empty slot.

        :type n: int
        :rtype: List[int]
        """
        size = 2 * n - 1
        seq = [0] * size
        used = [False] * (n + 1)

        def place(pos):
            # Advance to the next unfilled slot.
            while pos < size and seq[pos]:
                pos += 1
            if pos == size:
                return True  # every slot filled: done
            # Try values largest-first so the first success is lexicographically
            # the largest sequence.
            for val in range(n, 0, -1):
                if used[val]:
                    continue
                # 1 occupies a single slot; k >= 2 also occupies pos + k.
                partner = pos if val == 1 else pos + val
                if partner >= size or seq[partner]:
                    continue
                seq[pos] = seq[partner] = val
                used[val] = True
                if place(pos + 1):
                    return True
                # Undo and try the next smaller value.
                seq[pos] = seq[partner] = 0
                used[val] = False
            return False

        place(0)
        return seq
| 32.107143 | 62 | 0.463849 | 868 | 0.965517 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.098999 |
4edfe61322b334879f9f8b3e249afc1b2f4d6883 | 868 | py | Python | General Questions/Longest_Common_Prefix.py | siddhi-244/CompetitiveProgrammingQuestionBank | 4c265d41b82a7d4370c14d367f78effa9ed95d3c | [
"MIT"
] | 931 | 2020-04-18T11:57:30.000Z | 2022-03-31T15:15:39.000Z | General Questions/Longest_Common_Prefix.py | geekySapien/CompetitiveProgrammingQuestionBank | 9e84a88a85dbabd95207391967abde298ddc031d | [
"MIT"
] | 661 | 2020-12-13T04:31:48.000Z | 2022-03-15T19:11:54.000Z | General Questions/Longest_Common_Prefix.py | geekySapien/CompetitiveProgrammingQuestionBank | 9e84a88a85dbabd95207391967abde298ddc031d | [
"MIT"
] | 351 | 2020-08-10T06:49:21.000Z | 2022-03-25T04:02:12.000Z | #Longest Common Prefix in python
#Implementation of python program to find the longest common prefix amongst the given list of strings.
#If there is no common prefix then returning 0.
#define the function to evaluate the longest common prefix
def longestCommonPrefix(s):
    """Return the longest common prefix shared by every string in *s*.

    Fixes: an empty input list previously raised ValueError (``min()`` of
    an empty sequence); it now returns ''.  The prefix is accumulated in a
    list and joined once instead of repeated string concatenation.
    """
    if not s:
        return ''
    prefix = []
    # zip(*s) yields one tuple of i-th characters per column and stops at
    # the shortest string, so no explicit min(s, key=len) is needed.
    for column in zip(*s):
        first = column[0]
        if any(ch != first for ch in column):
            return ''.join(prefix)
        prefix.append(first)
    return ''.join(prefix)
# Interactive driver: read N strings from stdin and report their common prefix.
n = int(input("Enter the number of names in list for input:"))
print("Enter the Strings:")
s = [input() for i in range(n)]
# NOTE(review): longestCommonPrefix is evaluated twice here; the result
# could be computed once and reused.
if(longestCommonPrefix(s)):
    print("The Common Prefix is:" ,longestCommonPrefix(s))
else:
print("There is no common prefix for the given list of strings, hence the answer is:", 0) | 37.73913 | 103 | 0.626728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 467 | 0.538018 |
4ee2287446b5623f2360208ebd4d39b34fb838c9 | 2,827 | py | Python | codes/1hop_getTwitterUserDetails.py | hridaydutta123/ActiveProbing | 763e90fb9e4d996fefd46fb72a53345f83bb3366 | [
"BSD-2-Clause"
] | null | null | null | codes/1hop_getTwitterUserDetails.py | hridaydutta123/ActiveProbing | 763e90fb9e4d996fefd46fb72a53345f83bb3366 | [
"BSD-2-Clause"
] | null | null | null | codes/1hop_getTwitterUserDetails.py | hridaydutta123/ActiveProbing | 763e90fb9e4d996fefd46fb72a53345f83bb3366 | [
"BSD-2-Clause"
] | null | null | null | import tweepy
from tweepy import OAuthHandler
import sys
import ConfigParser
from pymongo import MongoClient
import datetime
from random import randint
import time
# Mongo Settings
# Connect to MongoDB
client = MongoClient("hpc.iiitd.edu.in", 27017, maxPoolSize=50)
# Connect to db bitcoindb
db=client.activeprobing
# First CLI argument: path to the API-key settings file.
settings_file = sys.argv[1]
#This file creates user-language feature generation
# NOTE(review): this guard can never fire -- sys.argv always contains at
# least the script name, so `< 1` was probably meant to be `< 2`.
if len(sys.argv) < 1:
	print """
	Command : python userLanguages.py <inp-file> <settings-file>
	(IN OUR CASE)
	python userLanguages.py ../../Dataset/username_userID.csv ../settings.txt
	"""
	sys.exit(1)
# Read config settings
config = ConfigParser.ConfigParser()
config.readfp(open(settings_file))
# Random API key selection -- picks one of the sections 'API Keys 1'..'API Keys 8'
randVal = randint(1,8)
CONSUMER_KEY = config.get('API Keys ' + str(randVal), 'API_KEY')
CONSUMER_SECRET = config.get('API Keys ' + str(randVal), 'API_SECRET')
ACCESS_KEY = config.get('API Keys ' + str(randVal), 'ACCESS_TOKEN')
ACCESS_SECRET = config.get('API Keys ' + str(randVal), 'ACCESS_TOKEN_SECRET')
auth = OAuthHandler(CONSUMER_KEY,CONSUMER_SECRET)
api = tweepy.API(auth)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
#search
api = tweepy.API(auth)
# Get list of userIDs in mongo (ids already stored in the followers collection)
allUserIDs = db.followers.distinct("id")
# Seed Twitter user id(s) whose followers will be crawled (1-hop probing).
userList = [170995068]
for users in userList:
	try:
		api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)
		# Get followerids of user
		c = tweepy.Cursor(api.followers_ids, user_id = users)
		followerids = []
		for page in c.pages():
			followerids.append(page)
		print "ids=", followerids
	except tweepy.TweepError:
		print "tweepy.TweepError="#, tweepy.TweepError
	except:
		e = sys.exc_info()[0]
		print "Error: %s" % e
# NOTE(review): this loop is OUTSIDE the loop above, so it only processes
# the followers of the LAST user; `followerids` and `users` leak from that
# loop, and a NameError is possible if the fetch above failed before
# `followerids` was assigned.  It also only reads the first page.
for ids in followerids[0]:
	print ids
	# Tweepy API get user details
	result = api.get_user(user_id=ids)
	if ids not in allUserIDs:
		# Insert into mongo
		result._json['lastModified'] = datetime.datetime.now()
		result._json['followerOf'] = users
		insertMongo = db.followers.insert_one(result._json)
	else:
		# # Check for changes
		userMongoDetails = db.followers.find({'id':ids}, {'friends_count': 1, 'followers_count': 1})
		for values in userMongoDetails:
			existingFollowers = values['followers_count']
			existingFriends = values['friends_count']
		# New followers
		newFollowers = result._json['followers_count']
		newFriends = result._json['friends_count']
		# Check if followers and friends are same as exist in mongo
		if newFollowers != existingFollowers or newFriends != existingFriends:
			changeVals = {'timestamp': datetime.datetime.now(),'followers_count':newFollowers, 'friends_count': newFriends}
			db.followers.update({'id':ids}, {'$push': {"changes": changeVals}}, False, True)
| 30.728261 | 115 | 0.718783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 918 | 0.324726 |
4ee532fd58e6d9d5f5133bca89673cde76f9ac90 | 1,580 | py | Python | netbox/extras/migrations/0061_extras_change_logging.py | TheFlyingCorpse/netbox | a226f06b1beb575011d783b202d76cb74d3b1f79 | [
"Apache-2.0"
] | 4,994 | 2019-07-01T13:15:44.000Z | 2022-03-31T19:55:45.000Z | netbox/extras/migrations/0061_extras_change_logging.py | TheFlyingCorpse/netbox | a226f06b1beb575011d783b202d76cb74d3b1f79 | [
"Apache-2.0"
] | 4,045 | 2019-07-01T14:24:09.000Z | 2022-03-31T16:07:39.000Z | netbox/extras/migrations/0061_extras_change_logging.py | TheFlyingCorpse/netbox | a226f06b1beb575011d783b202d76cb74d3b1f79 | [
"Apache-2.0"
] | 1,225 | 2019-07-01T15:34:03.000Z | 2022-03-31T16:47:09.000Z | from django.db import migrations, models
class Migration(migrations.Migration):
    """Add change-logging timestamp fields to the extras models."""

    dependencies = [
        ('extras', '0060_customlink_button_class'),
    ]

    # Every change-logged model gains a nullable `created` date and a
    # nullable `last_updated` timestamp; the pairs are emitted in the same
    # order as before (created first, then last_updated, per model).
    operations = [
        operation
        for model in ('customfield', 'customlink', 'exporttemplate', 'webhook')
        for operation in (
            migrations.AddField(
                model_name=model,
                name='created',
                field=models.DateField(auto_now_add=True, null=True),
            ),
            migrations.AddField(
                model_name=model,
                name='last_updated',
                field=models.DateTimeField(auto_now=True, null=True),
            ),
        )
    ]
| 30.384615 | 65 | 0.563924 | 1,536 | 0.972152 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.14557 |
4ee5c4620828c4c0155ae2a8226dc077ef23d941 | 2,849 | py | Python | tests/test_deployment/test__issue_tracker.py | getslash/scotty | 93ea0e41f8e10249e82adc4966ad9aef67b83eba | [
"BSD-3-Clause"
] | 10 | 2015-12-30T14:01:09.000Z | 2020-06-05T19:10:23.000Z | tests/test_deployment/test__issue_tracker.py | getslash/scotty | 93ea0e41f8e10249e82adc4966ad9aef67b83eba | [
"BSD-3-Clause"
] | 41 | 2015-12-31T12:08:15.000Z | 2020-07-27T11:22:56.000Z | tests/test_deployment/test__issue_tracker.py | getslash/scotty | 93ea0e41f8e10249e82adc4966ad9aef67b83eba | [
"BSD-3-Clause"
] | 2 | 2016-05-22T15:25:47.000Z | 2017-06-06T11:11:23.000Z | import http
import uuid
from itertools import chain, combinations
import pytest
import requests
import slash
def powerset(iterable):
    """Yield every subset of *iterable* (including the empty set) as a frozenset."""
    items = list(iterable)
    # All combinations of size 0, 1, ..., len(items), in increasing size.
    all_combinations = chain.from_iterable(
        combinations(items, size) for size in range(len(items) + 1)
    )
    return (frozenset(subset) for subset in all_combinations)
def test_empty_issue(tracker):
    """A blank issue name must be rejected by the server with 409 Conflict."""
    with pytest.raises(requests.exceptions.HTTPError) as e:
        tracker.create_issue(" ")
    # NOTE(review): `_excinfo[1]` is pytest-internal; `e.value` is the
    # public way to reach the raised exception.
    assert e._excinfo[1].response.status_code == http.client.CONFLICT
def test_issue_creation(beam, issue_factory):
    """Associating and de-associating an issue updates the beam's issue list."""
    issue = issue_factory.get()
    beam, _ = beam  # the `beam` fixture yields a (beam, <extra>) tuple
    assert len(beam.associated_issues) == 0
    beam.set_issue_association(issue.id_in_scotty, True)
    beam.update()
    assert beam.associated_issues == [issue.id_in_scotty]
    beam.set_issue_association(issue.id_in_scotty, False)
    beam.update()
    assert len(beam.associated_issues) == 0
def test_issue_deletion(beam, issue_factory):
    """Deleting an issue removes it from the beam's associations."""
    issue = issue_factory.get()
    beam, _ = beam  # the `beam` fixture yields a (beam, <extra>) tuple
    assert len(beam.associated_issues) == 0
    beam.set_issue_association(issue.id_in_scotty, True)
    beam.update()
    assert beam.associated_issues == [issue.id_in_scotty]
    issue.delete()
    beam.update()
    assert len(beam.associated_issues) == 0
def test_tracker_deletion(beam, tracker, issue_factory):
    """Deleting a whole tracker cascades to the beam's issue associations."""
    issue = issue_factory.get()
    beam, _ = beam  # the `beam` fixture yields a (beam, <extra>) tuple
    assert len(beam.associated_issues) == 0
    beam.set_issue_association(issue.id_in_scotty, True)
    beam.update()
    assert beam.associated_issues == [issue.id_in_scotty]
    tracker.delete_from_scotty()
    beam.update()
    assert len(beam.associated_issues) == 0
def test_tracker_get_by_name(tracker, scotty):
    # The tracker fixture registers itself under the name "tests_tracker".
    assert scotty.get_tracker_id("tests_tracker") == tracker.id
@pytest.mark.parametrize("add_spaces", [True, False])
def test_create_issue_twice(issue_factory, tracker, scotty, add_spaces):
    """Re-creating an existing issue (even padded with spaces) is idempotent."""
    issue = issue_factory.get()
    new_name = issue.id_in_tracker
    if add_spaces:
        # The server is expected to strip surrounding whitespace.
        new_name = " " + new_name + " "
    assert scotty.create_issue(tracker.id, new_name) == issue.id_in_scotty
# The mutable tracker fields exercised by the parametrized test below.
_TRACKER_PARAMS = frozenset(["url", "name", "config"])
@pytest.mark.parametrize("params", powerset(_TRACKER_PARAMS))
def test_tracker_modification(scotty, tracker, params):
    """Updating any subset of fields changes those, and only those, fields."""
    def _get_tracker_data():
        # Fetch the tracker resource straight from the REST API.
        response = scotty._session.get("{}/trackers/{}".format(scotty._url, tracker.id))
        response.raise_for_status()
        return response.json()["tracker"]
    original_data = _get_tracker_data()
    unmodified_params = _TRACKER_PARAMS - params
    # Fresh random values guarantee the update is observable.
    kwargs = {p: str(uuid.uuid4()) for p in params}
    if "config" in kwargs:
        # `config` must be a mapping, not a bare string.
        kwargs["config"] = {"value": str(uuid.uuid4())}
    tracker.update(**kwargs)
    data = _get_tracker_data()
    for p in unmodified_params:
        assert data[p] == original_data[p]
    for p in params:
        assert data[p] == kwargs[p]
| 29.071429 | 98 | 0.702001 | 0 | 0 | 0 | 0 | 1,064 | 0.373464 | 0 | 0 | 113 | 0.039663 |
4ee889d75d3834fac8478bde6be85577e39bc8ad | 3,826 | py | Python | iotcookbook/device/pi/docker/buzzer/app/client.py | haizaar/crossbar-examples | a3daffc0348565aa5e989433978dec2913242ea8 | [
"Apache-2.0"
] | null | null | null | iotcookbook/device/pi/docker/buzzer/app/client.py | haizaar/crossbar-examples | a3daffc0348565aa5e989433978dec2913242ea8 | [
"Apache-2.0"
] | null | null | null | iotcookbook/device/pi/docker/buzzer/app/client.py | haizaar/crossbar-examples | a3daffc0348565aa5e989433978dec2913242ea8 | [
"Apache-2.0"
] | null | null | null | import os
import argparse
import six
import txaio
txaio.use_twisted()
import RPi.GPIO as GPIO
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.error import ReactorNotRunning
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.twisted.util import sleep
def get_serial(cpuinfo_path='/proc/cpuinfo'):
    """
    Get the Pi's serial number.

    Parses the kernel cpuinfo file and returns the value of its ``Serial``
    line as a hexadecimal integer, or None when no such line exists
    (e.g. on non-Pi hardware).

    :param cpuinfo_path: file to parse; defaults to /proc/cpuinfo.  The
        parameter generalizes the previously hard-coded path and makes the
        function testable.
    """
    with open(cpuinfo_path) as fd:
        for line in fd.read().splitlines():
            line = line.strip()
            if line.startswith('Serial'):
                _, serial = line.split(':')
                return int(serial.strip(), 16)
    return None  # explicit: no Serial line found
class BuzzerComponent(ApplicationSession):
    # WAMP session that exposes the starterkit's piezo buzzer as two RPC
    # endpoints: <prefix>.beep and <prefix>.welcome, where <prefix> embeds
    # the Pi's hardware serial number.
    @inlineCallbacks
    def onJoin(self, details):
        # Build a per-device URI prefix from the Pi's serial number.
        self._serial = get_serial()
        self._prefix = u'io.crossbar.demo.iotstarterkit.{}.buzzer'.format(self._serial)
        self.log.info("Crossbar.io IoT Starterkit Serial No.: {serial}", serial=self._serial)
        self.log.info("BuzzerComponent connected: {details}", details=details)
        # get custom configuration (passed via ApplicationRunner(extra=...))
        cfg = self.config.extra
        # initialize buzzer GPIO pin (BCM numbering) and drive it low
        self._buzzer_pin = cfg['buzzer_pin']
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self._buzzer_pin, GPIO.OUT)
        GPIO.output(self._buzzer_pin, 0)
        # register both procedures under the device-specific prefix
        for proc in [
            (self.beep, 'beep'),
            (self.welcome, 'welcome'),
        ]:
            yield self.register(proc[0], u'{}.{}'.format(self._prefix, proc[1]))
        self.log.info("BuzzerComponent ready!")
        # audible signal that the component is up
        self.welcome()
    @inlineCallbacks
    def welcome(self):
        """
        Play annoying beep sequence.
        """
        # sequence of 7 short beeps
        yield self.beep(7)
        # wait 0.5s
        yield sleep(.5)
        # another 3 longer beeps
        yield self.beep(3, on=200, off=200)
    @inlineCallbacks
    def beep(self, count=1, on=30, off=80):
        """
        Trigger beeping sequence.
        :param count: Number of beeps.
        :type count: int
        :param on: ON duration in ms.
        :type on: int
        :param off: OFF duration in ms.
        :type off: int
        """
        for i in range(count):
            GPIO.output(self._buzzer_pin, 1)
            yield sleep(float(on) / 1000.)
            GPIO.output(self._buzzer_pin, 0)
            yield sleep(float(off) / 1000.)
    def onLeave(self, details):
        # Session left the realm; drop the transport as well.
        self.log.info("Session closed: {details}", details=details)
        self.disconnect()
    def onDisconnect(self):
        # Transport is gone; stop the reactor unless it is already stopping.
        self.log.info("Connection closed")
        try:
            reactor.stop()
        except ReactorNotRunning:
            pass
if __name__ == '__main__':
    # Crossbar.io connection configuration (overridable via environment)
    url = os.environ.get('CBURL', u'wss://demo.crossbar.io/ws')
    realm = os.environ.get('CBREALM', u'crossbardemo')
    # parse command line parameters
    # NOTE(review): the help texts below claim defaults of
    # "ws://localhost:8080/ws" and "realm1", but the actual defaults come
    # from CBURL/CBREALM above -- the help strings are stale.
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', action='store_true', help='Enable debug output.')
    parser.add_argument('--url', dest='url', type=six.text_type, default=url, help='The router URL (default: "ws://localhost:8080/ws").')
    parser.add_argument('--realm', dest='realm', type=six.text_type, default=realm, help='The realm to join (default: "realm1").')
    args = parser.parse_args()
    # custom configuration data
    if args.debug:
        txaio.start_logging(level='debug')
    else:
        txaio.start_logging(level='info')
    extra = {
        # GPI pin of buzzer
        u'buzzer_pin': 16,
    }
    # create and start app runner for our app component ..
    runner = ApplicationRunner(url=args.url, realm=args.realm, extra=extra)
    runner.run(BuzzerComponent, auto_reconnect=True)
| 27.927007 | 137 | 0.618662 | 2,051 | 0.536069 | 1,638 | 0.428123 | 1,701 | 0.44459 | 0 | 0 | 1,134 | 0.296393 |
4ee8e5064d073bd9fe65b5f652de6a065ad8feed | 468 | py | Python | troupon/deals/tasks.py | andela/troupon | 3704cbe6e69ba3e4c53401d3bbc339208e9ebccd | [
"MIT"
] | 14 | 2016-01-12T07:31:09.000Z | 2021-11-20T19:29:35.000Z | troupon/deals/tasks.py | andela/troupon | 3704cbe6e69ba3e4c53401d3bbc339208e9ebccd | [
"MIT"
] | 52 | 2015-09-02T14:54:43.000Z | 2016-08-01T08:22:21.000Z | troupon/deals/tasks.py | andela/troupon | 3704cbe6e69ba3e4c53401d3bbc339208e9ebccd | [
"MIT"
] | 17 | 2015-09-30T13:18:48.000Z | 2021-11-18T16:25:12.000Z | from celery.decorators import periodic_task
from celery.task.schedules import crontab
from celery.utils.log import get_task_logger
from utils import scraper
logger = get_task_logger(__name__)
# Periodic task: runs every Wednesday at 10:00 (see the crontab schedule).
@periodic_task(run_every=(crontab(hour=10, day_of_week="Wednesday")))
def send_periodic_emails():
    """Celery task delegating to the scraper's periodic-email sender."""
    logger.info("Start task")
    # scraper.send_periodic_emails() returns an int, logged below with %i.
    result = scraper.send_periodic_emails()
    logger.info("Task finished: result = %i" % result)
| 29.25 | 69 | 0.779915 | 0 | 0 | 0 | 0 | 226 | 0.482906 | 0 | 0 | 95 | 0.202991 |
4ee93f1b5b503980b03dc4c25a2236f84fadfd5f | 806 | py | Python | unit_testing/temp_manage.py | ddsprasad/pythondev | be3e555ff4a877ecac120b650130c601772050b1 | [
"MIT"
] | null | null | null | unit_testing/temp_manage.py | ddsprasad/pythondev | be3e555ff4a877ecac120b650130c601772050b1 | [
"MIT"
] | null | null | null | unit_testing/temp_manage.py | ddsprasad/pythondev | be3e555ff4a877ecac120b650130c601772050b1 | [
"MIT"
] | null | null | null | from memsql.common import database
import sys
from datetime import datetime
# MemSQL connection settings.
# NOTE(review): credentials are hard-coded in source control -- they should
# be moved to environment variables or a protected config file.
DATABASE = 'PREPDB'
HOST = '10.1.100.12'
PORT = '3306'
USER = 'root'
PASSWORD = 'In5pir0n@121'
def get_connection(db=DATABASE):
    """ Returns a new connection to the database. """
    # `db` selects the schema; all other parameters come from the constants above.
    return database.connect(host=HOST, port=PORT, user=USER, password=PASSWORD, database=db)
def run_temp_scheduler():
    """Call the `tvf_temp_scheduler` stored procedure and log each result row.

    Terminates the whole process (sys.exit) on the first row whose values
    contain the substring 'ERROR'.
    """
    with get_connection() as conn:
        x = conn.query("call tvf_temp_scheduler()")
        for i in x:
            # each row `i` is dict-like; inspect all of its values at once
            if 'ERROR' in str(list(i.values())):
                print(datetime.now(), ': ', list(i.values()))
                # abort on the first error row
                sys.exit()
            else:
                print(datetime.now(), ': ', list(i.values()))
        print(datetime.now(), ": TEMP TABLES PERSISTED SUCCESSFULLY")
run_temp_scheduler() | 28.785714 | 92 | 0.614144 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 176 | 0.218362 |
4ee959a3b5b6300a196f631748a0e734cce242cc | 534 | py | Python | tabcmd/parsers/remove_users_parser.py | WillAyd/tabcmd | 1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15 | [
"MIT"
] | null | null | null | tabcmd/parsers/remove_users_parser.py | WillAyd/tabcmd | 1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15 | [
"MIT"
] | null | null | null | tabcmd/parsers/remove_users_parser.py | WillAyd/tabcmd | 1ba4a6ce1586b5ec4286aca0edff0fbaa1c69f15 | [
"MIT"
] | null | null | null | from .global_options import *
class RemoveUserParser:
    """Builds the command-line parser for the ``removeusers`` command."""

    @staticmethod
    def remove_user_parser(manager, command):
        """Register the ``removeusers`` sub-command and its arguments."""
        parser = manager.include(command)
        parser.add_argument(
            'groupname',
            help='The group to remove users from.')
        # Shared options: the users file plus the completeness flags.
        set_users_file_arg(parser)
        set_completeness_options(parser)
4eea4879abfe5efa38a70bbb4827cc57672e6417 | 6,819 | py | Python | Lab6/passengers.py | programiranje3/v2020 | 93c9793aadfea1c15107447951e2db5284fe8802 | [
"MIT"
] | null | null | null | Lab6/passengers.py | programiranje3/v2020 | 93c9793aadfea1c15107447951e2db5284fe8802 | [
"MIT"
] | null | null | null | Lab6/passengers.py | programiranje3/v2020 | 93c9793aadfea1c15107447951e2db5284fe8802 | [
"MIT"
] | null | null | null | #
# Create the FlightService enumeration that defines the following items (services):
# (free) snack, (free) refreshments, (free) meal, priority boarding,
# (free) onboard wifi, and an item for cases when services are not specified.
#
from enum import Enum
class FlightService(Enum):
    """Services that may be offered to a passenger on a flight."""

    unspecified = 0        # the passenger category has not declared its services
    snack = 1
    refreshments = 2
    meal = 3
    priority_boarding = 4
    onboard_wifi = 5


# The Passenger base class: identity (name + 6-digit passport number),
# accumulated air miles, check-in state, and a class-level `services` list
# describing what the passenger's category offers (overridden in subclasses).
from sys import stderr


class Passenger:
    """An airline passenger.

    Attributes:
        name: the passenger's full name.
        passport: 6-digit passport number, stored as a string
            (None when the supplied value was invalid).
        air_miles: non-negative number of accumulated air miles, or None.
        checked_in: True once the passenger has checked in.
    """

    # Services for the base category; subclasses redefine this list.
    services = [FlightService.unspecified]

    def __init__(self, name, passport, air_miles=None, checked_in=False):
        self.name = name
        self.passport = passport
        self.air_miles = air_miles
        self.checked_in = checked_in

    @property
    def passport(self):
        return self.__passport

    @passport.setter
    def passport(self, value):
        # Accept a 6-character digit string, or an int whose decimal form
        # has 6 digits; anything else is reported and stored as None.
        if isinstance(value, str) and len(value) == 6 and value.isdigit():
            self.__passport = value
        elif isinstance(value, int) and len(str(value)) == 6:
            self.__passport = str(value)
        else:
            print("Error! Incorrect passport number! Setting passport to None")
            self.__passport = None

    @property
    def air_miles(self):
        return self.__air_miles

    @air_miles.setter
    def air_miles(self, value):
        # Start from None, then accept None, a non-negative int, or a
        # string that parses to a non-negative int.
        self.__air_miles = None
        if value is None or (isinstance(value, int) and value >= 0):
            self.__air_miles = value
            return
        if isinstance(value, str):
            try:
                parsed = int(value)
            except ValueError:
                stderr.write(f"Error! An incorrect value {value} passed for the air miles attribute\n")
            else:
                if parsed >= 0:
                    self.__air_miles = parsed
            return
        print(f"Error! The input value {value} cannot be used for setting the air miles attribute")

    def __str__(self):
        pieces = [f"{self.name}, with passport number: "]
        pieces.append(self.passport if self.passport else "unavailable")
        if self.air_miles:
            pieces.append(f", collected {self.air_miles} air miles")
        pieces.append("; check-in completed" if self.checked_in else "; not checked in yet")
        return "".join(pieces)

    def __eq__(self, other):
        # Two passengers are equal when both have passport numbers and both
        # the names and the passport numbers match; every other comparison
        # is reported and treated as unequal.
        if not isinstance(other, Passenger):
            print("The other object is not of the Passenger type")
            return False
        if not (self.passport and other.passport):
            print(f"Cannot determine equality since at least one of the passengers does not have passport number")
            return False
        return (self.name == other.name) and (self.passport == other.passport)

    @classmethod
    def available_services(cls):
        """Human-readable names of the services for this passenger category."""
        return [service.name.replace('_', ' ') for service in cls.services]
#
# Create the EconomyPassenger class that extends the Passenger class and has:
#
# - method candidate_for_upgrade that checks if the passenger is a candidate for an upgrade
# and returns an appropriate boolean value; a passenger is a candidate for an upgrade if
# their current air miles exceed the given threshold (input parameter) and the passenger
# has checked in
#
# - changed value for the *services* class attribute so that it includes snack and refreshments
# (as elements of the FlightServices enum)
#
# - overridden __str__ method so that it first prints "Economy class passenger" and then
# the available information about the passenger
#
class EconomyPassenger(Passenger):
    """A passenger travelling in economy class."""

    # Complimentary services for the economy cabin.
    services = [FlightService.snack, FlightService.refreshments]

    def candidate_for_upgrade(self, min_air_miles):
        """Truthy when the passenger checked in and exceeds *min_air_miles*."""
        miles = self.air_miles
        return self.checked_in and miles and (miles > min_air_miles)

    def __str__(self):
        base_description = super().__str__()
        return "Economy class passenger " + base_description
#
# Create class BusinessPassenger that extends the Passenger class and has:
#
# - changed value for the services class attribute, so that it includes:
# priority boarding, meal, digital entertainment, and onboard wifi
#
# - overridden __str__ method so that it first prints "Business class passenger" and then
# the available information about the passengers
#
class BusinessPassenger(Passenger):
    """A passenger travelling in business class."""

    # Premium services for the business cabin.
    services = [FlightService.meal, FlightService.onboard_wifi, FlightService.priority_boarding]

    def __str__(self):
        base_description = super().__str__()
        return "Business class passenger " + base_description
if __name__ == '__main__':
    # Quick interactive demo of the passenger class hierarchy.
    jim = EconomyPassenger("Jim Jonas", '123456', air_miles=1000)
    # jim.services = [FlightService.onboard_wifi, FlightService.meal]
    print(jim)
    print(jim.__dict__)
    print(jim.available_services())
    print()
    bob = EconomyPassenger("Bob Jones", '987654', checked_in=True)
    print(bob)
    # air miles may also be assigned as a numeric string
    bob.air_miles = '20200'
    print(bob.__dict__)
    print(bob.available_services())
    print()
    mike = BusinessPassenger("Mike Stone", '234567', air_miles=2000)
    print(mike)
    print(mike.__dict__)
print(mike.available_services()) | 36.079365 | 118 | 0.69585 | 3,069 | 0.450066 | 0 | 0 | 1,244 | 0.182431 | 0 | 0 | 3,740 | 0.548468 |
4eea98d24b1e30d22ba208bedbdfdc1f2bbee460 | 722 | py | Python | src/documents/management/commands/generate_documents.py | Talengi/phase | 60ff6f37778971ae356c5b2b20e0d174a8288bfe | [
"MIT"
] | 8 | 2016-01-29T11:53:40.000Z | 2020-03-02T22:42:02.000Z | src/documents/management/commands/generate_documents.py | Talengi/phase | 60ff6f37778971ae356c5b2b20e0d174a8288bfe | [
"MIT"
] | 289 | 2015-03-23T07:42:52.000Z | 2022-03-11T23:26:10.000Z | src/documents/management/commands/generate_documents.py | Talengi/phase | 60ff6f37778971ae356c5b2b20e0d174a8288bfe | [
"MIT"
] | 7 | 2015-12-08T09:03:20.000Z | 2020-05-11T15:36:51.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from documents.tests.utils import generate_random_documents
from categories.models import Category
class Command(BaseCommand):
    """Management command that seeds the database with random documents."""

    args = '<number_of_documents> <category_id>'
    help = 'Creates a given number of random documents'

    def handle(self, *args, **options):
        """Generate the requested number of documents in the given category."""
        doc_count, category_pk = int(args[0]), int(args[1])
        target_category = Category.objects.get(pk=category_pk)
        generate_random_documents(doc_count, target_category)
        success_message = 'Successfully generated {nb_of_docs} documents'.format(
            nb_of_docs=doc_count,
        )
        self.stdout.write(success_message.encode())
| 26.740741 | 67 | 0.66759 | 524 | 0.725762 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.232687 |
4eeadf88e58b24119aee24c1e548e0b28afca7bf | 2,004 | py | Python | checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py | Devocean8-Official/checkov | 8ce61421fa838a97981ab3bd0ae2a12e541666b2 | [
"Apache-2.0"
] | 1 | 2021-02-13T15:24:42.000Z | 2021-02-13T15:24:42.000Z | checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py | Devocean8-Official/checkov | 8ce61421fa838a97981ab3bd0ae2a12e541666b2 | [
"Apache-2.0"
] | 7 | 2021-04-12T06:54:07.000Z | 2022-03-21T14:04:14.000Z | checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py | Devocean8-Official/checkov | 8ce61421fa838a97981ab3bd0ae2a12e541666b2 | [
"Apache-2.0"
] | 1 | 2021-12-16T03:09:55.000Z | 2021-12-16T03:09:55.000Z | import re
from typing import Any, Dict
from checkov.common.models.consts import DOCKER_IMAGE_REGEX
from checkov.common.models.enums import CheckResult
from checkov.kubernetes.checks.resource.base_container_check import BaseK8sContainerCheck
class ImagePullPolicyAlways(BaseK8sContainerCheck):
    def __init__(self) -> None:
        """
        Image pull policy should be set to always to ensure you get the correct image and imagePullSecrets are correct
        Default is 'IfNotPresent' unless image tag is omitted or :latest
        https://kubernetes.io/docs/concepts/configuration/overview/#container-images
        An admission controller could be used to enforce imagePullPolicy
        """
        name = "Image Pull Policy should be Always"
        id = "CKV_K8S_15"
        # Location: container .imagePullPolicy
        super().__init__(name=name, id=id)

    def scan_container_conf(self, metadata: Dict[str, Any], conf: Dict[str, Any]) -> CheckResult:
        """Evaluate one container spec.

        PASSED when the effective pull policy is Always -- either explicitly,
        or via the Kubernetes default for an omitted/:latest tag; FAILED
        otherwise; UNKNOWN for a malformed image value.
        """
        self.evaluated_container_keys = ["image", "imagePullPolicy"]
        if conf.get("image"):
            image_val = conf["image"]
            if not isinstance(image_val, str) or image_val.strip() == "":
                return CheckResult.UNKNOWN
            # Remove the digest, if present
            if "@" in image_val:
                image_val = image_val[0 : image_val.index("@")]
            matches = re.findall(DOCKER_IMAGE_REGEX, image_val)
            if not matches:
                # robustness: an unparsable image name previously raised
                # IndexError here; report it as UNKNOWN instead
                return CheckResult.UNKNOWN
            (image, tag) = matches[0]
            if "imagePullPolicy" not in conf:
                if tag == "latest" or tag == "":
                    # Default imagePullPolicy = Always
                    return CheckResult.PASSED
                else:
                    # Default imagePullPolicy = IfNotPresent
                    return CheckResult.FAILED
            else:
                if conf["imagePullPolicy"] != "Always":
                    return CheckResult.FAILED
                else:
                    # Bugfix: an explicit `imagePullPolicy: Always` used to
                    # fall into `return CheckResult.FAILED`; it must pass.
                    return CheckResult.PASSED
        return CheckResult.PASSED


check = ImagePullPolicyAlways()
| 39.294118 | 118 | 0.628244 | 1,725 | 0.860778 | 0 | 0 | 0 | 0 | 0 | 0 | 655 | 0.326846 |
4eeb6f838c4a0c27e093b28216bbce9b8353f8d9 | 381 | py | Python | jobs/migrations/0018_rename_user_id_recruiterpage_user.py | digitaloxford/do-wagtail | 49dd75b95109ebb38bf66aca13d3fdeb8e25d319 | [
"MIT"
] | 2 | 2021-04-11T11:59:51.000Z | 2021-04-12T06:56:23.000Z | jobs/migrations/0018_rename_user_id_recruiterpage_user.py | digitaloxford/do-wagtail | 49dd75b95109ebb38bf66aca13d3fdeb8e25d319 | [
"MIT"
] | 8 | 2021-04-10T10:40:27.000Z | 2022-01-25T16:32:22.000Z | jobs/migrations/0018_rename_user_id_recruiterpage_user.py | digitaloxford/do-wagtail | 49dd75b95109ebb38bf66aca13d3fdeb8e25d319 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-05 11:05
from django.db import migrations
class Migration(migrations.Migration):
    # Renames RecruiterPage.user_id back to `user`, reversing the rename
    # performed in migration 0017.
    dependencies = [
        ('jobs', '0017_rename_user_recruiterpage_user_id'),
    ]
    operations = [
        migrations.RenameField(
            model_name='recruiterpage',
            old_name='user_id',
            new_name='user',
        ),
    ]
| 20.052632 | 59 | 0.603675 | 296 | 0.776903 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.322835 |
4eeb8ef3221f67956b15ccde258bd099c6b02706 | 5,154 | py | Python | celescope/fusion/multi_fusion.py | frostmoure98/CeleScope | e001fa18ba3d4aaf3f5bc40aa9386fa0d7866381 | [
"MIT"
] | null | null | null | celescope/fusion/multi_fusion.py | frostmoure98/CeleScope | e001fa18ba3d4aaf3f5bc40aa9386fa0d7866381 | [
"MIT"
] | null | null | null | celescope/fusion/multi_fusion.py | frostmoure98/CeleScope | e001fa18ba3d4aaf3f5bc40aa9386fa0d7866381 | [
"MIT"
] | null | null | null | import os
import glob
import sys
import argparse
import re
from collections import defaultdict
from celescope.__init__ import __CONDA__
from celescope.fusion.__init__ import __STEPS__, __ASSAY__
from celescope.tools.utils import merge_report, generate_sjm
from celescope.tools.utils import parse_map_col4, multi_opts, link_data
def main():
    """Build and launch the celescope fusion pipeline for every sample.

    Parses the multi-sample command line, then for each sample in the
    mapfile chains the pipeline steps (sample -> barcode -> cutadapt ->
    STAR_fusion -> count_fusion) either into an SJM job file (--mod sjm)
    or into per-sample shell scripts (--mod shell).

    Bugfix: the count_fusion command was never appended to ``shell_dict``,
    so in shell mode the generated per-sample script silently stopped after
    STAR_fusion; it is now appended like every other step.
    """
    # init: pipeline constants shared by every generated command
    assay = __ASSAY__
    steps = __STEPS__
    conda = __CONDA__
    app = 'celescope'
    # parser
    parser = multi_opts(assay)
    parser.add_argument('--starMem', help='starMem', default=10)
    parser.add_argument('--thread', help='thread', default=6)
    parser.add_argument('--genomeDir', help='fusion genomeDir', required=True)
    parser.add_argument(
        "--fusion_pos",
        help="first base position of the second gene(0-start),tsv file",
        required=True)
    parser.add_argument("--UMI_min", default=1)
    args = parser.parse_args()
    # read args
    outdir = args.outdir
    chemistry = args.chemistry
    pattern = args.pattern
    whitelist = args.whitelist
    linker = args.linker
    lowQual = args.lowQual
    lowNum = args.lowNum
    mod = args.mod
    rm_files = args.rm_files
    # parse mapfile: fq_dict maps sample -> fastq paths, match_dict maps
    # sample -> matched scRNA-seq directory (used by count_fusion)
    fq_dict, match_dict = parse_map_col4(args.mapfile, None)
    # link raw data into the output directory
    link_data(outdir, fq_dict)
    # custom args
    thread = args.thread
    genomeDir = args.genomeDir
    starMem = args.starMem
    fusion_pos = args.fusion_pos
    UMI_min = args.UMI_min
    # mk log dir
    logdir = outdir + '/log'
    os.system('mkdir -p %s' % (logdir))
    # script init
    sjm_cmd = 'log_dir %s\n' % (logdir)
    sjm_order = ''
    shell_dict = defaultdict(str)
    # per-sample output directories, numbered in pipeline order
    for sample in fq_dict:
        outdir_dic = {}
        index = 0
        for step in steps:
            step_outdir = f"{outdir}/{sample}/{index:02d}.{step}"
            outdir_dic.update({step: step_outdir})
            index += 1
        # sample
        step = "sample"
        cmd = (
            f'{app} {assay} {step} '
            f'--chemistry {chemistry} '
            f'--sample {sample} --outdir {outdir_dic[step]} --assay {assay} '
        )
        sjm_cmd += generate_sjm(cmd, f'{step}_{sample}', conda)
        shell_dict[sample] += cmd + '\n'
        last_step = step
        # barcode
        arr = fq_dict[sample]
        step = "barcode"
        cmd = (
            f'{app} {assay} {step} '
            f'--fq1 {arr[0]} --fq2 {arr[1]} --chemistry {chemistry} '
            f'--pattern {pattern} --whitelist {whitelist} --linker {linker} '
            f'--sample {sample} --lowQual {lowQual} --thread {thread} '
            f'--lowNum {lowNum} --outdir {outdir_dic[step]} --assay {assay} '
        )
        sjm_cmd += generate_sjm(cmd, f'{step}_{sample}', conda, m=5, x=thread)
        sjm_order += f'order {step}_{sample} after {last_step}_{sample}\n'
        shell_dict[sample] += cmd + '\n'
        last_step = step
        # adapt
        step = "cutadapt"
        fq = f'{outdir_dic["barcode"]}/{sample}_2.fq.gz'
        cmd = (
            f'{app} {assay} {step} '
            f'--fq {fq} --sample {sample} --outdir '
            f'{outdir_dic[step]} --assay {assay} '
        )
        sjm_cmd += generate_sjm(cmd, f'{step}_{sample}', conda, m=5, x=1)
        sjm_order += f'order {step}_{sample} after {last_step}_{sample}\n'
        shell_dict[sample] += cmd + '\n'
        last_step = step
        # STAR_fusion
        step = 'STAR_fusion'
        input_read = f'{outdir_dic["cutadapt"]}/{sample}_clean_2.fq.gz'
        cmd = (
            f'{app} {assay} {step} '
            f'--outdir {outdir_dic[step]} --assay {assay} --sample {sample} '
            f'--thread {thread} '
            f'--input_read {input_read} '
            f'--genomeDir {genomeDir} '
        )
        sjm_cmd += generate_sjm(cmd, f'{step}_{sample}', conda, m=starMem, x=thread)
        sjm_order += f'order {step}_{sample} after {last_step}_{sample}\n'
        shell_dict[sample] += cmd + '\n'
        last_step = step
        # count_fusion
        step = 'count_fusion'
        bam = f'{outdir_dic["STAR_fusion"]}/{sample}_Aligned.sortedByCoord.out.bam'
        cmd = (
            f'{app} {assay} {step} '
            f'--outdir {outdir_dic[step]} --assay {assay} --sample {sample} '
            f'--bam {bam} '
            f'--UMI_min {UMI_min} '
            f'--match_dir {match_dict[sample]} '
            f'--fusion_pos {fusion_pos} '
        )
        sjm_cmd += generate_sjm(cmd, f'{step}_{sample}', conda, m=20, x=thread)
        sjm_order += f'order {step}_{sample} after {last_step}_{sample}\n'
        # Bugfix: append count_fusion to the shell script like every other step.
        shell_dict[sample] += cmd + '\n'
        last_step = step
    # merged report (SJM mode only)
    if mod == 'sjm':
        step = 'merge_report'
        merge_report(
            fq_dict,
            steps,
            last_step,
            sjm_cmd,
            sjm_order,
            logdir,
            conda,
            outdir,
            rm_files,
        )
    if mod == 'shell':
        os.system('mkdir -p ./shell/')
        for sample in shell_dict:
            with open(f'./shell/{sample}.sh', 'w') as f:
                f.write(shell_dict[sample])
if __name__ == '__main__':
main() | 30.862275 | 84 | 0.558013 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,860 | 0.360885 |