Dataset schema (39 columns; ⌀ marks a nullable column):

| column | type | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (⌀) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (⌀) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (⌀) | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (⌀) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (⌀) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (⌀) | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (⌀) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (⌀) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (⌀) | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
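The three layout columns (`avg_line_length`, `max_line_length`, `alphanum_fraction`) appear to be simple functions of `content`. The sketch below gives plausible definitions; it is not the dataset's actual extraction pipeline, and the helper name `line_stats` is invented for illustration. The values it produces are consistent with the records that follow, e.g. a 193-byte, 10-line file gives an `avg_line_length` of 19.3.

```python
# Minimal sketch: plausible definitions of the derived layout columns.
# Assumption: lengths are measured in characters of the raw file content.
def line_stats(content: str) -> dict:
    lines = content.splitlines()
    n_chars = len(content)
    n_alnum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": n_chars / len(lines) if lines else 0.0,
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": n_alnum / n_chars if n_chars else 0.0,
    }


if __name__ == "__main__":
    sample = "from application import bootstrap\n\nbootstrap()\n"
    print(line_stats(sample))
```

Each record below is laid out as a metadata block, the file `content`, and a statistics block.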
hexsha: 3f2a269b65ae0fe9d318d9013769d0a87c6d1a66 | size: 2,475 | ext: py | lang: Python
max_stars: cesium_app/app_server.py @ yaowenxi/cesium (head b87c8bcafc8a7707877f8b9e9b111a2a99b5aeee) | licenses: ["BSD-3-Clause"] | stars: 41 | 2016-10-10T23:14:54.000Z to 2021-07-08T19:44:14.000Z
max_issues: cesium_app/app_server.py @ cesium-ml/cesium_web (head 6dd9977ff037982d50f740bfb62012b508eebd29) | licenses: ["BSD-3-Clause"] | issues: 200 | 2016-06-22T19:55:38.000Z to 2022-03-22T18:42:19.000Z
max_forks: cesium_app/app_server.py @ yaowenxi/cesium (head b87c8bcafc8a7707877f8b9e9b111a2a99b5aeee) | licenses: ["BSD-3-Clause"] | forks: 26 | 2016-04-21T00:50:03.000Z to 2019-11-04T20:19:53.000Z
content:
import tornado.web
import os
import sys
import pathlib
from baselayer.app.config import Config
from . import models
from baselayer.app import model_util
# This provides `login`, `complete`, and `disconnect` endpoints
from social_tornado.routes import SOCIAL_AUTH_ROUTES
from .handlers import (
ProjectHandler,
DatasetHandler,
FeatureHandler,
PrecomputedFeaturesHandler,
ModelHandler,
PredictionHandler,
FeatureListHandler,
SklearnModelsHandler,
PlotFeaturesHandler,
PredictRawDataHandler
)
def make_app(cfg, baselayer_handlers, baselayer_settings):
"""Create and return a `tornado.web.Application` object with specified
handlers and settings.
Parameters
----------
cfg : Config
Loaded configuration. Can be specified with '--config'
(multiple uses allowed).
baselayer_handlers : list
Tornado handlers needed for baselayer to function.
baselayer_settings : cfg
Settings needed for baselayer to function.
"""
if baselayer_settings['cookie_secret'] == 'abc01234':
print('!' * 80)
print(' Your server is insecure. Please update the secret string ')
print(' in the configuration file!')
print('!' * 80)
for path_name, path in cfg['paths'].items():
if not os.path.exists(path):
print("Creating %s" % path)
try:
os.makedirs(path)
except Exception as e:
print(e)
handlers = baselayer_handlers + [
(r'/project(/.*)?', ProjectHandler),
(r'/dataset(/.*)?', DatasetHandler),
(r'/features(/[0-9]+)?', FeatureHandler),
(r'/features/([0-9]+)/(download)', FeatureHandler),
(r'/precomputed_features(/.*)?', PrecomputedFeaturesHandler),
(r'/models(/[0-9]+)?', ModelHandler),
(r'/models/([0-9]+)/(download)', ModelHandler),
(r'/predictions(/[0-9]+)?', PredictionHandler),
(r'/predictions/([0-9]+)/(download)', PredictionHandler),
(r'/predict_raw_data', PredictRawDataHandler),
(r'/features_list', FeatureListHandler),
(r'/sklearn_models', SklearnModelsHandler),
(r'/plot_features/(.*)', PlotFeaturesHandler)
]
settings = baselayer_settings
# settings.update({}) # Specify additional settings here
app = tornado.web.Application(handlers, **settings)
models.init_db(**cfg['database'])
model_util.create_tables()
return app
avg_line_length: 30.555556 | max_line_length: 76 | alphanum_fraction: 0.641212
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 997 (score 0.402828)

hexsha: 3f2ad7646cc054828d1f34022e2ec7ed31b8f6a0 | size: 903 | ext: py | lang: Python
max_stars: tests/test_building.py @ sietekk/elevator (head 5058d36df323cc31a078c7016c57cc7f4488fcdc) | licenses: ["MIT"] | stars: null | dates: null
max_issues: tests/test_building.py @ sietekk/elevator (head 5058d36df323cc31a078c7016c57cc7f4488fcdc) | licenses: ["MIT"] | issues: null | dates: null
max_forks: tests/test_building.py @ sietekk/elevator (head 5058d36df323cc31a078c7016c57cc7f4488fcdc) | licenses: ["MIT"] | forks: null | dates: null
content:
#
# Copyright (c) 2016 Michael Conroy
#
from elevator.building import (
Building,
Floor,
DEFAULT_FLOOR_QTY,
DEFAULT_ELEVATOR_QTY,
)
from elevator.elevator import Elevator
def test_building():
b1 = Building()
assert len(b1.floors) == DEFAULT_FLOOR_QTY, \
"Incorrect default number of floors"
assert len(b1.elevators) == DEFAULT_ELEVATOR_QTY, \
"Incorrect default number of elevators"
b2 = Building(20, 5)
assert len(b2.floors) == 20, \
"Initialize to wrong number of floors"
assert len(b2.elevators) == 5, \
"Initialized to wrong number of elevators"
for elevator in b2:
assert isinstance(elevator, Elevator), \
"Elevator object not instantiated with Elevator class"
for floor in b2.floors:
assert isinstance(floor, Floor), \
"Floor object not instantiated with Floor class"
avg_line_length: 24.405405 | max_line_length: 66 | alphanum_fraction: 0.660022
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 294 (score 0.325581)

hexsha: 3f2bc27a1667776823d6302c9923d489d7a4ce6b | size: 707 | ext: py | lang: Python
max_stars: Modulo_3/semana 2/miercoles/main.py @ AutodidactaMx/cocid_python (head 11628f465ff362807a692c79ede26bf30dd8e26a) | licenses: ["MIT"] | stars: null | dates: null
max_issues: Modulo_3/semana 2/miercoles/main.py @ AutodidactaMx/cocid_python (head 11628f465ff362807a692c79ede26bf30dd8e26a) | licenses: ["MIT"] | issues: null | dates: null
max_forks: Modulo_3/semana 2/miercoles/main.py @ AutodidactaMx/cocid_python (head 11628f465ff362807a692c79ede26bf30dd8e26a) | licenses: ["MIT"] | forks: 1 | 2022-03-04T00:57:18.000Z to 2022-03-04T00:57:18.000Z
content:
import tkinter as tk
from presentacion.formulario import FormularioPersona
def centrar_ventana(ventana):
aplicacion_ancho = 550
aplicacion_largo = 650
pantall_ancho = ventana.winfo_screenwidth()
pantall_largo = ventana.winfo_screenheight()
x = int((pantall_ancho/2) - (aplicacion_ancho/2))
y = int((pantall_largo/2) - (aplicacion_largo/2))
return ventana.geometry(f"{aplicacion_ancho}x{aplicacion_largo}+{x}+{y}")
try:
ventana=tk.Tk()
centrar_ventana(ventana)
ventana.title("Formulario")
form = FormularioPersona(ventana)
ventana.mainloop()
except Exception as e:
print("Existe un error : ", e)
avg_line_length: 30.73913 | max_line_length: 77 | alphanum_fraction: 0.663366
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 80 (score 0.113154)

hexsha: 3f2bff3972bb90e7ea59576e6eccf7d56961ec7e | size: 196 | ext: py | lang: Python
max_stars: Voting/urls.py @ Poornartha/Odonata (head 71e8dfc4e8d93c6ecc1a3a155459b7e43bd28cdb) | licenses: ["MIT"] | stars: null | dates: null
max_issues: Voting/urls.py @ Poornartha/Odonata (head 71e8dfc4e8d93c6ecc1a3a155459b7e43bd28cdb) | licenses: ["MIT"] | issues: null | dates: null
max_forks: Voting/urls.py @ Poornartha/Odonata (head 71e8dfc4e8d93c6ecc1a3a155459b7e43bd28cdb) | licenses: ["MIT"] | forks: null | dates: null
content:
from django.urls import path
from .views import teams_all, team_vote
urlpatterns = [
path('teams/all', teams_all, name="teams_all"),
path('teams/<int:pk>', team_vote, name="team_vote"),
]
avg_line_length: 28 | max_line_length: 56 | alphanum_fraction: 0.704082
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 49 (score 0.25)

hexsha: 3f2fbc4cbbd9085f9e4653f26ebfd93f8e5ea745 | size: 5,594 | ext: py | lang: Python
max_stars: models/3-Whats goin on/train_code/resnext50/train.py @ cns-iu/HuBMAP---Hacking-the-Kidney (head 1a41c887f8edb0b52f5afade384a17dc3d3efec4) | licenses: ["MIT"] | stars: null | dates: null
max_issues: models/3-Whats goin on/train_code/resnext50/train.py @ cns-iu/HuBMAP---Hacking-the-Kidney (head 1a41c887f8edb0b52f5afade384a17dc3d3efec4) | licenses: ["MIT"] | issues: null | dates: null
max_forks: models/3-Whats goin on/train_code/resnext50/train.py @ cns-iu/HuBMAP---Hacking-the-Kidney (head 1a41c887f8edb0b52f5afade384a17dc3d3efec4) | licenses: ["MIT"] | forks: null | dates: null
content:
from Dataset import *
from Network import *
from Functions import *
import os
from fastai.distributed import *
import argparse
import torch
try:
#from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
from tqdm import tqdm
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=str, default='0,1', help='which gpu to use')
parser.add_argument('--path', type=str, default='/N/u/soodn/Carbonate/hubmap-kidney-segmentation', help='path of csv file with DNA sequences and labels')
parser.add_argument('--epochs', type=int, default=32, help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=64, help='size of each batch during training')
parser.add_argument('--weight_decay', type=float, default=1e-5, help='weight dacay used in optimizer')
parser.add_argument('--save_freq', type=int, default=1, help='saving checkpoints per save_freq epochs')
parser.add_argument('--dropout', type=float, default=.1, help='transformer dropout')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--nfolds', type=int, default=4, help='number of cross validation folds')
parser.add_argument('--fold', type=int, default=0, help='which fold to train')
parser.add_argument('--val_freq', type=int, default=1, help='which fold to train')
parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
parser.add_argument('--expansion', type=int, default=64, help='number of expansion pixels')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='gradient_accumulation_steps')
parser.add_argument('--transfer', default=1, help='transfer learning activated')
opts = parser.parse_args()
return opts
opts=get_args()
#set up gpu
os.environ["CUDA_VISIBLE_DEVICES"] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
os.system('mkdir models')
os.system('mkdir logs')
#dice = Dice_th_pred(np.arange(0.2,0.7,0.01))
#datasets and dataloaders
dataset = HuBMAPDataset(path=opts.path, fold=opts.fold, nfolds=opts.nfolds, train=True, tfms=get_aug())
val_dataset = HuBMAPDataset(path=opts.path, fold=opts.fold, nfolds=opts.nfolds, train=False)
dataloader = DataLoader(dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.workers, drop_last=True)
val_dataloader = DataLoader(val_dataset, batch_size=opts.batch_size, shuffle=False, num_workers=opts.workers, drop_last=True)
#model and optimizer
model = UneXt50().cuda()
#optimizer = Ranger(model.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)
optimizer = torch.optim.Adam(model.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)
# scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.1, div_factor=1e3,
# max_lr=1e-3, epochs=opts.epochs, steps_per_epoch=len(dataloader))
criterion=nn.BCEWithLogitsLoss()
opt_level = 'O1'
model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)
model = nn.DataParallel(model)
####### Transfer learning #######
if opts.transfer == 1:
best_model_path = "models_scratch/fold4.pth"
state_dict = torch.load(best_model_path)
model.load_state_dict(state_dict)
#some more things
logger=CSVLogger(['epoch','train_loss','val_loss','dice_coef'],f"logs/log_fold{opts.fold}.csv")
metric=Dice_soft()
best_metric=0
#training
scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.2, div_factor=1e2, max_lr=1e-4, epochs=opts.epochs, steps_per_epoch=len(dataloader))
for epoch in range(opts.epochs):
train_loss=0
model.train(True)
for data in tqdm(dataloader):
img=data['img'].to(device)
mask=data['mask'].to(device)
img=cutout(img)
output=model(img)
loss=criterion(output,mask)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
#if step%opts.gradient_accumulation_steps==0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
train_loss+=loss.item()
#break
train_loss/=len(dataloader)
print(f"### validating for epoch {epoch} ###")
val_loss=0
model.eval()
metric.reset()
with torch.no_grad():
for data in tqdm(val_dataloader):
img=data['img'].to(device)
mask=data['mask'].to(device)
# load the batch first, then drop the last sample if the batch size is odd
if img.shape[0]%2!=0:
img=img[:-1]
mask=mask[:-1]
shape=img.shape
output=model(img)[:,:,opts.expansion//2:-opts.expansion//2,opts.expansion//2:-opts.expansion//2]
output[output != output] = 0
mask=mask[:,:,opts.expansion//2:-opts.expansion//2,opts.expansion//2:-opts.expansion//2]
metric.accumulate(output.detach(), mask)
loss=criterion(output,mask)
val_loss+=loss.item()
val_loss/=len(val_dataloader)
metric_this_epoch=metric.value
# metric_this_epoch=val_loss
logger.log([epoch+1,train_loss,val_loss,metric_this_epoch])
if metric_this_epoch>best_metric:
torch.save(model.state_dict(),f'models/fold{opts.fold}.pth')
best_metric=metric_this_epoch
avg_line_length: 44.752 | max_line_length: 163 | alphanum_fraction: 0.704147
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 1,594 (score 0.284948)

hexsha: 3f302d5ab9b39b181ba1fa9fc5436b10d4114686 | size: 6,220 | ext: py | lang: Python
max_stars: models/node/node.py @ InfoCoV/Multi-Cro-CoV-cseBERT (head 5edc8e6d9c7de285c8fbb537b72f8f8b081d531d) | licenses: ["MIT"] | stars: null | dates: null
max_issues: models/node/node.py @ InfoCoV/Multi-Cro-CoV-cseBERT (head 5edc8e6d9c7de285c8fbb537b72f8f8b081d531d) | licenses: ["MIT"] | issues: null | dates: null
max_forks: models/node/node.py @ InfoCoV/Multi-Cro-CoV-cseBERT (head 5edc8e6d9c7de285c8fbb537b72f8f8b081d531d) | licenses: ["MIT"] | forks: 1 | 2022-02-17T14:32:13.000Z to 2022-02-17T14:32:13.000Z
content:
"""
NODE model definition and experiment setup.
Neural Oblivious Decision Ensembles for Deep Learning on Tabular Data
https://arxiv.org/abs/1909.06312
Model details:
https://pytorch-tabular.readthedocs.io/en/latest/models/#nodemodel
"""
import logging
import os.path
import shutil
from sklearn.metrics import classification_report
from omegaconf import OmegaConf
import optuna
from optuna.samplers import TPESampler
from pytorch_tabular import TabularModel
from pytorch_tabular.models import NodeConfig
from pytorch_tabular.config import (
DataConfig, OptimizerConfig, TrainerConfig, ExperimentConfig)
from pytorch_tabular.utils import get_class_weighted_cross_entropy
from optuna_utils import OptunaExperiments, run_experiments
LOGGER = logging.getLogger(__name__)
LABEL_COL = "retweet_label"
# updated by train.py before running
config = OmegaConf.create(
{"max_epochs": 50,
"lr_exp_min": -4,
"lr_exp_max": -3,
"alpha_exp_min": -4,
"alpha_exp_max": -3,
"batch_exp_min": 7,
"batch_exp_max": 8,
"num_trees_min": 512,
"num_trees_max": 2560,
"num_trees_step": 512,
"depth_min": 4,
"depth_max": 6,
"categorical_cols": [
"entities.urls", "entities.media", "user_in_net",
"has_covid_keyword", "user.followers_isna",
"users_mention_isna", "following_users_isna",
"users_reply_isna"],
"exp_log_freq": 100,
"seed": 1,
"num_workers": 24,
"embed_categorical": True}
)
class Experiments(OptunaExperiments):
def __init__(
self,
train_data,
val_data,
train_labels,
val_labels,
experiment_root,
config):
self.train_data_joined = train_data.copy()
self.train_data_joined[LABEL_COL] = train_labels
self.val_data_joined = val_data.copy()
self.val_data_joined[LABEL_COL] = val_labels
self.experiment_root = experiment_root
self.config = config
self.study = self.create_study()
self.best_score = None
self.cat_col_names = config.categorical_cols
self.num_col_names = [
c for c in train_data.columns if c not in config.categorical_cols]
self.data_config = DataConfig(
target=[LABEL_COL],
continuous_cols=self.num_col_names,
categorical_cols=self.cat_col_names,
normalize_continuous_features=False,
num_workers=config.num_workers)
self.weighted_loss = get_class_weighted_cross_entropy(
train_labels.values.ravel(), mu=0.1)
def create_study(self):
sampler = TPESampler(seed=self.config.study_seed)
study = optuna.create_study(sampler=sampler, direction="maximize")
for trial_dict in self.config.default_trials:
study.enqueue_trial(trial_dict)
return study
def optimize(self):
self.study.optimize(self.objective, n_trials=self.config.n_trials)
self.store_study()
def objective(self, trial):
lr_exp = trial.suggest_int(
"lr_exp", self.config.lr_exp_min, self.config.lr_exp_max)
lr = 10 ** lr_exp
alpha_exp = trial.suggest_int(
"alpha_exp", self.config.alpha_exp_min, self.config.alpha_exp_max)
alpha = 10 ** alpha_exp
batch_exp = trial.suggest_int(
"batch_exp", self.config.batch_exp_min, self.config.batch_exp_max)
batch_size = 2 ** batch_exp
num_trees = trial.suggest_int(
"num_trees",
self.config.num_trees_min, self.config.num_trees_max,
self.config.num_trees_step
)
depth = trial.suggest_int(
"depth", self.config.depth_min, self.config.depth_max)
experiment_path = self.config.experiment_root
checkpoints_path = os.path.join(experiment_path, "checkpoints")
tb_logs = os.path.join(experiment_path, "tb_logs")
run_name = "category_embedding"
# store all just for the current optuna run
if os.path.exists(checkpoints_path):
shutil.rmtree(checkpoints_path)
if os.path.exists(tb_logs):
shutil.rmtree(tb_logs)
trainer_config = TrainerConfig(
auto_lr_find=False,
gpus=1,
deterministic=True,
batch_size=batch_size,
max_epochs=self.config.max_epochs,
checkpoints_path=checkpoints_path,
)
optimizer_config = OptimizerConfig(
optimizer="AdamW",
optimizer_params={"weight_decay": alpha}
)
model_config = NodeConfig(
task="classification",
learning_rate=lr,
loss=self.weighted_loss,
num_trees=num_trees,
depth=depth,
embed_categorical=self.config.embed_categorical,
)
experiment_config = ExperimentConfig(
project_name=tb_logs,
run_name=run_name,
exp_log_freq=self.config.exp_log_freq
)
tabular_model = TabularModel(
data_config=self.data_config,
model_config=model_config,
optimizer_config=optimizer_config,
trainer_config=trainer_config,
experiment_config=experiment_config
)
tabular_model.fit(
train=self.train_data_joined,
validation=self.val_data_joined,
seed=self.config.seed,
loss=self.weighted_loss)
result = tabular_model.evaluate(self.val_data_joined)
LOGGER.info(result)
pred_df = tabular_model.predict(self.val_data_joined)
val_predictions = pred_df.prediction.values
out = classification_report(
self.val_data_joined[LABEL_COL].values, val_predictions,
digits=3, output_dict=True)
LOGGER.info(out)
f1 = out["macro avg"]["f1-score"]
if self.best_score is None or f1 > self.best_score:
self.best_score = f1
self.store_results(tabular_model, out)
self.store_study()
return f1
def run(config):
run_experiments(
config=config,
experiments_class=Experiments)
avg_line_length: 31.414141 | max_line_length: 78 | alphanum_fraction: 0.646302
count_classes: 4,625 (score 0.743569) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 868 (score 0.13955)

hexsha: 3f30899107200b08356b7f18f040b82026d98005 | size: 8,570 | ext: py | lang: Python
max_stars: dtf/packages/models.py @ WebPowerLabs/django-trainings (head 97f7a96c0fbeb85a001201c74713f7944cb77236) | licenses: ["BSD-3-Clause"] | stars: null | dates: null
max_issues: dtf/packages/models.py @ WebPowerLabs/django-trainings (head 97f7a96c0fbeb85a001201c74713f7944cb77236) | licenses: ["BSD-3-Clause"] | issues: null | dates: null
max_forks: dtf/packages/models.py @ WebPowerLabs/django-trainings (head 97f7a96c0fbeb85a001201c74713f7944cb77236) | licenses: ["BSD-3-Clause"] | forks: null | dates: null
content:
from django.db import models
from django.core.urlresolvers import reverse
from djnfusion import server, key
from django.conf import settings
from jsonfield import JSONField
# TODO: change to this. Currently doesnt work. may have something to do with
# the server not in __init__
# from packages.providers.infusionsoft import server, key
from .managers import InfusionsoftTagManager, PackagePurchaseManager
from packages.managers import PackageManager
def remove_unused(_dict):
return_dict = {}
for _key, _value in _dict.iteritems():
if _value:
return_dict[_key] = _value
return return_dict
def setdictattrs(obj, _dict):
_dict = remove_unused(_dict)
for _key, _value in _dict.iteritems():
setattr(obj, _key, _value)
class Package(models.Model):
"""
Base for package classes
"""
name = models.CharField(max_length=255)
courses = models.ManyToManyField("courses.Course", null=True, blank=True)
lessons = models.ManyToManyField("lessons.Lesson", null=True, blank=True)
groups = models.ManyToManyField("facebook_groups.FacebookGroup", null=True,
blank=True)
journals = models.ManyToManyField("journals.JournalQuestion", null=True,
blank=True)
objects = PackageManager()
def __unicode__(self):
return u'{}'.format(self.name if self.name else 'Package')
def get_absolute_url(self):
return reverse('packages:detail', kwargs={'pk': self.pk})
class PackagePurchase(models.Model):
"""
User's purchased packages.
"""
INACTIVE = 0
ACTIVE = 1
EXPIRED = 2
STATUS_CHOICES = [
[INACTIVE, 'Inactive'],
[ACTIVE, 'Active'],
[EXPIRED, 'Expired'],
]
user = models.ForeignKey(settings.AUTH_USER_MODEL)
package = models.ForeignKey('Package')
status = models.IntegerField(choices=STATUS_CHOICES, default=INACTIVE)
data = JSONField(blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = PackagePurchaseManager()
def __unicode__(self):
return u'{0} => {1}'.format(self.user, self.package)
def set_status(self, status):
self.status = status
self.save()
class InfusionsoftPackage(Package):
"""
Package with infusionsoft api hooks
"""
subscription_id = models.TextField(blank=True, null=True)
product_id = models.TextField(blank=True, null=True)
cycle = models.TextField(blank=True, null=True)
frequency = models.TextField(blank=True, null=True)
pre_authorize_amount = models.TextField(blank=True, null=True)
prorate = models.TextField(blank=True, null=True)
active = models.TextField(blank=True, null=True)
plan_price = models.TextField(blank=True, null=True)
product_price = models.TextField(blank=True, null=True)
description = models.TextField(blank=True, null=True)
status = models.TextField(blank=True, null=True)
action_set_id = models.TextField(blank=True, null=True)
tag = models.OneToOneField("InfusionsoftTag", blank=True, null=True)
purchase_url = models.URLField(blank=True, null=True)
def save(self, *args, **kwargs):
sync_data = self._get_sync_data(product_id=self.product_id) if self.product_id else None
if sync_data:
setdictattrs(self, sync_data)
return super(InfusionsoftPackage, self).save(*args, **kwargs)
def sync(self):
sync_data = self._get_sync_data()
if sync_data:
setdictattrs(self, sync_data)
self.save()
def _get_sync_data(self, product_id=None):
subscription_data = self._get_subscription_data(product_id)
product_data = self._get_product_data(product_id)
# initialise so the return below cannot reference an unbound name
package_data = None
if subscription_data and product_data:
package_data = dict({
"id": self.id,
"pk": self.pk,
"action_set_id": self.action_set_id,
"name": product_data.get("ProductName"),
"subscription_id": subscription_data.get("Id"),
"product_id": subscription_data.get("ProductId"),
"cycle": subscription_data.get("Cycle"),
"frequency": subscription_data.get("Frequency"),
"prorate": subscription_data.get("Prorate"),
"active": subscription_data.get("Active"),
"plan_price": subscription_data.get("PlanPrice"),
"product_price": product_data.get("ProductPrice"),
"description": product_data.get("Description"),
"status": product_data.get("Status"),
})
elif product_data:
# product but not subscription
package_data = dict({
"id": self.id,
"pk": self.pk,
"action_set_id": self.action_set_id,
"name": product_data.get("ProductName"),
"product_id": product_data.get("Id"),
"product_price": product_data.get("ProductPrice"),
"description": product_data.get("Description"),
"status": product_data.get("Status"),
})
return package_data if package_data else None
def _get_subscription_data(self, product_id=None):
product_id = product_id if product_id else self.product_id
if product_id:
results = server.DataService.findByField(key, "SubscriptionPlan",
10, 0, "productid", product_id,
["Id", "ProductId", "Cycle", "Frequency", "PreAuthorizeAmount",
"Prorate", "Active", "PlanPrice"]);
return results[0] if len(results) else None
def _get_product_data(self, product_id=None):
product_id = product_id if product_id else self.product_id
if product_id:
results = server.DataService.findByField(key, "Product",
10, 0, "id", product_id,
["Id", "ProductName", "ProductPrice", "Description",
"Status", "IsPackage"]);
return results[0] if len(results) else None
def cancel_subscription(self, contactId, actionSetId):
results = server.ContactService.runActionSequence(key, contactId,
actionSetId)
return results
@property
def price(self):
return self.plan_price if self.plan_price else self.product_price
class InfusionsoftTag(models.Model):
'''
Infusionsoft Tag (ContactGroup)
'''
remote_id = models.TextField()
group_category_id = models.TextField(blank=True, null=True)
group_name = models.TextField(blank=True, null=True)
group_description = models.TextField(blank=True, null=True)
objects = InfusionsoftTagManager()
def __unicode__(self):
return u'{}'.format(self.group_name if self.group_name else u'InfusionsoftTag Object')
def save(self, *args, **kwargs):
remote_id = kwargs.get('remote_id') if kwargs.get('remote_id') else self.remote_id
sync_data = self._get_sync_data(remote_id=remote_id) if remote_id else None
if sync_data:
obj = InfusionsoftTag(**sync_data)
return super(InfusionsoftTag, obj).save(*args, **kwargs)
else:
return super(InfusionsoftTag, self).save(*args, **kwargs)
def sync(self):
sync_data = self._get_sync_data()
if sync_data:
self = InfusionsoftTag(**sync_data)
self.save()
def _get_sync_data(self, remote_id=None):
provider_data = self._get_provider_data(remote_id)
if provider_data:
tag_data = dict({
"id": self.id,
"pk": self.pk,
"remote_id": provider_data.get("Id"),
"group_category_id": provider_data.get("GroupCategoryId"),
"group_name": provider_data.get("GroupName"),
"group_description": provider_data.get("GroupDescription"),
})
return tag_data
def _get_provider_data(self, remote_id=None):
remote_id = remote_id if remote_id else self.remote_id
if remote_id:
results = server.DataService.findByField(key, "ContactGroup",
10, 0, "id", remote_id,
["Id", "GroupCategoryId", "GroupName", "GroupDescription"]);
return results[0] if len(results) else None
avg_line_length: 38.430493 | max_line_length: 96 | alphanum_fraction: 0.626604
count_classes: 7,789 (score 0.908868) | count_generators: 0 (score 0) | count_decorators: 104 (score 0.012135) | count_async_functions: 0 (score 0) | count_documentation: 1,365 (score 0.159277)

hexsha: 3f3094c51e91fa31dccfedc07336034240d0cf3e | size: 1,438 | ext: py | lang: Python
max_stars: chap 2/2_1.py @ hmhuy2000/Reinforcement-Learning-SuttonBartoI (head 97ca9dc11c4cb4fda74b144e658c3eac756131ff) | licenses: ["MIT"] | stars: null | dates: null
max_issues: chap 2/2_1.py @ hmhuy2000/Reinforcement-Learning-SuttonBartoI (head 97ca9dc11c4cb4fda74b144e658c3eac756131ff) | licenses: ["MIT"] | issues: null | dates: null
max_forks: chap 2/2_1.py @ hmhuy2000/Reinforcement-Learning-SuttonBartoI (head 97ca9dc11c4cb4fda74b144e658c3eac756131ff) | licenses: ["MIT"] | forks: null | dates: null
content:
import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
class CFG:
n = 10
mean = 0.0
variance = 1.0
t = 1000
esp = [0, 0.01, 0.05, 0.1, 0.15, 0.2]
n_try = 2000
class bandit():
def __init__(self, m, v):
self.m = m
self.v = v
self.mean = 0.0
self.cnt = 0
def reset(self):
self.mean = 0.0
self.cnt = 0
def get_reward(self):
reward = self.v * np.random.randn() + self.m
return reward
def update(self, reward):
self.cnt += 1
self.mean = self.mean + 1/self.cnt * (reward - self.mean)
def get_result(e):
bandits = [bandit(np.random.randn(),CFG.variance) for i in range(CFG.n)]
res = []
global cnt
for _ in range(CFG.t):
if (np.random.random()<e):
choose = np.random.choice(CFG.n)
else:
choose = np.argmax([ban.mean for ban in bandits])
val = bandits[choose].get_reward()
res.append(val)
bandits[choose].update(val)
# print(res)
return res
plt.figure(figsize=(20, 10))
for e in CFG.esp:
res = np.zeros(CFG.t)
for tr in trange(CFG.n_try):
res += get_result(e)
print(res.shape)
res /= CFG.n_try
# print(res)
plt.plot(res, label = e)
print(f'done {e}')
plt.xlabel('step')
plt.ylabel('average reward')
plt.legend()
plt.savefig('figure_2_1.png')
plt.show()
avg_line_length: 21.147059 | max_line_length: 76 | alphanum_fraction: 0.553547
count_classes: 544 (score 0.378303) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 73 (score 0.050765)

hexsha: 3f312f416c35a4ef754ae001c14b305991e498d6 | size: 2,080 | ext: py | lang: Python
max_stars: jetavator_databricks_client/setup.py @ jetavator/jetavator_databricks (head 719c934b6391f6f41ca34b4d4df8c697c1a25283) | licenses: ["Apache-2.0"] | stars: null | dates: null
max_issues: jetavator_databricks_client/setup.py @ jetavator/jetavator_databricks (head 719c934b6391f6f41ca34b4d4df8c697c1a25283) | licenses: ["Apache-2.0"] | issues: null | dates: null
max_forks: jetavator_databricks_client/setup.py @ jetavator/jetavator_databricks (head 719c934b6391f6f41ca34b4d4df8c697c1a25283) | licenses: ["Apache-2.0"] | forks: null | dates: null
content:
# -*- coding: utf-8 -*-
import io
import os
from setuptools import setup, find_packages
# Package metadata
# ----------------
SHORT_NAME = 'databricks_client'
NAME = 'jetavator_databricks_client'
DESCRIPTION = (
'Databricks support for the Jetavator engine '
'to be installed on the client system'
)
URL = 'https://github.com/jetavator/jetavator'
EMAIL = 'joetaylorconsulting@gmail.com'
AUTHOR = 'Joe Taylor'
REQUIRES_PYTHON = '>=3.7.0'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
'jetavator>=0.1.5',
'lazy-property>=0.0.1,<1',
'databricks-cli>=0.14.1,<0.15',
'nbformat>=5.0.8,<6',
'azure-storage-queue>=12.1.5,<13',
'azure-storage-blob>=12.7.1,<13'
]
# What packages are optional?
EXTRAS = {
# 'some-feature': ['requirement'],
}
# Package setup
# -------------
# Import the README and use it as the long description
here = os.path.abspath(os.path.dirname(__file__))
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Import the LICENSE
with open(os.path.join(here, 'LICENSE')) as f:
license_text = f.read()
# Load the package's __version__.py module as a dictionary
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests', 'docs')),
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license=license_text,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7'
],
entry_points={'jetavator.plugins': f'{SHORT_NAME} = {NAME}'}
)
avg_line_length: 24.186047 | max_line_length: 73 | alphanum_fraction: 0.662019
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 964 (score 0.463462)

hexsha: 3f31a6445fe5a4545fbddede8dd570fd945d12b3 | size: 596 | ext: py | lang: Python
max_stars: web-interface/app/application/misc/pages/misc_options/purposes_sampling.py @ horvathi94/seqmeta (head 94f2f04c372181c93a6f68b6efe15b141ef02779) | licenses: ["MIT"] | stars: null | dates: null
max_issues: web-interface/app/application/misc/pages/misc_options/purposes_sampling.py @ horvathi94/seqmeta (head 94f2f04c372181c93a6f68b6efe15b141ef02779) | licenses: ["MIT"] | issues: null | dates: null
max_forks: web-interface/app/application/misc/pages/misc_options/purposes_sampling.py @ horvathi94/seqmeta (head 94f2f04c372181c93a6f68b6efe15b141ef02779) | licenses: ["MIT"] | forks: null | dates: null
content:
from dataclasses import dataclass
from .base import _MiscOptionBase
from application.src.misc.sampling import PurposesOfSampling
@dataclass
class Editor(_MiscOptionBase):
name = "Purpose of sampling"
id = "purpose_of_sampling"
link = "misc_bp.submit_purpose_of_sampling"
description = "The reason the sample was collected " \
"<em>e.g. diagnostic testing</em>"
@classmethod
def get_values(cls) -> list:
return PurposesOfSampling.fetch_list()
@classmethod
def save(cls, data: list) -> None:
PurposesOfSampling.save_by_procedure(data)
avg_line_length: 24.833333 | max_line_length: 60 | alphanum_fraction: 0.718121
count_classes: 452 (score 0.758389) | count_generators: 0 (score 0) | count_decorators: 463 (score 0.776846) | count_async_functions: 0 (score 0) | count_documentation: 150 (score 0.251678)

hexsha: 3f327b3abcfcf0bb203bb9dab1e2f88e290b8007 | size: 1,816 | ext: py | lang: Python
max_stars: src/python3/sdp/scripts/FWR_Postprocess/nstx_singlechannel_analysis.py @ LeiShi/Synthetic-Diagnostics-Platform (head 870120d3fd14b2a3c89c6e6e85625d1e9109a2de) | licenses: ["BSD-3-Clause"] | stars: 5 | 2019-08-16T22:08:19.000Z to 2021-02-24T02:47:05.000Z
max_issues: src/python3/sdp/scripts/FWR_Postprocess/nstx_singlechannel_analysis.py @ justthepython/Synthetic-Diagnostics-Platform (head 5f1cb5c29d182490acbd4f3c167f0e09ec211236) | licenses: ["BSD-3-Clause"] | issues: 1 | 2016-05-11T12:58:00.000Z to 2016-05-11T17:18:36.000Z
max_forks: src/python3/sdp/scripts/FWR_Postprocess/nstx_singlechannel_analysis.py @ justthepython/Synthetic-Diagnostics-Platform (head 5f1cb5c29d182490acbd4f3c167f0e09ec211236) | licenses: ["BSD-3-Clause"] | forks: 5 | 2018-04-29T12:35:59.000Z to 2020-01-10T03:38:30.000Z
content:
import sdp.scripts.load_nstx_exp_ref as nstx_exp
#import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp
import pickle
import numpy as np
with open('/p/gkp/lshi/XGC1_NSTX_Case/FullF_XGC_ti191_output/ref_pos.pck','r') as f:
ref_pos = pickle.load(f)
channel = 9
nt = 50
llim = 1e-7
ulim = 1e-4
time_array = np.linspace(llim,ulim,nt)
cs_mean = np.zeros((nt))
cs_median = np.zeros((nt))
cs_std = np.zeros((nt))
def cs_scan(cha = channel):
global cs_mean,cs_std,time_array,cs_median
time_array = np.linspace(llim,ulim,nt)
cs_median = np.zeros((nt))
cs_mean = np.zeros((nt))
cs_std = np.zeros((nt))
for t in range(nt):
cs_exp = nstx_exp.analyser.Coherent_over_time(0.632,0.640,1e-6,time_array[t],loader_num = cha)
cs_mean[t] = np.mean(np.abs(cs_exp))
cs_median[t] = np.median(np.abs(cs_exp))
cs_std[t] = np.std(np.abs(cs_exp))
return cs_mean,cs_median,cs_std
def get_coh_time(cha = channel):
mean,median,std = cs_scan(cha)
t_idx = np.argmax(std)
print('optimal window for channel {1}= {0:.4}'.format(time_array[t_idx],cha))
return time_array[t_idx]
def get_coh_median_std(cha = channel, window = None):
if(window is None):
window = get_coh_time(cha)
cs_exp = nstx_exp.analyser.Coherent_over_time(0.632,0.640,1e-6,window,loader_num = cha)
cs_ab = np.abs(cs_exp)
#median = np.median(cs_ab)
#print 'divider set to be {0:.4}'.format(median)
# cs_tophalf = cs_ab[np.nonzero(cs_ab>median)]
return np.median(cs_ab),np.std(cs_ab),cs_ab
def all_channel_coh_sigs(window = None):
m = np.zeros((16))
std = np.zeros((16))
cs_sig = []
for i in range(16):
m[i],std[i],cs_tmp = get_coh_median_std(i,window = window)
cs_sig.append(cs_tmp)
return m,std,cs_sig
avg_line_length: 26.705882 | max_line_length: 102 | alphanum_fraction: 0.669053
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 284 (score 0.156388)

hexsha: 3f33208b87772b3914f2f7e4b5518f6f944741b9 | size: 1,236 | ext: py | lang: Python
max_stars: tests/integration/suites/default/reboot.py @ bularcasergiu/Anjay (head a76399199dc9569d58aebc4bf18c494ca2127292) | licenses: ["Apache-2.0"] | stars: null | dates: null
max_issues: tests/integration/suites/default/reboot.py @ bularcasergiu/Anjay (head a76399199dc9569d58aebc4bf18c494ca2127292) | licenses: ["Apache-2.0"] | issues: null | dates: null
max_forks: tests/integration/suites/default/reboot.py @ bularcasergiu/Anjay (head a76399199dc9569d58aebc4bf18c494ca2127292) | licenses: ["Apache-2.0"] | forks: null | dates: null
content:
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from framework.lwm2m_test import *
class RebootSendsResponseTest(test_suite.Lwm2mSingleServerTest):
def _get_valgrind_args(self):
# Reboot cannot be performed when demo is run under valgrind
return []
def runTest(self):
self.serv.set_timeout(timeout_s=1)
# should send a response before rebooting
req = Lwm2mExecute(ResPath.Device.Reboot)
self.serv.send(req)
self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv())
# should register after rebooting
self.serv.reset()
self.assertDemoRegisters(self.serv)
avg_line_length: 34.333333 | max_line_length: 75 | alphanum_fraction: 0.719256
count_classes: 570 (score 0.461165) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 746 (score 0.60356)

hexsha: 3f33ca3b9b3ca0aa03c1d9c5a2a2a55f778438b0 | size: 22,381 | ext: py | lang: Python
max_stars: astrodet/scarlet.py @ lyf1436/astrodet (head 72d69fe38c9d744620866682e36a03c668c284f2) | licenses: ["MIT"] | stars: null | dates: null
max_issues: astrodet/scarlet.py @ lyf1436/astrodet (head 72d69fe38c9d744620866682e36a03c668c284f2) | licenses: ["MIT"] | issues: 2 | 2021-04-15T03:03:24.000Z to 2021-04-26T19:41:57.000Z
max_forks: astrodet/scarlet.py @ lyf1436/astrodet (head 72d69fe38c9d744620866682e36a03c668c284f2) | licenses: ["MIT"] | forks: 3 | 2021-03-18T14:08:12.000Z to 2021-10-08T04:26:14.000Z
content:
import sys, os
import numpy as np
import scarlet
import sep
from astropy.io import ascii
import astropy.io.fits as fits
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from astropy.wcs import WCS
def write_scarlet_results(datas, observation, starlet_sources, model_frame, catalog_deblended,
segmentation_masks, dirpath, filters, s):
"""
Saves images in each channel, with headers for each source in image,
such that the number of headers = number of sources detected in image.
Parameters
----------
datas: array
array of Data objects
observation: scarlet function
Scarlet observation objects
starlet_sources: list
List of ScarletSource objects
model_frame: scarlet function
Image frame of source model
catalog_deblended: list
Deblended source detection catalog
segmentation_masks: list
List of segmentation mask of each object in image
dirpath : str
Path to HSC image file directory
filters : list
A list of filters for your images. Default is ['g', 'r', 'i'].
s : str
File basename string
Returns
-------
filename : dict
dictionary of all paths to the saved scarlet files for the particular dataset.
Saved image and model files for each filter, and one total segmentation mask file for all filters.
"""
def _make_hdr(starlet_source, cat):
"""
Helper function to make FITS header and insert metadata.
Parameters
----------
starlet_source: starlet_source
starlet_source object for source k
cat: dict
catalog data for source k
Returns
-------
model_hdr : Astropy fits.Header
FITS header for source k with catalog metadata
"""
# For each header, assign descriptive data about each source
# (x0, y0, w, h) in absolute floating pixel coordinates
bbox_h = starlet_source.bbox.shape[1]
bbox_w = starlet_source.bbox.shape[2]
bbox_y = starlet_source.bbox.origin[1] + int(np.floor(bbox_w/2)) # y-coord of the source's center
bbox_x = starlet_source.bbox.origin[2] + int(np.floor(bbox_w/2)) # x-coord of the source's center
# Ellipse parameters (a, b, theta) from deblend catalog
e_a, e_b, e_theta = cat['a'], cat['b'], cat['theta']
ell_parm = np.concatenate((cat['a'], cat['b'], cat['theta']))
# Add info to header
model_hdr = fits.Header()
model_hdr['bbox'] = ','.join(map(str, [bbox_x, bbox_y, bbox_w, bbox_h]))
model_hdr['area'] = bbox_w * bbox_h
model_hdr['ell_parm'] = ','.join(map(str, list(ell_parm)))
model_hdr['cat_id'] = 1 # Category ID #TODO: set categor_id based on if the source is extended or not
return model_hdr
# Create dict for all saved filenames
segmask_hdul = []
model_hdul = []
filenames = {}
# Filter loop
for i, f in enumerate(filters): # datas is HSC data array with dimensions [filters, N, N]
f = f.upper()
# Primary HDU is full image
img_hdu = fits.PrimaryHDU(data=datas[i])
# Create header entry for each scarlet source
for k, (src, cat) in enumerate(zip(starlet_sources, catalog_deblended)):
# Get each model, make into image
model = starlet_sources[k].get_model(frame=model_frame)
model = observation.render(model)
model = src.bbox.extract_from(model)
model_hdr = _make_hdr(starlet_sources[k], cat)
model_hdu = fits.ImageHDU(data=model[i], header=model_hdr)
model_primary = fits.PrimaryHDU()
model_hdul.append(model_hdu)
# Write final fits file to specified location
# Save full image and then headers per source w/ descriptive info
save_img_hdul = fits.HDUList([img_hdu])
save_model_hdul = fits.HDUList([model_primary, *model_hdul])
# Save list of filenames in dict for each band
filenames[f'img_{f}'] = os.path.join(dirpath, f'{f}-{s}_scarlet_img.fits')
save_img_hdul.writeto(filenames[f'img_{f}'], overwrite=True)
filenames[f'model_{f}'] = os.path.join(dirpath, f'{f}-{s}_scarlet_model.fits')
save_model_hdul.writeto(filenames[f'model_{f}'], overwrite=True)
# If we have segmentation mask data, save them as a separate fits file
if segmentation_masks is not None:
# Create header entry for each scarlet source
for k, (src, cat) in enumerate(zip(starlet_sources, catalog_deblended)):
segmask_hdr = _make_hdr(starlet_sources[k], cat)
# Save each model source k in the image
segmask_hdu = fits.ImageHDU(data=segmentation_masks[k], header=segmask_hdr)
segmask_primary = fits.PrimaryHDU()
segmask_hdul.append(segmask_hdu)
save_segmask_hdul = fits.HDUList([segmask_primary, *segmask_hdul])
# Save list of filenames in dict for each band
filenames['segmask'] = os.path.join(dirpath, f'{f}-{s}_scarlet_segmask.fits')
save_segmask_hdul.writeto(filenames['segmask'], overwrite=True)
return filenames
def plot_stretch_Q(datas, stretches=[0.01,0.1,0.5,1], Qs=[1,10,5,100]):
"""
Plots different normalizations of your image using the stretch, Q parameters.
Parameters
----------
stretches : array
List of stretch params you want to permutate through to find optimal image normalization.
Default is [0.01, 0.1, 0.5, 1]
Qs : array
List of Q params you want to permutate through to find optimal image normalization.
Default is [1, 10, 5, 100]
Code adapted from:
https://pmelchior.github.io/scarlet/tutorials/display.html
Returns
-------
fig : Figure object
"""
fig, ax = plt.subplots(len(stretches), len(Qs), figsize=(9,9))
for i, stretch in enumerate(stretches):
for j, Q in enumerate(Qs):
asinh = scarlet.display.AsinhMapping(minimum=0, stretch=stretch, Q=Q)
# Scale the RGB channels for the image
img_rgb = scarlet.display.img_to_rgb(datas, norm=asinh)
ax[i][j].imshow(img_rgb)
ax[i][j].set_title("Stretch {}, Q {}".format(stretch, Q))
ax[i][j].axis('off')
return fig
def make_catalog(datas, lvl=4, wave=True, segmentation_map=False, maskthresh=10.0, object_limit=100000):
"""
Creates a detection catalog by combining low and high resolution data
Parameters
----------
datas: array
array of Data objects
lvl: int
detection lvl
wave: Bool
set to True to use wavelet decomposition of images before combination
subtract_background : Bool
if you want to subtract the background and retrieve an estimate, change to True. But default
is False because HSC images are already background subtracted.
segmentation_map : Bool
Whether to run sep segmentation map
maskthresh : float
Mask threshold for sep segmentation
object_limit : int
Limit on number of objects to detect in image
Code adapted from https://pmelchior.github.io/scarlet/tutorials/wavelet_model.html
Returns
-------
catalog: sextractor catalog
catalog of detected sources (use 'catalog.dtype.names' for info)
bg_rms: array
background level for each data set (set to None if subtract_background is False)
"""
if type(datas) is np.ndarray:
hr_images = datas / np.sum(datas, axis=(1, 2))[:, None, None]
# Detection image as the sum over all images
detect_image = np.sum(hr_images, axis=0)
else:
data_lr, data_hr = datas
# Create observations for each image
# Interpolate low resolution to high resolution
interp = interpolate(data_lr, data_hr)
# Normalization of the interpolate low res images
interp = interp / np.sum(interp, axis=(1, 2))[:, None, None]
# Normalisation of the high res data
hr_images = data_hr.images / np.sum(data_hr.images, axis=(1, 2))[:, None, None]
# Detection image as the sum over all images
detect_image = np.sum(interp, axis=0) + np.sum(hr_images, axis=0)
detect_image *= np.sum(data_hr.images)
if np.size(detect_image.shape) == 4:
if wave:
# Wavelet detection in the first three levels
wave_detect = scarlet.Starlet(detect_image.mean(axis=0), lvl=5).coefficients
wave_detect[:, -1, :, :] = 0
detect = scarlet.Starlet(coefficients=wave_detect).image
else:
# Direct detection
detect = detect_image.mean(axis=0)
else:
if wave:
wave_detect = scarlet.Starlet(detect_image).coefficients
detect = wave_detect[0][0] + wave_detect[0][1] + wave_detect[0][2]
else:
detect = detect_image
bkg = sep.Background(detect)
# Set the limit on the number of sub-objects when deblending.
sep.set_sub_object_limit(object_limit)
# Extract detection catalog with segmentation maps!
# Can use this to retrieve ellipse params
catalog = sep.extract(detect, lvl, err=bkg.globalrms, segmentation_map=segmentation_map, maskthresh=maskthresh)
# Estimate background
if type(datas) is np.ndarray:
bkg_rms = scarlet.wavelet.mad_wavelet(datas)
else:
bkg_rms = []
for data in datas:
bkg_rms.append(scarlet.wavelet.mad_wavelet(data.images))
return catalog, bkg_rms
def fit_scarlet_blend(starlet_sources, observation, max_iters=15, e_rel=1e-4, plot_likelihood=True):
"""
Creates a detection catalog by combining low and high resolution data
Parameters
----------
datas: array
array of Data objects
Will end early if likelihood and constraints converge
Returns
-------
"""
# Create and fit Blend model. Go for 200 iterations,
# but will end early if likelihood and constraints converge
print(f"Fitting Blend model.")
try:
starlet_blend = scarlet.Blend(starlet_sources, observation)
it, logL = starlet_blend.fit(max_iters, e_rel=e_rel)
print(f"Scarlet ran for {it} iterations to logL = {logL}")
# Catch any exceptions like no detections
except AssertionError as e1:
print(f"Scarlet blend fit failed: {e1}")
raise
if plot_likelihood == True:
scarlet.display.show_likelihood(starlet_blend)
plt.show()
return starlet_blend, logL
def _plot_wavelet(datas):
"""
Helper function to plot wavelet transformation diagnostic figures with scarlet
Parameters
----------
datas: array
array of Data objects
Returns
-------
"""
# Declare a starlet object (and performs the transform)
Sw = scarlet.Starlet(datas, lvl=5, direct=True)
# This is the starlet transform as an array
w = Sw.coefficients
# The inverse starlet transform of w (new object otherwise, the tranform is not used)
iw = Sw.image
# TODO: Clean this code up using plt.subplots()
# The wavelet transform of the first slice of images in pictures
lvl = w.shape[1]
plt.figure(figsize=(lvl*5+5,5))
plt.suptitle('Wavelet coefficients')
for i in range(lvl):
plt.subplot(1, lvl, i+1)
plt.title('scale' + str(i+1))
plt.imshow(w[0,i], cmap='inferno')
plt.colorbar()
plt.show()
# Making sure we recover the original image
plt.figure(figsize=(30,10))
plt.subplot(131)
plt.title('Original image', fontsize=20)
plt.imshow(datas[0], cmap='inferno')
plt.colorbar()
plt.subplot(132)
plt.title('Starlet-reconstructed image', fontsize=20)
plt.imshow(iw[0], cmap='inferno')
plt.colorbar()
plt.subplot(133)
plt.title('Absolute difference', fontsize=20)
plt.imshow((np.abs(iw[0]-datas[0])), cmap='inferno')
plt.colorbar()
plt.show()
return
def _plot_scene(starlet_sources, observation, norm, catalog, show_model=True, show_rendered=True,
show_observed=True, show_residual=True, add_labels=True, add_boxes=True,
add_ellipses=True):
"""
Helper function to plot scene with scarlet
Parameters
----------
starlet_sources: List
List of ScarletSource objects
observation:
Scarlet observation objects
norm:
Scarlet normalization for plotting
catalog: list
Source detection catalog
show_model: bool
Whether to show model
show_rendered: bool
Whether to show rendered model
show_observed: bool
Whether to show observed
show_residual: bool
Whether to show residual
add_labels: bool
Whether to add labels
add_boxes: bool
Whether to add bounding boxes to each panel
add_ellipses: bool
Whether to add ellipses to each panel
Returns
-------
fig : matplotlib Figure
Figure object
"""
fig = scarlet.display.show_scene(starlet_sources, observation=observation, norm=norm,
show_model=show_model, show_rendered=show_rendered,
show_observed=show_observed, show_residual=show_residual,
add_labels=add_labels, add_boxes=add_boxes)
for ax in fig.axes:
# Plot sep ellipse around all sources from the detection catalog
if add_ellipses == True:
for k, src in enumerate(catalog):
# See https://sextractor.readthedocs.io/en/latest/Position.html
e = Ellipse(xy=(src['x'], src['y']),
width=6*src['a'],
height=6*src['b'],
angle=np.rad2deg(src['theta']))
e.set_facecolor('none')
e.set_edgecolor('white')
ax.add_artist(e)
ax.axis('off')
fig.subplots_adjust(wspace=0.01)
plt.show()
return fig
def run_scarlet(datas, filters, stretch=0.1, Q=5, sigma_model=1, sigma_obs=5,
subtract_background=False, max_chi2=5000, max_iters=15, morph_thresh=0.1,
starlet_thresh=0.1, lvl=5, lvl_segmask=2, maskthresh=0.025,
segmentation_map=True, plot_wavelet=False, plot_likelihood=True,
plot_scene=False, plot_sources=False, add_ellipses=True,
add_labels=False, add_boxes=False):
""" Run P. Melchior's scarlet (https://github.com/pmelchior/scarlet) implementation
for source separation. This function will create diagnostic plots, a source detection catalog,
and fit a model for all sources in the observation scene (image).
Parameters
----------
subtract_background : boolean
Whether or not to estimate and subtract the background (often background is already subtracted)
Detault is False
plot_wavelet_transform : boolean
Plot starlet wavelet transform and inverse transform at different scales.
NOTE: Not really useful at large image sizes (> ~few hundred pixels length/height)
Default is False
plot_detections : boolean
Plot detection catalog results. Default is False
plot_likelihood : boolean
Plot likelihood as function of iterations from Blend fit function. Default is True
plot_full_scene : boolean
Plot full scene with the model, rendered model, observation, and residual. Default is False.
plot_all_sources : boolean
Plot the model, rendered model, observation, and spectrum across channels for each object.
WARNING: dumb to do this with a large image with many sources! Default is False
plot_first_isolated_comp : boolean
Plot the subtracted and isolated first (or any) starlet component. Recommended for finding a bright
component. Default is False.
Return
-------
FITS file with...
TODO: fill this out once I get the exact fits file output generated to Colin's liking
"""
norm = scarlet.display.AsinhMapping(minimum=0, stretch=stretch, Q=Q)
# Generate source catalog using wavelets
catalog, bg_rms_hsc = make_catalog(datas, lvl, wave=True)
# If image is already background subtracted, weights are set to 1
if subtract_background:
weights = np.ones_like(datas) / (bg_rms_hsc**2)[:, None, None]
else:
weights = np.ones_like(datas)
print("Source catalog found ", len(catalog), "objects")
# Plot wavelet transform at different scales
if plot_wavelet == True:
_plot_wavelet(datas)
# Define model frame and observations:
model_psf = scarlet.GaussianPSF(sigma=sigma_model) #, boxsize=100)
model_frame = scarlet.Frame(datas.shape, psf=model_psf, channels=filters)
observation_psf = scarlet.GaussianPSF(sigma=sigma_obs)
observation = scarlet.Observation(datas, psf=observation_psf, weights=weights, channels=filters).match(model_frame)
# Initialize starlet sources to be fit. Assume extended sources for all because
# we are not looking at all detections in each image
# TODO: Plot chi2 vs. binned size and mag. Implement conidition if chi2 > xxx then
# add another component until larger sources are modeled well
print("Initializing starlet sources to be fit.")
# Compute radii and spread of sources
Rs = np.sqrt(catalog['a']**2 + catalog['b']**2)
spread = Rs/sigma_obs
# Array of chi^2 residuals computed after fit on each model
chi2s = np.zeros(len(catalog))
# Loop through detections in catalog
starlet_sources = []
for k, src in enumerate(catalog):
# Is the source compact relative to the PSF?
if spread[k] < 1:
compact = True
else:
compact = False
# Try modeling each source as a single ExtendedSource first
new_source = scarlet.ExtendedSource(model_frame, (src['y'], src['x']), observation,
K=1, thresh=morph_thresh, compact=compact)
starlet_sources.append(new_source)
# Fit scarlet blend
starlet_blend, logL = fit_scarlet_blend(starlet_sources, observation, max_iters=max_iters, plot_likelihood=plot_likelihood)
print("Computing residuals.")
# Compute reduced chi^2 for each rendered sources
for k, src in enumerate(starlet_sources):
model = src.get_model(frame=model_frame)
model = observation.render(model)
res = datas - model
# Compute in bbox only
res = src.bbox.extract_from(res)
chi2s[k] = np.sum(res**2)
# Replace models with poor fits with StarletSource models
if chi2s[k] > max_chi2:
starlet_sources[k] = scarlet.StarletSource(model_frame,
(catalog["y"][k], catalog["x"][k]), observation,
thresh=morph_thresh, starlet_thresh=starlet_thresh,
full=False)
# If any chi2 residuals are flagged, re-fit the blend with a more complex model
if np.any(chi2s > max_chi2):
print("Re-fitting with Starlet models for poorly-fit sources.")
starlet_blend, logL = fit_scarlet_blend(starlet_sources, observation, max_iters=max_iters, plot_likelihood=plot_likelihood)
# Extract the deblended catalog and update the chi2 residuals
print('Extracting deblended catalog.')
catalog_deblended = []
segmentation_masks = []
for k, src in enumerate(starlet_sources):
model = src.get_model(frame=model_frame)
model = observation.render(model)
# Compute in bbox only
model = src.bbox.extract_from(model)
# Run sep
try:
cat, _ = make_catalog(model, lvl_segmask, wave=False, segmentation_map=False, maskthresh=maskthresh)
except:
print(f'Exception with source {k}')
cat = []
#if segmentation_map == True:
# cat, mask = cat
# If more than 1 source is detected for some reason (e.g. artifacts)
if len(cat) > 1:
# keep the brightest
idx = np.argmax([c['cflux'] for c in cat])
cat = cat[idx]
# if segmentation_map == True:
# mask = mask[idx]
# If failed to detect model source
if len(cat) == 0:
# Fill with nan
cat = [np.full(catalog[0].shape, np.nan, dtype=catalog.dtype)]
# Append to full catalog
if segmentation_map == True:
# For some reason sep doesn't like these images, so do the segmask ourselves for now
model_det = np.array(model[0,:,:])
mask = np.zeros_like(model_det)
mask[model_det>maskthresh] = 1
segmentation_masks.append(mask)
#plt.imshow(mask)
#plt.show()
catalog_deblended.append(cat)
# Combine catalog named array
catalog_deblended = np.vstack(catalog_deblended)
# Plot scene: rendered model, observations, and residuals
if plot_scene == True:
_plot_scene(starlet_sources, observation, norm, catalog, show_model=False, show_rendered=True,
show_observed=True, show_residual=True, add_labels=add_labels, add_boxes=add_boxes, add_ellipses=add_ellipses)
# Plot each for each source
if plot_sources == True:
scarlet.display.show_sources(starlet_sources, observation, norm=norm,
show_rendered=True, show_observed=True,
add_boxes=add_boxes)
plt.show()
return observation, starlet_sources, model_frame, catalog, catalog_deblended, segmentation_masks
avg_line_length: 37.116086 | max_line_length: 131 | alphanum_fraction: 0.629105
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 10,284 (score 0.459497)

hexsha: 3f348185cd12109292cb8c384d3bec9afb87b02b | size: 193 | ext: py | lang: Python
max_stars: serve.py @ haiyoumeiyou/cherrybrigde (head f00a0592240b60cc42b895ad194b0273485956d0) | licenses: ["BSD-3-Clause"] | stars: null | dates: null
max_issues: serve.py @ haiyoumeiyou/cherrybrigde (head f00a0592240b60cc42b895ad194b0273485956d0) | licenses: ["BSD-3-Clause"] | issues: null | dates: null
max_forks: serve.py @ haiyoumeiyou/cherrybrigde (head f00a0592240b60cc42b895ad194b0273485956d0) | licenses: ["BSD-3-Clause"] | forks: null | dates: null
content:
from application import bootstrap
bootstrap()
if __name__=='__main__':
import cherrypy
cherrypy.engine.signals.subscribe()
cherrypy.engine.start()
cherrypy.engine.block()
avg_line_length: 19.3 | max_line_length: 39 | alphanum_fraction: 0.720207
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 10 (score 0.051813)

hexsha: 3f3537a4e2a9c606bd390358c783d299bde031c0 | size: 2,125 | ext: py | lang: Python
max_stars: ooobuild/lo/smarttags/x_range_based_smart_tag_recognizer.py @ Amourspirit/ooo_uno_tmpl (head 64e0c86fd68f24794acc22d63d8d32ae05dd12b8) | licenses: ["Apache-2.0"] | stars: null | dates: null
max_issues: ooobuild/lo/smarttags/x_range_based_smart_tag_recognizer.py @ Amourspirit/ooo_uno_tmpl (head 64e0c86fd68f24794acc22d63d8d32ae05dd12b8) | licenses: ["Apache-2.0"] | issues: null | dates: null
max_forks: ooobuild/lo/smarttags/x_range_based_smart_tag_recognizer.py @ Amourspirit/ooo_uno_tmpl (head 64e0c86fd68f24794acc22d63d8d32ae05dd12b8) | licenses: ["Apache-2.0"] | forks: null | dates: null
content:
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.smarttags
import typing
from abc import abstractmethod
from ..lang.x_initialization import XInitialization as XInitialization_d46c0cca
if typing.TYPE_CHECKING:
from ..frame.x_controller import XController as XController_b00e0b8f
from .smart_tag_recognizer_mode import SmartTagRecognizerMode as SmartTagRecognizerMode_9179119e
from ..text.x_text_markup import XTextMarkup as XTextMarkup_a5d60b3a
from ..text.x_text_range import XTextRange as XTextRange_9a910ab7
class XRangeBasedSmartTagRecognizer(XInitialization_d46c0cca):
"""
provides access to a range based smart tag recognizer.
See Also:
`API XRangeBasedSmartTagRecognizer <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1smarttags_1_1XRangeBasedSmartTagRecognizer.html>`_
"""
__ooo_ns__: str = 'com.sun.star.smarttags'
__ooo_full_ns__: str = 'com.sun.star.smarttags.XRangeBasedSmartTagRecognizer'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.smarttags.XRangeBasedSmartTagRecognizer'
@abstractmethod
def recognizeTextRange(self, xRange: 'XTextRange_9a910ab7', eDataType: 'SmartTagRecognizerMode_9179119e', xTextMarkup: 'XTextMarkup_a5d60b3a', aApplicationName: str, xController: 'XController_b00e0b8f') -> None:
"""
recognizes smart tags.
"""
__all__ = ['XRangeBasedSmartTagRecognizer']
| 42.5
| 215
| 0.777412
| 866
| 0.407529
| 0
| 0
| 286
| 0.134588
| 0
| 0
| 1,294
| 0.608941
|
3f3653bf5b35e045e2b4c2aeff6f681433eea55f
| 924
|
py
|
Python
|
apprest/plugins/icat/views/ICAT.py
|
acampsm/calipsoplus-backend
|
b66690124bd2f2541318ddb83b18e082b5df5676
|
[
"MIT"
] | 4
|
2018-12-04T15:08:27.000Z
|
2019-04-11T09:49:41.000Z
|
apprest/plugins/icat/views/ICAT.py
|
acampsm/calipsoplus-backend
|
b66690124bd2f2541318ddb83b18e082b5df5676
|
[
"MIT"
] | 63
|
2018-11-22T13:07:56.000Z
|
2021-06-10T20:55:58.000Z
|
apprest/plugins/icat/views/ICAT.py
|
AlexRogalskiy/calipsoplus-backend
|
3f6b034f16668bc154b0f4b759ed62b055f41647
|
[
"MIT"
] | 10
|
2018-11-23T08:17:28.000Z
|
2022-01-15T23:41:59.000Z
|
from rest_framework import status
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from apprest.plugins.icat.helpers.complex_encoder import JsonResponse
from apprest.plugins.icat.services.ICAT import ICATService
class GetInvestigationUsers(APIView):
"""
get:
Return: Users involved in an investigation
"""
authentication_classes = (SessionAuthentication, BasicAuthentication)
permission_classes = (IsAuthenticated,)
pagination_class = None
def get(self, request, *args, **kwargs):
service = ICATService()
investigation_id = self.kwargs.get('investigation_id')
investigation_users = service.get_users_involved_in_investigation(investigation_id, request)
return JsonResponse(investigation_users, status=status.HTTP_200_OK)
| 34.222222
| 100
| 0.787879
| 576
| 0.623377
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.091991
|
3f374d2a724eacf4543f9a4bee934b7b700f04f6
| 396
|
py
|
Python
|
python/phevaluator/table_tests/test_hashtable8.py
|
StTronn/PokerHandEvaluator
|
3611a7072c2a62844d6aca32d798aafa59e4606d
|
[
"Apache-2.0"
] | 1
|
2020-11-12T14:35:02.000Z
|
2020-11-12T14:35:02.000Z
|
python/phevaluator/table_tests/test_hashtable8.py
|
StTronn/PokerHandEvaluator
|
3611a7072c2a62844d6aca32d798aafa59e4606d
|
[
"Apache-2.0"
] | null | null | null |
python/phevaluator/table_tests/test_hashtable8.py
|
StTronn/PokerHandEvaluator
|
3611a7072c2a62844d6aca32d798aafa59e4606d
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from table_tests.utils import BaseTestNoFlushTable
from evaluator.hashtable8 import NO_FLUSH_8
class TestNoFlush8Table(BaseTestNoFlushTable):
TOCOMPARE = NO_FLUSH_8
TABLE = [0] * len(TOCOMPARE)
VISIT = [0] * len(TOCOMPARE)
NUM_CARDS = 8
def test_noflush8_table(self):
self.assertListEqual(self.TABLE, self.TOCOMPARE)
if __name__ == "__main__":
unittest.main()
| 23.294118
| 52
| 0.765152
| 236
| 0.59596
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.025253
|
3f37c1453442f813e72c82a4ee9d8d0dd3dbc36c
| 921
|
py
|
Python
|
Marcelina_Skoczylas_praca_domowa_3.py
|
marcelinaskoczylas/python_wprowadzenie_warsztaty_2021
|
9614d791b253a15a117960deadb7375c681e8a27
|
[
"MIT"
] | 1
|
2021-11-12T10:17:02.000Z
|
2021-11-12T10:17:02.000Z
|
Marcelina_Skoczylas_praca_domowa_3.py
|
marcelinaskoczylas/python_wprowadzenie_warsztaty_2021
|
9614d791b253a15a117960deadb7375c681e8a27
|
[
"MIT"
] | 3
|
2021-11-07T12:27:46.000Z
|
2021-12-11T18:20:58.000Z
|
Marcelina_Skoczylas_praca_domowa_3.py
|
marcelinaskoczylas/python_wprowadzenie_warsztaty_2021
|
9614d791b253a15a117960deadb7375c681e8a27
|
[
"MIT"
] | 23
|
2021-11-07T12:16:58.000Z
|
2021-11-26T21:03:29.000Z
|
# task 1
i=1
j=1
k=1
ciag=[1,1]
while len(ciag)<50:
k=i+j
j=i
i=k
ciag.append(k)
print(ciag)
# task 2
wpisane=str(input("Proszę wpisać dowolne słowa po przecinku "))
zmienne=wpisane.split(",")
def funkcja(*args):
'''The function checks the length of words and removes those that are too short'''
lista=[]
lista2=[]
wartosc = int(input("Proszę wpisać jakąś wartość "))
for arg in args:
lista.append(arg)
dlugosc=len(arg)
if len(arg)>wartosc:
lista2.append(arg)
procenty=(len(lista2)/len(lista))*100
return procenty,lista,lista2
print(funkcja(zmienne))
# task 3
liczby=list(input("Proszę wpisać liczby po przecinku: "))
unikalna_lista=[]
n=1
a=liczby[n]
unikalna_lista.append(liczby[0])
while n<len(liczby):
if liczby[n]!=unikalna_lista[n-1]:
unikalna_lista.append(a)
n+=1
| 18.058824
| 72
| 0.606949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 232
| 0.246809
|
3f3831fca3eb8519b2004ca6b866229be692631e
| 91
|
py
|
Python
|
rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/lineFinder/__init__.py
|
RhinohawkUAV/rh_ros
|
e13077060bdfcc231adee9731ebfddadcd8d6b4a
|
[
"MIT"
] | 4
|
2020-05-13T19:34:27.000Z
|
2021-09-20T09:01:10.000Z
|
rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/lineFinder/__init__.py
|
RhinohawkUAV/rh_ros
|
e13077060bdfcc231adee9731ebfddadcd8d6b4a
|
[
"MIT"
] | null | null | null |
rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/lineFinder/__init__.py
|
RhinohawkUAV/rh_ros
|
e13077060bdfcc231adee9731ebfddadcd8d6b4a
|
[
"MIT"
] | 2
|
2019-09-14T14:45:09.000Z
|
2020-11-22T01:46:59.000Z
|
from linePathSegment import LinePathSegment
from lineSegmentFinder import LineSegmentFinder
| 45.5
| 47
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3f39db2a6e3725e4d6d3a964e14a0df2e6772218
| 655
|
py
|
Python
|
days/day01/part2.py
|
jaredbancroft/aoc2021
|
4eaf339cc0c8566da2af13f7cb9cf6fe87355aac
|
[
"MIT"
] | null | null | null |
days/day01/part2.py
|
jaredbancroft/aoc2021
|
4eaf339cc0c8566da2af13f7cb9cf6fe87355aac
|
[
"MIT"
] | null | null | null |
days/day01/part2.py
|
jaredbancroft/aoc2021
|
4eaf339cc0c8566da2af13f7cb9cf6fe87355aac
|
[
"MIT"
] | null | null | null |
from helpers import inputs
def solution(day):
depths = inputs.read_to_list(f"inputs/{day}.txt")
part2_total = 0
for index, depth in enumerate(depths):
if index - 3 >= 0:
current_window = (
int(depth) + int(depths[index - 1]) + int(depths[index - 2])
)
previous_window = (
int(depths[index - 1])
+ int(depths[index - 2])
+ int(depths[index - 3])
)
diff = current_window - previous_window
if diff > 0:
part2_total += 1
return f"Day 01 Part 2 Total Depth Increase: {part2_total}"
| 31.190476
| 76
| 0.51145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 0.108397
|
3f3a04716997d73eaef4e151bd98036259ad059e
| 1,183
|
py
|
Python
|
src/unicon/plugins/nxos/n5k/service_statements.py
|
TestingBytes/unicon.plugins
|
0600956d805deb4fd790aa3ef591c5d659e85de1
|
[
"Apache-2.0"
] | 18
|
2019-11-23T23:14:53.000Z
|
2022-01-10T01:17:08.000Z
|
src/unicon/plugins/nxos/n5k/service_statements.py
|
TestingBytes/unicon.plugins
|
0600956d805deb4fd790aa3ef591c5d659e85de1
|
[
"Apache-2.0"
] | 12
|
2020-11-09T20:39:25.000Z
|
2022-03-22T12:46:59.000Z
|
src/unicon/plugins/nxos/n5k/service_statements.py
|
TestingBytes/unicon.plugins
|
0600956d805deb4fd790aa3ef591c5d659e85de1
|
[
"Apache-2.0"
] | 32
|
2020-02-12T15:42:22.000Z
|
2022-03-15T16:42:10.000Z
|
from unicon.eal.dialogs import Statement
from .service_patterns import NxosN5kReloadPatterns
from unicon.plugins.nxos.service_statements import (login_stmt, password_stmt,
enable_vdc, admin_password)
from unicon.plugins.generic.service_statements import (save_env,
auto_provision, auto_install_dialog,
setup_dialog, confirm_reset,
press_enter, confirm_config, module_reload, save_module_cfg,
secure_passwd_std, )
# for nxos n5k single rp reload
pat = NxosN5kReloadPatterns()
reload_confirm_nxos = Statement(pattern=pat.reload_confirm_nxos,
action='sendline(y)',
loop_continue=True,
continue_timer=False)
# reload statement list for nxos n5k single-rp
nxos_reload_statement_list = [save_env, confirm_reset, reload_confirm_nxos,
press_enter, login_stmt, password_stmt,
confirm_config, setup_dialog,
auto_install_dialog, module_reload,
save_module_cfg, secure_passwd_std,
admin_password, auto_provision, enable_vdc]
| 43.814815
| 78
| 0.658495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.076078
|
3f3a4efcbbb562167b72ebf68516d5cfd976b799
| 11,321
|
py
|
Python
|
src/clic/cloud.py
|
NathanRVance/clic
|
e28f7f2686f5ac6689b384474e3fdfa4d207f6ec
|
[
"MIT"
] | 2
|
2017-12-13T03:41:07.000Z
|
2019-03-12T14:08:42.000Z
|
src/clic/cloud.py
|
NathanRVance/clic
|
e28f7f2686f5ac6689b384474e3fdfa4d207f6ec
|
[
"MIT"
] | null | null | null |
src/clic/cloud.py
|
NathanRVance/clic
|
e28f7f2686f5ac6689b384474e3fdfa4d207f6ec
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from clic import nodes
import time
import os
import traceback
import logging as loggingmod
logging = loggingmod.getLogger('cloud')
logging.setLevel(loggingmod.WARNING)
def getCloud():
return gcloud()
class abstract_cloud:
def __init__(self):
pass
def makeImage(self, instanceName, recreateInstance):
pass
def create(self, node):
pass
def delete(self, node):
pass
def deleteDisk(self, diskName):
pass
def getDisks(self):
# Return: [diskName, ...]
pass
def getSshKeys(self):
# Return: [[keyUser, keyValue], ...]
pass
def setSshKeys(self, keys):
# keys: [[keyUser, keyValue], ...]
pass
def nodesUp(self, running):
# Return: [{'node' : node, 'name': name, 'running' : True|False, 'ip' : IP} ...]
pass
def getStartupScript(self):
from pathlib import Path
from pwd import getpwnam
cmds = ['index=2000; for user in `ls /home`; do usermod -o -u $index $user; groupmod -o -g $index $user; let "index += 1"; done']
for path in Path('/home').iterdir():
if path.is_dir():
localUser = path.parts[-1]
try:
uid = getpwnam(localUser).pw_uid
cmds.append('usermod -o -u {0} {1}'.format(uid, localUser))
gid = getpwnam(localUser).pw_gid
cmds.append('groupmod -o -g {0} {1}'.format(gid, localUser))
except KeyError:
continue
import configparser
config = configparser.ConfigParser()
config.read('/etc/clic/clic.conf')
user = config['Daemon']['user']
hostname = os.popen('hostname -s').read().strip()
if not config['Daemon'].getboolean('cloudHeadnode'):
import ipgetter
cmds.append('sudo clic-synchosts {0}:{1}'.format(hostname, ipgetter.myip()))
# Port 6817 traffic is slurm compute to head, 6818 is slurm head to compute
#cmds.append('ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=error -i /home/{0}/.ssh/id_rsa -fN -L 6817:localhost:6817 {0}@{1}'.format(user, hostname))
#cmds.append('ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=error -i /home/{0}/.ssh/id_rsa -fN -R 6818:localhost:6818 {0}@{1}'.format(user, hostname))
cmds.append('ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=error -i /home/{0}/.ssh/id_rsa -fN -L 3049:localhost:2049 {0}@{1}'.format(user, hostname))
cmds.append('sudo mount -t nfs4 -o port=3049,rw localhost:/home /home')
cmds.append('if [ ! -d "/bind-root" ]; then sudo mkdir /bind-root; fi')
cmds.append('sudo mount --bind / /bind-root')
cmds.append('for user in `ls /home`; do sudo mount --bind /bind-root/home/$user/.ssh /home/$user/.ssh; done')
cmds.append('sudo mount -t nfs4 -o port=3049,ro localhost:/etc/slurm /etc/slurm')
cmds.append('sudo systemctl restart slurmd.service')
return cmds
class gcloud(abstract_cloud):
# Docs: https://developers.google.com/resources/api-libraries/documentation/compute/v1/python/latest/
def __init__(self):
import configparser
config = configparser.ConfigParser()
config.read('/etc/clic/clic.conf')
settings = config['Cloud']
self.project = settings['project']
self.zone = settings['zone']
self.image = settings['image']
import googleapiclient.discovery
# Must first do sudo gcloud auth application-default login
self.api = googleapiclient.discovery.build('compute', 'v1')
def isDone(self, operation):
from googleapiclient.errors import HttpError
# There's probably some elegant way to do this. I don't know that way.
try:
return self.api.zoneOperations().get(project=self.project, zone=self.zone, operation=operation['name']).execute()['status'] == 'DONE'
except HttpError:
return self.api.globalOperations().get(project=self.project, operation=operation['name']).execute()['status'] == 'DONE'
def wait(self, operation):
while True:
if self.isDone(operation):
break
time.sleep(1)
def makeImage(self, instanceName, recreateInstance=False):
diskName = [disk for disk in self.api.instances().get(project=self.project, zone=self.zone, instance=instanceName).execute()['disks'] if disk['boot']][0]['deviceName']
print("Setting disk autodelete to False")
self.wait(self.api.instances().setDiskAutoDelete(project=self.project, zone=self.zone, instance=instanceName, autoDelete=False, deviceName=diskName).execute())
# Grab instance data to recreate it later
machineType = self.api.instances().get(project=self.project, zone=self.zone, instance=instanceName).execute()['machineType']
print("Deleting instance")
self.wait(self.deleteName(instanceName))
# Create the image
self.diskToImage(diskName)
if recreateInstance:
print("Recreating instance")
config = {'name': instanceName, 'machineType': machineType,
'disks': [
{
'boot': True,
'autoDelete': True,
'deviceName': diskName,
'source': 'projects/{0}/zones/{1}/disks/{2}'.format(self.project, self.zone, diskName)
}
],
"serviceAccounts": [ { "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } ],
# Specify a network interface with NAT to access the public
# internet.
'networkInterfaces': [{
'network': 'global/networks/default',
'accessConfigs': [
{'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
]
}]
}
self.wait(self.api.instances().insert(project=self.project, zone=self.zone, body=config).execute())
def diskToImage(self, diskName):
print("Creating image")
self.wait(self.api.images().insert(project=self.project, body={
'sourceDisk' : 'zones/{0}/disks/{1}'.format(self.zone, diskName),
'name' : self.image,
'family' : self.image
}).execute())
def create(self, node):
try:
# Get the latest image
image_response = self.api.images().getFromFamily(project=self.project, family=self.image).execute()
source_disk_image = image_response['selfLink']
machine_type = 'zones/{0}/machineTypes/n1-{1}-{2}'.format(self.zone, node.partition.mem, node.partition.cpus)
config = {'name': node.name, 'machineType': machine_type,
'disks': [
{
'boot': True,
'autoDelete': True,
'initializeParams': {
'diskSizeGb': int(node.partition.disk * 1.1),
'sourceImage': source_disk_image,
}
}
],
'metadata': {
'items': [
{
'key': 'startup-script',
'value': '#! /bin/bash\n{}'.format('\n'.join(self.getStartupScript()))
}
]
},
# Specify a network interface with NAT to access the public
# internet.
'networkInterfaces': [{
'network': 'global/networks/default',
'accessConfigs': [
{'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
]
}]
}
return self.api.instances().insert(project=self.project, zone=self.zone, body=config).execute()
except Exception as e:
logging.error(traceback.format_exc())
def delete(self, node):
return self.deleteName(node.name)
def deleteName(self, name):
try:
return self.api.instances().delete(project=self.project, zone=self.zone, instance=name).execute()
except Exception as e:
logging.error(traceback.format_exc())
def deleteDisk(self, diskName):
from googleapiclient.errors import HttpError
try:
return self.api.disks().delete(project=self.project, zone=self.zone, disk=diskName).execute()
except Exception as e:
logging.error(traceback.format_exc())
def getDisks(self):
try:
return [disk['name'] for disk in self.api.disks().list(project=self.project, zone=self.zone).execute().get('items', [])]
except Exception as e:
logging.error(traceback.format_exc())
return []
def getSshKeys(self):
keys = []
try:
for key in next(value['value'] for value in self.api.projects().get(project=self.project).execute()['commonInstanceMetadata']['items'] if value['key'] == 'sshKeys').split('\n'):
keys.append(key.split(':', 1))
except Exception as e:
logging.error(traceback.format_exc())
return keys
def setSshKeys(self, keys):
try:
current = self.api.projects().get(project=self.project).execute()['commonInstanceMetadata']
formatKeys = [':'.join(key) for key in keys]
next(value for value in current['items'] if value['key'] == 'sshKeys')['value'] = '\n'.join(formatKeys)
self.wait(self.api.projects().setCommonInstanceMetadata(project=self.project, body=current).execute())
except Exception as e:
logging.error(traceback.format_exc())
def nodesUp(self, running):
try:
allNodes = []
for item in self.api.instances().list(project=self.project, zone=self.zone).execute().get('items', []):
node = {'node' : nodes.getNode(item['name']), 'name' : item['name'], 'running' : item['status'] == 'RUNNING'}
if node['running']:
node['ip'] = item['networkInterfaces'][0]['accessConfigs'][0]['natIP']
else:
node['ip'] = ''
allNodes.append(node)
if not running:
return allNodes
else:
return [node for node in allNodes if node['running']]
except Exception as e:
logging.error(traceback.format_exc())
def main():
import argparse
parser = argparse.ArgumentParser(description='Execute cloud API commands')
from clic import version
parser.add_argument('-v', '--version', action='version', version=version.__version__)
image = parser.add_argument_group()
image.add_argument('--image', metavar='NAME', nargs=1, help='Create an image from NAME')
image.add_argument('--recreate', action='store_true', help='Recreate NAME after creating an image')
args = parser.parse_args()
if args.image:
getCloud().makeImage(args.image[0], args.recreate)
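# --- Hypothetical usage sketch (illustration only; not part of the original module) ---
# getCloud() returns the gcloud backend configured from /etc/clic/clic.conf; nodesUp()
# lists instances in the configured project/zone. Requires prior gcloud auth, as noted above.
# cloud = getCloud()
# for entry in cloud.nodesUp(running=True):
#     print(entry['name'], entry['ip'])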
| 45.103586
| 192
| 0.569473
| 10,526
| 0.929777
| 0
| 0
| 0
| 0
| 0
| 0
| 3,139
| 0.277272
|
3f3cb556d7979e79091d66cbe322dfcac371f91c
| 3,185
|
py
|
Python
|
HyperUnmixing/visualization.py
|
mdbresh/HyperUnmixing
|
9ed1be74da48ff80298099497194efa2e97b7fbe
|
[
"MIT"
] | 1
|
2020-06-03T21:43:29.000Z
|
2020-06-03T21:43:29.000Z
|
HyperUnmixing/visualization.py
|
mdbresh/HyperUnmixing
|
9ed1be74da48ff80298099497194efa2e97b7fbe
|
[
"MIT"
] | 8
|
2020-04-13T22:12:03.000Z
|
2020-05-01T21:37:16.000Z
|
HyperUnmixing/visualization.py
|
mdbresh/HyperUnmixing
|
9ed1be74da48ff80298099497194efa2e97b7fbe
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import ipywidgets as widgets
import matplotlib.pyplot as plt
from skimage.measure import label, regionprops, regionprops_table
from skimage.color import label2rgb
def Wav_2_Im(im, wn):
'''
Takes a 3-D datacube and outputs a normalized slice at one wavenumber.
Parameters
----------
im : array-like image.
Input data.
wn : integer.
Integer index value.
Returns
----------
slice : ndarray.
An image the same size as the input, but with one slice in wavenumber space.
'''
normalized = [] # storage for each normalized slice
img_norm = np.empty(im.shape, dtype=np.float32)
for i in np.linspace(0, im.shape[2]-1, im.shape[2]-1).astype(int):
image = im[:,:,i]
normalized.append((image - np.min(image))/(np.amax(image) - np.min(image)))
for i in np.linspace(0, im.shape[2]-1, im.shape[2]-1).astype(int):
img_norm[:,:,i] = normalized[i-1]
im_slice = img_norm[:,:,wn-750]
return im_slice
def AreaFraction(im, norm_im, image_size):
'''
Input test image, normalized NMF coefficients image, and image size.
Outputs a dictionary of computed properties for regions of interest,
a multidimensional array containing threshold masks, and a list of
computed area fractions for the areas of interest in each threshold mask.
Parameters
----------
im : array-like image.
Image slice to measure.
norm_im : multidimensional array-like image
Image of normalized NMF coefficients.
image_size : integer.
Size of the image.
Returns
---------
regions : dict.
Dictionary of regions of interest and their computed properties.
mask : multidimensional array-like image.
Multidimensional array with each threshold mask image.
area_frac : list.
List of computed area fractions of DPPDTT.
'''
# Set up threshold masks
percents = np.round(np.arange(0.5, 1.0, 0.05),2) # array of thresholds
mask = np.zeros((norm_im.shape[0], norm_im.shape[1], 10)) # ten tested thresholds
for h in range(mask.shape[2]):
for i in range(mask.shape[0]):
for j in range(mask.shape[1]):
if norm_im[i][j] >= percents[h]:
mask[i][j][h] = 1
else:
mask[i][j][h] = 0
# Compute region properties of labeled images
regions = {}
props = ('area', 'major_axis_length', 'minor_axis_length', 'mean_intensity')
for i in range(mask.shape[2]):
labels = label(mask[:,:,i])
regions[i] = pd.DataFrame(regionprops_table(labels, im, props))
# Compute the area fractions
area_frac = []
for i in range(len(regions.keys())):
area_frac.append(regions[i]['area'].values / image_size**2)
return regions, mask, area_frac
def interactive_hyperimage(image, w=(750,1877,1)):
'''
input:
image: 3D Hyperspectral image
w: wavenumbers, the desired interval.
format is (starting wavenumber, ending wavenumber, step). Default is the full spectrum, (750,1877,1)
output:
interactive 2D image of hyperspectral image at desired wavenumber
'''
def update(a):
fig, ax = plt.subplots(figsize=(6,6))
ax.imshow(image[ :, :,a-750])
ax.set_title('Wavenumber '+str(a)+' $\mathregular{cm^{-1}}$', fontsize=24)
return widgets.interact(update, a=w)
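# --- Hypothetical usage sketch (illustration only; not part of the original module) ---
# The random cube below stands in for a real hyperspectral image; the wavenumber axis is
# assumed to start at 750 cm^-1, matching the wn - 750 indexing used in Wav_2_Im above.
demo_cube = np.random.rand(64, 64, 400)
demo_slice = Wav_2_Im(demo_cube, 1000)  # normalized image at ~1000 cm^-1
demo_coeffs = np.random.rand(64, 64)  # stand-in for normalized NMF coefficients
demo_regions, demo_masks, demo_fractions = AreaFraction(demo_slice, demo_coeffs, 64)
print(demo_masks.shape, len(demo_fractions))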
| 25.277778
| 107
| 0.690424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,652
| 0.518681
|
3f3d41979c4cb0e2058dd2cdc43c80be671cc4fb
| 2,758
|
py
|
Python
|
velocileptors/Utils/loginterp.py
|
kokron/velocileptors
|
50016dd66ec9a2d33effecc248a48ca7ea7322bf
|
[
"MIT"
] | null | null | null |
velocileptors/Utils/loginterp.py
|
kokron/velocileptors
|
50016dd66ec9a2d33effecc248a48ca7ea7322bf
|
[
"MIT"
] | null | null | null |
velocileptors/Utils/loginterp.py
|
kokron/velocileptors
|
50016dd66ec9a2d33effecc248a48ca7ea7322bf
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as interpolate
from scipy.misc import derivative
import inspect
def loginterp(x, y, yint = None, side = "both", lorder = 9, rorder = 9, lp = 1, rp = -2,
ldx = 1e-6, rdx = 1e-6,\
interp_min = -12, interp_max = 12, Nint = 10**5, verbose=False, option='B'):
'''
Extrapolate function by evaluating a log-index of left & right side.
From Chirag Modi's CLEFT code at
https://github.com/modichirag/CLEFT/blob/master/qfuncpool.py
The warning for divergent power laws on both ends is turned off. To turn back on uncomment lines 26-33.
'''
if yint is None:
yint = interpolate(x, y, k = 5)
if side == "both":
side = "lr"
# Make sure there is no zero crossing between the edge points
# If so assume there can't be another crossing nearby
if np.sign(y[lp]) == np.sign(y[lp-1]) and np.sign(y[lp]) == np.sign(y[lp+1]):
l = lp
else:
l = lp + 2
if np.sign(y[rp]) == np.sign(y[rp-1]) and np.sign(y[rp]) == np.sign(y[rp+1]):
r = rp
else:
r = rp - 2
lneff = derivative(yint, x[l], dx = x[l]*ldx, order = lorder)*x[l]/y[l]
rneff = derivative(yint, x[r], dx = x[r]*rdx, order = rorder)*x[r]/y[r]
#print(lneff, rneff)
# uncomment if you like warnings.
#if verbose:
# if lneff < 0:
# print( 'In function - ', inspect.getouterframes( inspect.currentframe() )[2][3])
# print('WARNING: Runaway index on left side, bad interpolation. Left index = %0.3e at %0.3e'%(lneff, x[l]))
# if rneff > 0:
# print( 'In function - ', inspect.getouterframes( inspect.currentframe() )[2][3])
# print('WARNING: Runaway index on right side, bad interpolation. Reft index = %0.3e at %0.3e'%(rneff, x[r]))
if option == 'A':
xl = np.logspace(interp_min, np.log10(x[l]), Nint)
xr = np.logspace(np.log10(x[r]), interp_max, Nint)
yl = y[l]*(xl/x[l])**lneff
yr = y[r]*(xr/x[r])**rneff
#print(xr/x[r])
xint = x[l+1:r].copy()
yint = y[l+1:r].copy()
if side.find("l") > -1:
xint = np.concatenate((xl, xint))
yint = np.concatenate((yl, yint))
if side.find("r") > -1:
xint = np.concatenate((xint, xr))
yint = np.concatenate((yint, yr))
yint2 = interpolate(xint, yint, k = 5, ext=3)
else:
yint2 = lambda xx: (xx <= x[l]) * y[l]*(xx/x[l])**lneff \
+ (xx >= x[r]) * y[r]*(xx/x[r])**rneff \
+ (xx > x[l]) * (xx < x[r]) * interpolate(x, y, k = 5, ext=3)(xx)
return yint2
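# --- Hypothetical usage sketch (illustration only; not part of the original module) ---
# Tabulate a toy spectrum on a finite k-range and extrapolate it with power laws matched
# at both ends; the sample data below is made up purely for illustration.
k_demo = np.logspace(-3, 1, 200)
p_demo = k_demo / (1.0 + (k_demo / 0.1)**2)  # roughly ~k at low k and ~1/k at high k
p_ext = loginterp(k_demo, p_demo)  # callable valid well outside [1e-3, 10]
print(p_ext(np.array([1e-6, 1e-2, 1e2])))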
| 37.27027
| 120
| 0.536983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 971
| 0.352067
|
3f3d5388905e53963d743e54574d98fe526396ec
| 4,458
|
py
|
Python
|
src/modeling/calc_target_scale.py
|
pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution
|
0bc51075db31a747eeebb7f4775a3cd26ad5f870
|
[
"MIT"
] | 44
|
2020-12-09T06:15:15.000Z
|
2022-03-31T02:37:47.000Z
|
src/modeling/calc_target_scale.py
|
pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution
|
0bc51075db31a747eeebb7f4775a3cd26ad5f870
|
[
"MIT"
] | null | null | null |
src/modeling/calc_target_scale.py
|
pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution
|
0bc51075db31a747eeebb7f4775a3cd26ad5f870
|
[
"MIT"
] | 7
|
2020-12-09T10:08:32.000Z
|
2021-08-17T01:53:51.000Z
|
from typing import Tuple
import dataclasses
import numpy as np
import torch
from pathlib import Path
from l5kit.data import LocalDataManager, ChunkedDataset
import sys
import os
from tqdm import tqdm
sys.path.append(os.pardir)
sys.path.append(os.path.join(os.pardir, os.pardir))
from lib.evaluation.mask import load_mask_chopped
from lib.rasterization.rasterizer_builder import build_custom_rasterizer
from lib.dataset.faster_agent_dataset import FasterAgentDataset
from lib.utils.yaml_utils import save_yaml, load_yaml
from modeling.load_flag import load_flags, Flags
def calc_target_scale(agent_dataset, n_sample: int = 10000) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
sub_indices = np.linspace(0, len(agent_dataset) - 1, num=n_sample, dtype=np.int64)
pos_list = []
for i in tqdm(sub_indices):
d = agent_dataset[i]
pos = d["target_positions"]
pos[~d["target_availabilities"].astype(bool)] = np.nan
pos_list.append(pos)
agents_pos = np.array(pos_list)
target_scale_abs_mean = np.nanmean(np.abs(agents_pos), axis=0)
target_scale_abs_max = np.nanmax(np.abs(agents_pos), axis=0)
target_scale_std = np.nanstd(agents_pos, axis=0)
return target_scale_abs_mean, target_scale_abs_max, target_scale_std
if __name__ == '__main__':
mode = ""
flags: Flags = load_flags(mode=mode)
flags_dict = dataclasses.asdict(flags)
cfg = load_yaml(flags.cfg_filepath)
out_dir = Path(flags.out_dir)
print(f"cfg {cfg}")
os.makedirs(str(out_dir), exist_ok=True)
print(f"flags: {flags_dict}")
save_yaml(out_dir / 'flags.yaml', flags_dict)
save_yaml(out_dir / 'cfg.yaml', cfg)
debug = flags.debug
# set env variable for data
os.environ["L5KIT_DATA_FOLDER"] = flags.l5kit_data_folder
dm = LocalDataManager(None)
print("init dataset")
train_cfg = cfg["train_data_loader"]
valid_cfg = cfg["valid_data_loader"]
# Build StubRasterizer for fast dataset access
cfg["raster_params"]["map_type"] = "stub_debug"
rasterizer = build_custom_rasterizer(cfg, dm)
print("rasterizer", rasterizer)
train_path = "scenes/sample.zarr" if debug else train_cfg["key"]
train_agents_mask = None
if flags.validation_chopped:
# Use chopped dataset to calc statistics...
num_frames_to_chop = 100
th_agent_prob = cfg["raster_params"]["filter_agents_threshold"]
min_frame_future = 1
num_frames_to_copy = num_frames_to_chop
train_agents_mask = load_mask_chopped(
dm.require(train_path), th_agent_prob, num_frames_to_copy, min_frame_future)
print("train_path", train_path, "train_agents_mask", train_agents_mask.shape)
train_zarr = ChunkedDataset(dm.require(train_path)).open(cached=False)
print("train_zarr", type(train_zarr))
print(f"Open Dataset {flags.pred_mode}...")
train_agent_dataset = FasterAgentDataset(
cfg, train_zarr, rasterizer, min_frame_history=flags.min_frame_history,
min_frame_future=flags.min_frame_future, agents_mask=train_agents_mask
)
print("train_agent_dataset", len(train_agent_dataset))
n_sample = 1_000_000 # Take 1M sample.
target_scale_abs_mean, target_scale_abs_max, target_scale_std = calc_target_scale(train_agent_dataset, n_sample)
chopped_str = "_chopped" if flags.validation_chopped else ""
agent_prob = cfg["raster_params"]["filter_agents_threshold"]
filename = f"target_scale_abs_mean_{agent_prob}_{flags.min_frame_history}_{flags.min_frame_future}{chopped_str}.npz"
cache_path = Path(train_zarr.path) / filename
np.savez_compressed(cache_path, target_scale=target_scale_abs_mean)
print("Saving to ", cache_path)
filename = f"target_scale_abs_max_{agent_prob}_{flags.min_frame_history}_{flags.min_frame_future}{chopped_str}.npz"
cache_path = Path(train_zarr.path) / filename
np.savez_compressed(cache_path, target_scale=target_scale_abs_max)
print("Saving to ", cache_path)
filename = f"target_scale_std_{agent_prob}_{flags.min_frame_history}_{flags.min_frame_future}{chopped_str}.npz"
cache_path = Path(train_zarr.path) / filename
np.savez_compressed(cache_path, target_scale=target_scale_std)
print("Saving to ", cache_path)
print("target_scale_abs_mean", target_scale_abs_mean)
print("target_scale_abs_max", target_scale_abs_max)
print("target_scale_std", target_scale_std)
import IPython; IPython.embed()
| 40.527273
| 120
| 0.746074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 987
| 0.2214
|
3f3d9f5e7b52389bd248948394c8302b4b2c0b67
| 3,679
|
py
|
Python
|
examples/solvers using low level utilities/interior_laplace_neumann_panel_polygon.py
|
dbstein/pybie2d
|
1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58
|
[
"Apache-2.0"
] | 11
|
2018-10-26T17:34:29.000Z
|
2020-04-27T21:21:33.000Z
|
examples/solvers using low level utilities/interior_laplace_neumann_panel_polygon.py
|
dbstein/pybie2d
|
1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58
|
[
"Apache-2.0"
] | null | null | null |
examples/solvers using low level utilities/interior_laplace_neumann_panel_polygon.py
|
dbstein/pybie2d
|
1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import scipy as sp
import scipy.sparse
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.path
plt.ion()
import pybie2d
"""o solve an interior Modified Helmholtz problem
On a complicated domain using a global quadr
Demonstrate how to use the pybie2d package tature
This example demonstrates how to do this entirely using low-level routines,
To demonstrate both how to use these low level routines
And to give you an idea what is going on under the hood in the
higher level routines
"""
NG = 1000
h_max = 0.01
# extract some functions for easy calling
squish = pybie2d.misc.curve_descriptions.squished_circle
PPB = pybie2d.boundaries.panel_polygon_boundary.panel_polygon_boundary.Panel_Polygon_Boundary
Grid = pybie2d.grid.Grid
PointSet = pybie2d.point_set.PointSet
Laplace_Layer_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Form
Laplace_Layer_Singular_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Singular_Form
Laplace_Layer_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Apply
################################################################################
# define problem
# boundary
boundary = PPB([0,1,1,0], [0,0,1,1], [h_max]*4, [True]*4, dyadic_levels=20, dyadic_base=3)
# solution
solution_func = lambda x, y: 2*x + y
bc = solution_func(boundary.x, boundary.y)
bcx = lambda x, y: 2.0*np.ones_like(x)
bcy = lambda x, y: 1.0*np.ones_like(x)
bcn = lambda x, y, nx, ny: bcx(x, y)*nx + bcy(x, y)*ny
def err_plot(u):
# compute the error
error = u - solution_func(gridp.xg, gridp.yg)
digits = -np.log10(np.abs(error)+1e-16)
mdigits = np.ma.array(digits)
# plot the error as a function of space (only good in interior)
fig, ax = plt.subplots(1,1)
clf = ax.imshow(mdigits[:,::-1].T, extent=[0,1,0,1],
cmap=mpl.cm.viridis_r)
ax.set_aspect('equal')
fig.colorbar(clf)
print('Error: {:0.2e}'.format(np.abs(error).max()))
################################################################################
##### solve problem the hard way ###############################################
################################################################################
################################################################################
# find physical region
# (this implements a fast way to tell if points are in or out of the boundary)
# (and of course, for the squish boundary, we could easily figure out something
# faster, but this illustrates a general purpose routine)
gridp = Grid([0,1], NG, [0,1], NG, x_endpoints=[False,False], y_endpoints=[False,False])
################################################################################
# solve for the density
DLP = Laplace_Layer_Singular_Form(boundary, ifdipole=True)
SLPp = (DLP/boundary.weights).T*boundary.weights
A = 0.5*np.eye(boundary.N) + SLPp
tau = np.linalg.solve(A, bcn(boundary.x, boundary.y, boundary.normal_x, boundary.normal_y))
# The interior Neumann problem determines u only up to an additive constant, so pin it by matching the exact value 2*0.5 + 0.5 = 1.5 at the interior point (0.5, 0.5)
target = PointSet(x=np.array((0.5)),y=np.array((0.5)))
good_eval = Laplace_Layer_Apply(boundary, target=target, charge=tau)
correction = (2*0.5 + 0.5) - good_eval
################################################################################
# naive evaluation
u = Laplace_Layer_Apply(boundary, gridp, charge=tau)
u = gridp.reshape(u)
u += correction
err_plot(u)
################################################################################
# oversampled
hmax = gridp.xg[1,0] - gridp.xg[0,0]
fbdy, IMAT = boundary.prepare_oversampling(hmax/6.0)
IMAT = sp.sparse.csr_matrix(IMAT)
ftau = IMAT.dot(tau)
u = Laplace_Layer_Apply(fbdy, gridp, charge=ftau)
u = gridp.reshape(u)
u += correction
err_plot(u)
ua = 2*gridp.xg + gridp.yg
| 34.383178
| 93
| 0.618918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,498
| 0.407176
|
3f3e92f1f4a6224cee72e432180bdaba79cbc4b7
| 576
|
py
|
Python
|
controllers/rcj_soccer_referee_supervisor/rcj_soccer_referee_supervisor.py
|
dbscoach/webots-soccer-sim-playground
|
464f9052834d0c6896e6a960113720e8ca4e21df
|
[
"Apache-2.0"
] | null | null | null |
controllers/rcj_soccer_referee_supervisor/rcj_soccer_referee_supervisor.py
|
dbscoach/webots-soccer-sim-playground
|
464f9052834d0c6896e6a960113720e8ca4e21df
|
[
"Apache-2.0"
] | null | null | null |
controllers/rcj_soccer_referee_supervisor/rcj_soccer_referee_supervisor.py
|
dbscoach/webots-soccer-sim-playground
|
464f9052834d0c6896e6a960113720e8ca4e21df
|
[
"Apache-2.0"
] | null | null | null |
from math import ceil
from referee.consts import MATCH_TIME, TIME_STEP
from referee.referee import RCJSoccerReferee
referee = RCJSoccerReferee(
match_time=MATCH_TIME,
progress_check_steps=ceil(15/(TIME_STEP/1000.0)),
progress_check_threshold=0.5,
ball_progress_check_steps=ceil(10/(TIME_STEP/1000.0)),
ball_progress_check_threshold=0.5,
)
while referee.step(TIME_STEP) != -1:
referee.emit_positions()
if not referee.tick():
break
# When end of match, pause simulator immediately
referee.simulationSetMode(referee.SIMULATION_MODE_PAUSE)
| 27.428571
| 58
| 0.770833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.083333
|
3f3edf95fac5cc6b31cb7effd1e2b59006a53ab6
| 4,675
|
py
|
Python
|
backend/app.py
|
CMU-IDS-2020/fp-profiler
|
45edb7c5f5dfcf34854057476558793bc877f031
|
[
"BSD-3-Clause"
] | null | null | null |
backend/app.py
|
CMU-IDS-2020/fp-profiler
|
45edb7c5f5dfcf34854057476558793bc877f031
|
[
"BSD-3-Clause"
] | null | null | null |
backend/app.py
|
CMU-IDS-2020/fp-profiler
|
45edb7c5f5dfcf34854057476558793bc877f031
|
[
"BSD-3-Clause"
] | 1
|
2020-11-20T02:56:20.000Z
|
2020-11-20T02:56:20.000Z
|
from flask import Flask, request
import os
from subprocess import Popen, PIPE
import json
from prof_file_util import load_source, load_line_profile, load_graph_profile
from linewise_barchart import linewise_barchart
from valgrind import extract_valgrind_result
from mem_issue_visualize import mem_issue_visualize
app = Flask(__name__)
@app.route('/upload-file', methods = ['POST'])
def hello():
'''
shall return a json dict
if succeeds,
{
'error': 0,
'vega_json': ...
'node_json': ...
'edge_json': ...
...
}
if fails,
{
'error': 1,
'source': formatted source code,
'error_message': the compile failure message
}
'''
code = request.get_json()['code']
# print(code)
local_path = 'temp.c' # TODO: hash file names to handle concurrency issues
with open(local_path, 'w') as f:
f.write(code)
process = Popen(['wc', '-l', local_path], stdout=PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
# print(output)
# with open('test.json') as f:
# s = json.load(f)
ret_dict = {}
'''
Invoke the compiler (if needed) and the profiler to generate the results.
'''
os.system('clang-format -i {}'.format(local_path))
compile_retvalue = os.system('gcc -g -pg {} -o prog 1> gcc_output 2>&1'.format(local_path))
# handle compiling error
if compile_retvalue != 0:
ret_dict['error'] = 1
ret_dict['source'] = ''.join(list(open(local_path, 'r').readlines()))
ret_dict['error_message'] = ''.join(list(open('gcc_output', 'r').readlines()))
return ret_dict
os.system('./prog')
os.system('ctags --fields=+ne -o - --sort=no {} 1> ctags_output 2>&1'.format(local_path))
os.system('gprof --graph prog gmon.out 1> graph_file 2>&1')
os.system('gprof -l prog gmon.out 1> linewise_file 2>&1')
'''
Now we have the outputs. Visualize and pass it back to the frontend.
'''
# for debug purposes. Only Linux can run gprof so far.
ret_dict['error'] = 0
if os.path.isfile('linewise_file') and os.path.getsize('linewise_file') > 0\
and os.path.isfile('graph_file') and os.path.getsize('graph_file') > 0:
df = load_line_profile(local_path, 'linewise_file')
chart = linewise_barchart(df)
# chart.save('new.json')
'''
TODO: Maybe the temporary files should be cleared or
stored somewhere serving as history data.
'''
ret_dict['vega_json'] = json.loads(chart.to_json())
graph_dct = load_graph_profile('graph_file')
if graph_dct:
for k, v in graph_dct.items():
ret_dict[k] = v
else:
ret_dict['vega_json'] = json.load(open('test.json', 'r'))
# print(uninitialised_buffer, invalid_write_buffer, mem_leak_dic)
return ret_dict
@app.route('/mem-profile', methods = ['POST'])
def mem_profile():
'''
shall return a json dict
if succeeds,
{
'error': 0,
'vega_json': ...
...
}
if fails,
{
'error': 1,
'source': formatted source code,
'error_message': the compile failure message
}
'''
code = request.get_json()['code']
# print(code)
local_path = 'temp.c' # TODO: hash file names to handle concurrency issues
with open(local_path, 'w') as f:
f.write(code)
process = Popen(['wc', '-l', local_path], stdout=PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
# print(output)
# with open('test.json') as f:
# s = json.load(f)
ret_dict = {}
'''
Invoke the compiler (if needed) and the profiler to generate the results.
'''
os.system('clang-format -i {}'.format(local_path))
compile_retvalue = os.system('gcc -pedantic -g {} -o exec 1> gcc_output 2>&1'.format(local_path))
if compile_retvalue != 0:
ret_dict['error'] = 1
ret_dict['source'] = ''.join(list(open(local_path, 'r').readlines()))
ret_dict['error_message'] = ''.join(list(open('gcc_output', 'r').readlines()))
return ret_dict
os.system('valgrind ./exec > valgrind.txt 2>&1')
uninitialised_buffer, invalid_write_buffer = extract_valgrind_result('other', 'valgrind.txt')
os.system('valgrind --leak-check=full ./exec > valgrind_leak.txt 2>&1')
mem_leak_dic = extract_valgrind_result('memory_leak', 'valgrind_leak.txt')
ret_dict['error'] = 0
vega_chart = mem_issue_visualize(local_path, uninitialised_buffer, invalid_write_buffer, mem_leak_dic)
ret_dict['vega_json'] = json.loads(vega_chart.to_json())
return ret_dict
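# --- Hypothetical client sketch (illustration only; not part of the original app) ---
# Both endpoints above expect a JSON body with a single 'code' field holding the C source.
# Host and port are assumptions (Flask defaults), not taken from this repository.
# import requests
# demo_src = 'int main(void) { return 0; }'
# print(requests.post('http://localhost:5000/upload-file', json={'code': demo_src}).json())
# print(requests.post('http://localhost:5000/mem-profile', json={'code': demo_src}).json())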
| 31.802721
| 106
| 0.620535
| 0
| 0
| 0
| 0
| 4,328
| 0.925775
| 0
| 0
| 2,159
| 0.461818
|
3f3f001f639e3ff68f19c91e138db8007658913f
| 998
|
py
|
Python
|
py/book/ShortestSubarrayLength.py
|
danyfang/SourceCode
|
8168f6058648f2a330a7354daf3a73a4d8a4e730
|
[
"MIT"
] | null | null | null |
py/book/ShortestSubarrayLength.py
|
danyfang/SourceCode
|
8168f6058648f2a330a7354daf3a73a4d8a4e730
|
[
"MIT"
] | null | null | null |
py/book/ShortestSubarrayLength.py
|
danyfang/SourceCode
|
8168f6058648f2a330a7354daf3a73a4d8a4e730
|
[
"MIT"
] | null | null | null |
'''
Leetcode problem No 862 Shortest Subarray with Sum at Least K
Solution written by Xuqiang Fang on 1 July, 2018
'''
import collections
class Solution(object):
def shortestSubarray(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
n = len(A)
B = [0] * (n + 1)
for i in range(n):
B[i+1] = B[i] + A[i]
d = collections.deque()
ans = n + 1
for i in range(n+1):
while d and B[i] - B[d[0]] >= K:
ans = min(ans, i-d.popleft())
while d and B[i] <= B[d[-1]]:
d.pop()
d.append(i)
return ans if ans <= n else -1
def main():
s = Solution()
print(s.shortestSubarray([2,-1,2], 3))
print(s.shortestSubarray([1,2], 4))
print(s.shortestSubarray([1], 1))
print(s.shortestSubarray([1,2,3,-5,4,-7,5,-8,6,-9,7,8,-4], 5)) #1
print(s.shortestSubarray([1,2,-5,3,-5,4,-7,5,-8,6,-9,7,8,-4], 5))
main()
| 28.514286
| 69
| 0.490982
| 559
| 0.56012
| 0
| 0
| 0
| 0
| 0
| 0
| 203
| 0.203407
|
3f3f1b80d7db0ba49872fa346d7180fa077d1cab
| 2,692
|
py
|
Python
|
djangocms_baseplugins/contact/models.py
|
benzkji/djangocms-baseplugins
|
7f041a030ed93dcdec70e4ca777b841846b8f2f2
|
[
"MIT"
] | 2
|
2019-04-14T01:31:22.000Z
|
2020-03-05T13:06:57.000Z
|
djangocms_baseplugins/contact/models.py
|
benzkji/djangocms-baseplugins
|
7f041a030ed93dcdec70e4ca777b841846b8f2f2
|
[
"MIT"
] | 32
|
2017-04-04T09:28:06.000Z
|
2021-08-18T16:23:02.000Z
|
djangocms_baseplugins/contact/models.py
|
bnzk/djangocms-baseplugins
|
7f041a030ed93dcdec70e4ca777b841846b8f2f2
|
[
"MIT"
] | null | null | null |
import time
from ckeditor.fields import RichTextField
from django.db import models
from django.utils.translation import ugettext_lazy as _
from requests import ConnectionError
from djangocms_baseplugins.baseplugin.models import AbstractBasePlugin
from djangocms_baseplugins.baseplugin.utils import check_migration_modules_needed
check_migration_modules_needed('contact')
class ContactBase(AbstractBasePlugin):
website = models.URLField(_("Website"), blank=True, default='')
email = models.EmailField(_("Email"), blank=True, default='')
phone = models.CharField(_("Phone"), max_length=64, blank=True, default='')
fax = models.CharField(_("Fax"), max_length=64, blank=True, default='')
body = RichTextField(_("Text"), blank=True, default='')
address = models.TextField(_('Address'), default='', blank=True)
geocoding_address = models.CharField(
_('Address for the map'),
max_length=64,
default='',
blank=True,
)
lat = models.FloatField(blank=True, default=0, null=True)
lng = models.FloatField(blank=True, default=0, null=True)
geo_error = models.BooleanField(_("Probleme mit der Adresse?"), default=False)
class Meta:
abstract = True
def __str__(self):
text = str(_("Contact / Subsidiary"))
if self.geo_error:
text = "%s (%s)" % (text, _("Coordinates Error!"))
return self.add_hidden_flag(text)
class Contact(ContactBase):
def save(self, *args, **kwargs):
"""
here for now. may end in a metaclass, if we have time to do this
"""
try:
import geocoder
except ImportError:
return super(Contact, self).save(*args, **kwargs)
try:
from_db = Contact.objects.get(id=self.id)
except self.DoesNotExist:
from_db = Contact()
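# Only re-geocode when no coordinates are stored yet or the geocoding address changed;
# each komoot lookup below is followed by a 2-second pause.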
if self.geocoding_address:
if not self.lat or not from_db.geocoding_address == self.geocoding_address:
g = None
try:
g = geocoder.komoot(self.geocoding_address)
time.sleep(2)
except ConnectionError:
pass
if g and g.ok:
self.lat = g.latlng[0]
self.lng = g.latlng[1]
self.geo_error = False
else:
self.geo_error = True
if not self.lat:
# print "no latlng found: %s" % self
self.geo_error = True
else:
self.geo_error = False
self.lat = 0
self.lng = 0
return super(Contact, self).save(*args, **kwargs)
| 34.075949
| 87
| 0.593239
| 2,311
| 0.85847
| 0
| 0
| 0
| 0
| 0
| 0
| 288
| 0.106984
|
3f3fcc2c16b2bfd7c2cf31951c3290a8d5c5992d
| 355
|
py
|
Python
|
Level1/Lessons76501/minari-76501.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level1/Lessons76501/minari-76501.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level1/Lessons76501/minari-76501.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | 1
|
2021-04-05T07:35:59.000Z
|
2021-04-05T07:35:59.000Z
|
def solution(absolutes, signs):
answer = 0
for i in range(len(absolutes)):
if signs[i] is True:
answer += int(absolutes[i])
else:
answer -= int(absolutes[i])
return answer
#1. for loop (len(absolutes)), if signs[i] is true: answer += absolutes[i], else: answer -= absolutes[i]
#2. sum(absolutes)
| 29.583333
| 100
| 0.571831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 120
| 0.336134
|
3f40172291607ab0c848f7f1917399766b9b515c
| 1,082
|
py
|
Python
|
pyexcel/__init__.py
|
quis/pyexcel
|
e02f5ff871ba69184d3fb85fa8960da4e883ebdc
|
[
"BSD-3-Clause"
] | null | null | null |
pyexcel/__init__.py
|
quis/pyexcel
|
e02f5ff871ba69184d3fb85fa8960da4e883ebdc
|
[
"BSD-3-Clause"
] | null | null | null |
pyexcel/__init__.py
|
quis/pyexcel
|
e02f5ff871ba69184d3fb85fa8960da4e883ebdc
|
[
"BSD-3-Clause"
] | null | null | null |
"""
pyexcel
~~~~~~~~~~~~~~~~~~~
**pyexcel** is a wrapper library to read, manipulate and
write data in different excel formats: csv, ods, xls, xlsx
and xlsm. It does not support formulas, styles and charts.
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
# flake8: noqa
from .cookbook import (
merge_csv_to_a_book,
merge_all_to_a_book,
split_a_book,
extract_a_sheet_from_a_book,
)
from .core import (
get_array,
iget_array,
get_dict,
get_records,
iget_records,
get_book_dict,
get_sheet,
get_book,
iget_book,
save_as,
isave_as,
save_book_as,
isave_book_as,
)
from .book import Book
from .sheet import Sheet
from .internal.garbagecollector import free_resources
from .deprecated import (
load_book,
load_book_from_memory,
load,
load_from_memory,
load_from_dict,
load_from_records,
Reader,
SeriesReader,
ColumnSeriesReader,
BookReader,
)
from .__version__ import __version__, __author__
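# --- Hypothetical usage sketch (illustration only; not part of the original package) ---
# Typical round trip with the helpers re-exported above; the file names are made up.
# sheet = get_sheet(file_name='input.csv')
# save_as(array=[[1, 2], [3, 4]], dest_file_name='output.xlsx')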
| 21.64
| 62
| 0.686691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 358
| 0.330869
|
3f4174ff9c8ef5d53e5df7cf324c378ca2b1ce02
| 2,729
|
py
|
Python
|
tests/resources/selenium/test_nfc.py
|
Avi-Labs/taurus
|
3aa9bc294778d99be545575467fb5897dc815330
|
[
"Apache-2.0"
] | 1,743
|
2015-03-30T20:56:03.000Z
|
2022-03-31T09:08:37.000Z
|
tests/resources/selenium/test_nfc.py
|
Avi-Labs/taurus
|
3aa9bc294778d99be545575467fb5897dc815330
|
[
"Apache-2.0"
] | 1,159
|
2015-04-01T08:25:53.000Z
|
2022-03-29T08:15:31.000Z
|
tests/resources/selenium/test_nfc.py
|
Avi-Labs/taurus
|
3aa9bc294778d99be545575467fb5897dc815330
|
[
"Apache-2.0"
] | 497
|
2015-03-31T21:05:18.000Z
|
2022-03-17T12:45:21.000Z
|
# coding=utf-8
import logging
import random
import string
import sys
import unittest
from time import time, sleep
import apiritif
import os
import re
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from bzt.resources.selenium_extras import waiter, get_locator
class TestSc1(unittest.TestCase):
def setUp(self):
self.vars = {}
timeout = 2.0
options = webdriver.FirefoxOptions()
profile = webdriver.FirefoxProfile()
profile.set_preference('webdriver.log.file', '/somewhere/webdriver.log')
options.set_capability('unhandledPromptBehavior', 'ignore')
self.driver = webdriver.Firefox(profile, options=options)
self.driver.implicitly_wait(timeout)
apiritif.put_into_thread_store(timeout=timeout, func_mode=False,
driver=self.driver, windows={}, scenario_name='sc1')
def _1_httpsblazedemocomsetup1(self):
with apiritif.smart_transaction('https://blazedemo.com/setup1'):
self.driver.get('https://blazedemo.com/setup1')
def _2_setup2(self):
with apiritif.smart_transaction('setup2'):
self.driver.get('https://blazedemo.com/setup2')
waiter()
def _3_httpsblazedemocommain1(self):
with apiritif.smart_transaction('https://blazedemo.com/main1'):
self.driver.get('https://blazedemo.com/main1')
def _4_main2(self):
with apiritif.smart_transaction('main2'):
self.driver.get('https://blazedemo.com/main2')
waiter()
def _5_httpsblazedemocomteardown1(self):
with apiritif.smart_transaction('https://blazedemo.com/teardown1'):
self.driver.get('https://blazedemo.com/teardown1')
def _6_teardown2(self):
with apiritif.smart_transaction('teardown2'):
self.driver.get('https://blazedemo.com/teardown2')
waiter()
def test_sc1(self):
try:
self._1_httpsblazedemocomsetup1()
self._2_setup2()
self._3_httpsblazedemocommain1()
self._4_main2()
finally:
apiritif.set_stage("teardown") # can't be interrupted
self._5_httpsblazedemocomteardown1()
self._6_teardown2()
def tearDown(self):
if self.driver:
self.driver.quit()
| 32.488095
| 91
| 0.681568
| 2,069
| 0.758153
| 0
| 0
| 0
| 0
| 0
| 0
| 432
| 0.1583
|
3f418694dc8e68bdf0bfc91861f5c5eb0502eab0
| 5,495
|
py
|
Python
|
src/onegov/search/dsl.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/search/dsl.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/search/dsl.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from elasticsearch_dsl import Search as BaseSearch
from elasticsearch_dsl.response import Hit as BaseHit
from elasticsearch_dsl.response import Response as BaseResponse
def type_from_hit(hit):
return hit.meta.index.split('-')[-2]
class Search(BaseSearch):
""" Extends elastichsearch_dsl's search object with ORM integration.
Works exactly the same as the original, but the results it returns offer
additional methods to query the SQLAlchemy models behind the results (if
any).
"""
def __init__(self, *args, **kwargs):
# get the session and mappings if possible (not provided during cloning)
self.session = kwargs.pop('session', None)
self.mappings = kwargs.pop('mappings', None)
super().__init__(*args, **kwargs)
# bind responses to the orm
self._response_class = Response.bind(
self.session, self.mappings, self.explain)
@property
def explain(self):
return self._extra.get('explain', False)
def _clone(self):
search = super()._clone()
search.session = self.session
search.mappings = self.mappings
return search
def _get_result(self, *args, **kwargs):
result = super()._get_result(*args, **kwargs)
result.__class__ = Hit.bind(
session=self.session,
model=self.mappings[type_from_hit(result)].model
)
return result
class Response(BaseResponse):
""" Extends the default response (list of results) with additional
methods to query the SQLAlchemy models behind the results.
"""
@classmethod
def bind(cls, session, mappings, explain):
class BoundResponse(cls):
pass
BoundResponse.session = session
BoundResponse.mappings = mappings
BoundResponse.explain = explain
return BoundResponse
def hits_by_type(self, type):
for hit in self.hits:
if type_from_hit(hit) == type:
yield hit
def query(self, type):
""" Returns an SQLAlchemy query for the given type. You must provide
a type, because a query can't consist of multiple unrelated tables.
If no results match the type, None is returned.
"""
hits = list(self.hits_by_type(type))
if not hits:
return None
model = self.mappings[type].model
query = self.session.query(model)
model_ids = (h.meta.id for h in hits)
query = query.filter(getattr(model, model.es_id).in_(model_ids))
return query
def load(self):
""" Loads all results by querying the SQLAlchemy session in the order
they were returned by elasticsearch.
Note that the resulting lists may include None values, since we
might get elasticsearch results for which we do not have a model
in the database (the data is then out of sync).
"""
positions = {}
types = set()
# put the types into buckets and store the original position...
for ix, hit in enumerate(self.hits):
type = type_from_hit(hit)
positions[(type, str(hit.meta.id))] = ix
types.add(type)
results = [None] * len(positions)
# ...so we can query the database once per type and not once per result
# this has the potential of resulting in fewer queries
for type in types:
for result in self.query(type):
object_id = str(getattr(result, result.es_id))
ix = positions[(type, object_id)]
if self.explain:
ex = self.hits[ix].meta.explanation
result.explanation = {
'raw': ex.__dict__,
'score': self.hits[ix].meta.score,
'term-frequency': explanation_value(
ex, 'termFreq'
),
'inverse-document-frequency': explanation_value(
ex, 'idf'
),
'field-norm': explanation_value(
ex, 'fieldNorm'
)
}
results[ix] = result
return results
def explanation_value(explanation, text):
""" Gets the value from the explanation for descriptions starting with
the given text.
"""
if explanation.description.startswith(text):
return {
'description': explanation.description,
'value': explanation.value
}
for detail in getattr(explanation, 'details', []):
result = explanation_value(detail, text)
if result:
return result
class Hit(BaseHit):
""" Extends a single result with additional methods to query the SQLAlchemy
models behind the results.
"""
@classmethod
def bind(cls, model, session):
class BoundHit(cls):
pass
BoundHit.model = model
BoundHit.session = session
return BoundHit
def query(self):
""" Returns the SQLAlchemy query for this result. """
query = self.session.query(self.model)
model_id = getattr(self.model, self.model.es_id)
query = query.filter(model_id == self.meta.id)
return query
def load(self):
""" Loads this result from the SQLAlchemy session. """
return self.query().one()
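# --- Hypothetical usage sketch (illustration only; not part of the original module) ---
# `session` is an SQLAlchemy session and `mappings` maps a type name (the second-to-last
# segment of the index name) to an object exposing a `.model` attribute; all names below
# are assumptions made for illustration.
# search = Search(using=es_client, index='onegov-*', session=session, mappings=mappings)
# response = search.query('match', title='budget').execute()
# records = response.load()  # SQLAlchemy models in elasticsearch order; may contain None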
| 28.471503
| 79
| 0.589263
| 4,783
| 0.870428
| 128
| 0.023294
| 531
| 0.096633
| 0
| 0
| 1,688
| 0.307188
|
3f4197885d65ac6c21aa8108e7b1eaac4d9a1a2e
| 3,862
|
py
|
Python
|
DistributedStorageBenchmarkTool/EchoHandler.py
|
shadoobie/dbench
|
0cca504048ba918a1502482b7d06a866cda9ab6e
|
[
"MIT"
] | null | null | null |
DistributedStorageBenchmarkTool/EchoHandler.py
|
shadoobie/dbench
|
0cca504048ba918a1502482b7d06a866cda9ab6e
|
[
"MIT"
] | null | null | null |
DistributedStorageBenchmarkTool/EchoHandler.py
|
shadoobie/dbench
|
0cca504048ba918a1502482b7d06a866cda9ab6e
|
[
"MIT"
] | null | null | null |
from SocketServer import BaseRequestHandler, TCPServer
from DistributedStorageBenchmarkTool.StampyMcGetTheLog import StampyMcGetTheLog
# from sets import Set
import re
class EchoHandler(BaseRequestHandler):
name = None
server = None
chunkSizeWriteTimes = []
chunkSizeSet = set()
def __init__(self, request, client_address, server):
self.server = server
self.name = "EchoHandlerFor client " + str(client_address)
self.server.flood("EchoHandler names " + self.name + " has been instantiated.")
BaseRequestHandler.__init__(self, request, client_address, server)
def handle(self):
self.server.flood(self.name + " handle() invoked.")
while True:
receivedData = self.request.recv(8192)
self.server.flood("EchoHandler " + self.name + " receivedData = " + str(receivedData))
if not receivedData: break
self.request.sendall(receivedData)
self.request.close()
self.server.flood(self.name + " handel() has completed.")
def lookForStuff(self, parsedData):
'''this whole lookForStuff approach is wrong. please forgive me for the code that ensues. i did it wrong.'''
lifeSpan = self.getLifeSpan(parsedData)
maxFileSizeString = self.getMaxFileSizeString(parsedData)
self.gatherChunkSizeAndWriteTimeMessages(parsedData)
if lifeSpan != None and maxFileSizeString != None:
maxFileSize = int(maxFileSizeString)
for aChunkSize in self.chunkSizeSet:
self.evaluateForRolloverCompliant(lifeSpan, maxFileSize, aChunkSize)
def getClientName(self, parsedData):
clientName = None
if "clientName:" in parsedData:
clientName = parsedData[1]
return clientName
def getLifeSpan(self, parsedData):
lifeSpan = None
if "lifeSpan:" in parsedData:
lifeSpan = parsedData[2]
else:
lifeSpan = self.lifeSpan
return lifeSpan
def getMaxFileSizeString(self, parsedData):
maxFileSize = None
if "maxFileSize:" in parsedData:
maxFileSize = parsedData[9]
return maxFileSize
def gatherChunkSizeAndWriteTimeMessages(self, parsedData):
if "writeTime:" in parsedData:
self.chunkSizeWriteTimes.append({'chunkSize':int(parsedData[3]), 'writeTime':float(parsedData[1])})
self.chunkSizeSet.add(int(parsedData[3]))
def getAverageTimeBetweenWritesForChunkSize(self, chunkSize):
'''the first draft of this method is probably not correct.'''
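# Note: as written, this averages every recorded write time; the chunkSize argument is not
# used to filter self.chunkSizeWriteTimes.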
average = sum(d['writeTime'] for d in self.chunkSizeWriteTimes) / len(self.chunkSizeWriteTimes)
return average
def evaluateForRolloverCompliant(self, lifeSpan, maxFileSize, chunkSize):
numberOfSecondsRunning = lifeSpan
howManySecondsBetweenEachChunkWrite = self.getAverageTimeBetweenWritesForChunkSize(chunkSize)
numberOfChunksPerFile = maxFileSize / chunkSize
numberOfSecondsPerFile = howManySecondsBetweenEachChunkWrite * numberOfChunksPerFile
estimatedTotalFiles = numberOfSecondsRunning / numberOfSecondsPerFile
if estimatedTotalFiles <= 2:
self.server.flood(self.name + " says hey there I'm complaining that this test run will only roll over the data file an estimated " +
str(estimatedTotalFiles) + " number of files.")
else:
self.server.flood(self.name + " says it looks like we will have an estimated number of data files = " + str(estimatedTotalFiles))
def parseLine(self,line):
'''leverage regular expression to parse on space.'''
parsedLine = re.split(r'\s',line)
return parsedLine
if __name__ == '__main__':
serv = TCPServer(('', 20000), EchoHandler)
serv.serve_forever()
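# --- Added example (editor's sketch, not part of the original file) ---
# A minimal client for the echo server started in the __main__ block above.  The
# payload bytes are arbitrary: handle() simply echoes back whatever arrives.
def example_echo_client(host='localhost', port=20000, payload=b'hello dbench'):
    import socket
    conn = socket.create_connection((host, port))
    try:
        conn.sendall(payload)
        return conn.recv(8192)  # the echoed bytes
    finally:
        conn.close()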
| 39.408163
| 144
| 0.676075
| 3,593
| 0.930347
| 0
| 0
| 0
| 0
| 0
| 0
| 678
| 0.175557
|
3f42306d062bc9168cc3334b385fbe62bb7498d6
| 14,054
|
py
|
Python
|
bitten/queue.py
|
SpamExperts/bitten
|
924ae157c876eeff7957074b0c51ed4685d4f304
|
[
"BSD-3-Clause"
] | null | null | null |
bitten/queue.py
|
SpamExperts/bitten
|
924ae157c876eeff7957074b0c51ed4685d4f304
|
[
"BSD-3-Clause"
] | 1
|
2020-09-24T05:28:44.000Z
|
2020-09-28T05:34:19.000Z
|
bitten/queue.py
|
SpamExperts/bitten
|
924ae157c876eeff7957074b0c51ed4685d4f304
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2010 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://bitten.edgewall.org/wiki/License.
"""Implements the scheduling of builds for a project.
This module provides the functionality for scheduling builds for a specific
Trac environment. It is used by both the build master and the web interface to
get the list of required builds (revisions not built yet).
Furthermore, the `BuildQueue` class is used by the build master to determine
the next pending build, and to match build slaves against configured target
platforms.
"""
from itertools import ifilter
import re
import time
from trac.util.datefmt import to_timestamp
from trac.util import pretty_timedelta, format_datetime
from trac.attachment import Attachment
from bitten.model import BuildConfig, TargetPlatform, Build, BuildStep
from bitten.util.repository import get_repos
__docformat__ = 'restructuredtext en'
def collect_changes(config, authname=None):
"""Collect all changes for a build configuration that either have already
been built, or still need to be built.
This function is a generator that yields ``(platform, rev, build)`` tuples,
where ``platform`` is a `TargetPlatform` object, ``rev`` is the identifier
of the changeset, and ``build`` is a `Build` object or `None`.
:param config: the build configuration
:param authname: the logged in user
:param db: a database connection (optional)
"""
env = config.env
repos_name, repos, repos_path = get_repos(env, config.path, authname)
with env.db_query as db:
try:
node = repos.get_node(repos_path)
except Exception, e:
env.log.warn('Error accessing path %r for configuration %r',
repos_path, config.name, exc_info=True)
return
for path, rev, chg in node.get_history():
# Don't follow moves/copies
if path != repos.normalize_path(repos_path):
break
# Stay within the limits of the build config
if config.min_rev and repos.rev_older_than(rev, config.min_rev):
break
if config.max_rev and repos.rev_older_than(config.max_rev, rev):
continue
# Make sure the repository directory isn't empty at this
# revision
old_node = repos.get_node(path, rev)
is_empty = True
for entry in old_node.get_entries():
is_empty = False
break
if is_empty:
continue
# For every target platform, check whether there's a build
# of this revision
for platform in TargetPlatform.select(env, config.name):
builds = list(Build.select(env, config.name, rev, platform.id))
if builds:
build = builds[0]
else:
build = None
yield platform, rev, build
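# --- Added illustration (editor's sketch, not part of the original module) ---
# Typical way the generator above is consumed: revisions whose ``build`` is None
# have not been built yet on that platform.  ``config`` is a BuildConfig instance.
def _example_pending_revisions(config):
    """Yield (platform, rev) pairs that still need a build."""
    for platform, rev, build in collect_changes(config):
        if build is None:
            yield platform, rev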
class BuildQueue(object):
"""Enapsulates the build queue of an environment.
A build queue manages the the registration of build slaves and detection of
repository revisions that need to be built.
"""
def __init__(self, env, build_all=False, stabilize_wait=0, timeout=0):
"""Create the build queue.
:param env: the Trac environment
:param build_all: whether older revisions should be built
:param stabilize_wait: The time in seconds to wait before considering
the repository stable to create a build in the queue.
:param timeout: the time in seconds after which an in-progress build
should be considered orphaned, and reset to pending
state
"""
self.env = env
self.log = env.log
self.build_all = build_all
self.stabilize_wait = stabilize_wait
self.timeout = timeout
# Build scheduling
def get_build_for_slave(self, name, properties):
"""Check whether one of the pending builds can be built by the build
slave.
:param name: the name of the slave
:type name: `basestring`
:param properties: the slave configuration
:type properties: `dict`
:return: the allocated build, or `None` if no build was found
:rtype: `Build`
"""
self.log.debug('Checking for pending builds...')
self.reset_orphaned_builds()
# Iterate through pending builds by descending revision timestamp, to
# avoid the first configuration/platform getting all the builds
platforms = [p.id for p in self.match_slave(name, properties)]
builds_to_delete = []
build_found = False
for build in Build.select(self.env, status=Build.PENDING):
config_path = BuildConfig.fetch(self.env, name=build.config).path
_name, repos, _path = get_repos(self.env, config_path, None)
if self.should_delete_build(build, repos):
self.log.info('Scheduling build %d for deletion', build.id)
builds_to_delete.append(build)
elif build.platform in platforms:
build_found = True
break
if not build_found:
self.log.debug('No pending builds.')
build = None
# delete any obsolete builds
for build_to_delete in builds_to_delete:
build_to_delete.delete()
if build:
build.slave = name
build.slave_info.update(properties)
build.status = Build.IN_PROGRESS
build.update()
return build
def match_slave(self, name, properties):
"""Match a build slave against available target platforms.
:param name: the name of the slave
:type name: `basestring`
:param properties: the slave configuration
:type properties: `dict`
:return: the list of platforms the slave matched
"""
platforms = []
for config in BuildConfig.select(self.env):
for platform in TargetPlatform.select(self.env, config=config.name):
match = True
for propname, pattern in ifilter(None, platform.rules):
try:
propvalue = properties.get(propname)
if not propvalue or not re.match(pattern,
propvalue, re.I):
match = False
break
except re.error:
self.log.error('Invalid platform matching pattern "%s"',
pattern, exc_info=True)
match = False
break
if match:
self.log.debug('Slave %r matched target platform %r of '
'build configuration %r', name,
platform.name, config.name)
platforms.append(platform)
if not platforms:
self.log.warning('Slave %r matched none of the target platforms',
name)
return platforms
def populate(self):
"""Add a build for the next change on each build configuration to the
queue.
The next change is the latest repository check-in for which there isn't
a corresponding build on each target platform. Repeatedly calling this
method will eventually result in the entire change history of the build
configuration being in the build queue.
"""
builds = []
for config in BuildConfig.select(self.env):
platforms = []
for platform, rev, build in collect_changes(config):
if not self.build_all and platform.id in platforms:
# We've seen this platform already, so these are older
# builds that should only be built if built_all=True
self.log.debug('Ignoring older revisions for configuration '
'%r on %r', config.name, platform.name)
break
platforms.append(platform.id)
if build is None:
self.log.info('Enqueuing build of configuration "%s" at '
'revision [%s] on %s', config.name, rev,
platform.name)
_repos_name, repos, _repos_path = get_repos(
self.env, config.path, None)
rev_time = to_timestamp(repos.get_changeset(rev).date)
age = int(time.time()) - rev_time
if self.stabilize_wait and age < self.stabilize_wait:
self.log.info('Delaying build of revision %s until %s '
'seconds pass. Current age is: %s '
'seconds' % (rev, self.stabilize_wait,
age))
continue
build = Build(self.env, config=config.name,
platform=platform.id, rev=str(rev),
rev_time=rev_time)
builds.append(build)
for build in builds:
try:
build.insert()
except Exception, e:
# really only want to catch IntegrityErrors raised when
# a second slave attempts to add builds with the same
# (config, platform, rev) as an existing build.
self.log.info('Failed to insert build of configuration "%s" '
'at revision [%s] on platform [%s]: %s',
build.config, build.rev, build.platform, e)
raise
def reset_orphaned_builds(self):
"""Reset all in-progress builds to ``PENDING`` state if they've been
running so long that the configured timeout has been reached.
This is used to cleanup after slaves that have unexpectedly cancelled
a build without notifying the master, or are for some other reason not
reporting back status updates.
"""
if not self.timeout:
# If no timeout is set, none of the in-progress builds can be
# considered orphaned
return
with self.env.db_transaction as db:
now = int(time.time())
for build in Build.select(self.env, status=Build.IN_PROGRESS):
if now - build.last_activity < self.timeout:
# This build has not reached the timeout yet, assume it's still
# being executed
continue
self.log.info('Orphaning build %d. Last activity was %s (%s)' % \
(build.id, format_datetime(build.last_activity),
pretty_timedelta(build.last_activity)))
build.status = Build.PENDING
build.slave = None
build.slave_info = {}
build.started = 0
build.stopped = 0
build.last_activity = 0
for step in list(BuildStep.select(self.env, build=build.id)):
step.delete()
build.update()
Attachment.delete_all(self.env, 'build', build.resource.id)
#commit
def should_delete_build(self, build, repos):
config = BuildConfig.fetch(self.env, build.config)
config_name = config and config.name \
or 'unknown config "%s"' % build.config
platform = TargetPlatform.fetch(self.env, build.platform)
# Platform may or may not exist anymore - get safe name for logging
platform_name = platform and platform.name \
or 'unknown platform "%s"' % build.platform
# Drop build if platform no longer exists
if not platform:
self.log.info('Dropping build of configuration "%s" at '
'revision [%s] on %s because the platform no longer '
'exists', config.name, build.rev, platform_name)
return True
        # Ignore pending builds for deactivated build configs
if not (config and config.active):
self.log.info('Dropping build of configuration "%s" at '
'revision [%s] on %s because the configuration is '
'deactivated', config_name, build.rev, platform_name)
return True
# Stay within the revision limits of the build config
if (config.min_rev and repos.rev_older_than(build.rev,
config.min_rev)) \
or (config.max_rev and repos.rev_older_than(config.max_rev,
build.rev)):
self.log.info('Dropping build of configuration "%s" at revision [%s] on '
'"%s" because it is outside of the revision range of the '
'configuration', config.name, build.rev, platform_name)
return True
# If not 'build_all', drop if a more recent revision is available
if not self.build_all and \
len(list(Build.select(self.env, config=build.config,
min_rev_time=build.rev_time, platform=build.platform))) > 1:
self.log.info('Dropping build of configuration "%s" at revision [%s] '
'on "%s" because a more recent build exists',
config.name, build.rev, platform_name)
return True
return False
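# --- Added illustration (editor's sketch, not part of the original module) ---
# The heart of BuildQueue.match_slave: every (property, regex) rule of a target
# platform must match the slave's reported properties, case-insensitively.  The
# property names and patterns below are invented for the example.
def _example_rules_match(properties, rules):
    for propname, pattern in rules:
        value = properties.get(propname)
        if not value or not re.match(pattern, value, re.I):
            return False
    return True

# _example_rules_match({'os': 'Linux', 'machine': 'x86_64'},
#                      [('os', 'linux'), ('machine', 'x86.*')])   # -> True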
| 40.973761
| 85
| 0.570656
| 10,867
| 0.773232
| 2,039
| 0.145083
| 0
| 0
| 0
| 0
| 5,576
| 0.396755
|
3f4449fb416b741bfe8100121589dabfd4bff616
| 434
|
py
|
Python
|
dynamic-programming/Python/0120-triangle.py
|
lemonnader/LeetCode-Solution-Well-Formed
|
baabdb1990fd49ab82a712e121f49c4f68b29459
|
[
"Apache-2.0"
] | 1
|
2020-04-02T13:31:31.000Z
|
2020-04-02T13:31:31.000Z
|
dynamic-programming/Python/0120-triangle.py
|
lemonnader/LeetCode-Solution-Well-Formed
|
baabdb1990fd49ab82a712e121f49c4f68b29459
|
[
"Apache-2.0"
] | null | null | null |
dynamic-programming/Python/0120-triangle.py
|
lemonnader/LeetCode-Solution-Well-Formed
|
baabdb1990fd49ab82a712e121f49c4f68b29459
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
class Solution:
def minimumTotal(self, triangle: List[List[int]]) -> int:
size = len(triangle)
if size == 0:
return 0
dp = [0] * size
for i in range(size):
dp[i] = triangle[size - 1][i]
for i in range(size - 2, - 1, -1):
for j in range(i + 1):
dp[j] = min(dp[j], dp[j + 1]) + triangle[i][j]
return dp[0]
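# --- Added worked example (editor's illustration, not part of the original solution) ---
# For [[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]] the cheapest path is 2 + 3 + 5 + 1 = 11.
# dp starts as the bottom row [4, 1, 8, 3] and is overwritten in place:
# after row 2 -> [7, 6, 10, ...], after row 1 -> [9, 10, ...], after row 0 -> [11, ...]
# (entries past the current row length are stale and ignored).
if __name__ == "__main__":
    print(Solution().minimumTotal([[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]))  # 11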
| 27.125
| 62
| 0.47235
| 407
| 0.937788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3f4460e3255eb428d7e5749918a9b4a6ef898fc7
| 5,063
|
py
|
Python
|
regparser/tree/xml_parser/tree_utils.py
|
pkfec/regulations-parser
|
ff6b29dcce0449a133e7b93dd462ab3110f80a5d
|
[
"CC0-1.0"
] | 26
|
2016-06-04T20:48:09.000Z
|
2021-07-28T18:13:30.000Z
|
regparser/tree/xml_parser/tree_utils.py
|
pkfec/regulations-parser
|
ff6b29dcce0449a133e7b93dd462ab3110f80a5d
|
[
"CC0-1.0"
] | 146
|
2016-04-06T19:07:54.000Z
|
2022-01-02T20:09:53.000Z
|
regparser/tree/xml_parser/tree_utils.py
|
pkfec/regulations-parser
|
ff6b29dcce0449a133e7b93dd462ab3110f80a5d
|
[
"CC0-1.0"
] | 28
|
2016-04-09T20:40:48.000Z
|
2021-05-08T17:52:59.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from copy import deepcopy
from functools import wraps
from itertools import chain
from lxml import etree
from six.moves.html_parser import HTMLParser
from regparser.tree.priority_stack import PriorityStack
def prepend_parts(parts_prefix, n):
""" Recursively preprend parts_prefix to the parts of the node
n. Parts is a list of markers that indicates where you are in the
regulation text. """
n.label = parts_prefix + n.label
for c in n.children:
prepend_parts(parts_prefix, c)
return n
class NodeStack(PriorityStack):
""" The NodeStack aids our construction of a struct.Node tree. We process
xml one paragraph at a time; using a priority stack allows us to insert
items at their proper depth and unwind the stack (collecting children) as
necessary"""
def unwind(self):
""" Unwind the stack, collapsing sub-paragraphs that are on the stack
into the children of the previous level. """
children = self.pop()
parts_prefix = self.peek_last()[1].label
children = [prepend_parts(parts_prefix, c[1]) for c in children]
self.peek_last()[1].children = children
def collapse(self):
"""After all of the nodes have been inserted at their proper levels,
collapse them into a single root node"""
while self.size() > 1:
self.unwind()
return self.peek_last()[1]
def split_text(text, tokens):
"""
Given a body of text that contains tokens,
splice the text along those tokens.
"""
starts = [text.find(t) for t in tokens]
if not starts or starts[0] != 0:
starts.insert(0, 0)
slices = zip(starts, starts[1:])
texts = [text[i[0]:i[1]] for i in slices] + [text[starts[-1]:]]
return texts
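# Added doctest-style sketch (editor's illustration, not part of the original module):
# >>> split_text('(a) first item (b) second item', ['(a)', '(b)'])
# ['(a) first item ', '(b) second item']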
def _combine_with_space(prev_text, next_text, add_space_if_needed):
"""Logic to determine where to add spaces to XML. Generally this is just
    a matter of checking for space characters, but there are some
outliers"""
prev_text, next_text = prev_text or "", next_text or ""
prev_char, next_char = prev_text[-1:], next_text[:1]
needs_space = (not prev_char.isspace() and
not next_char.isspace() and
next_char and
prev_char not in u'([/<—-' and
next_char not in u').;,]>/—-')
if add_space_if_needed and needs_space:
return prev_text + " " + next_text
else:
return prev_text + next_text
def replace_xml_node_with_text(node, text):
"""There are some complications w/ lxml when determining where to add the
replacement text. Account for all of that here."""
parent, prev = node.getparent(), node.getprevious()
if prev is not None:
prev.tail = (prev.tail or '') + text
else:
parent.text = (parent.text or '') + text
parent.remove(node)
def replace_xpath(xpath):
"""Decorator to convert all elements matching the provided xpath in to
plain text. This'll convert the wrapped function into a new function which
will search for the provided xpath and replace all matches"""
def decorator(fn):
@wraps(fn)
def wrapped(node, add_spaces):
for element in node.xpath(xpath):
text = fn(element)
text = _combine_with_space(text, element.tail, add_spaces)
replace_xml_node_with_text(element, text)
return wrapped
return decorator
@replace_xpath(".//E[@T='52' or @T='54']")
def subscript_to_plaintext(element):
return "_{{{0}}}".format(element.text)
@replace_xpath(".//E[@T='51' or @T='53']|.//SU[not(@footnote)]")
def superscript_to_plaintext(element):
return "^{{{0}}}".format(element.text)
@replace_xpath(".//SU[@footnote]")
def footnotes_to_plaintext(element):
footnote = element.attrib['footnote']
footnote = footnote.replace('(', r'\(').replace(')', r'\)')
return u"[^{0}]({1})".format(element.text, footnote)
def get_node_text(node, add_spaces=False):
""" Extract all the text from an XML node (including the text of it's
children). """
node = deepcopy(node)
subscript_to_plaintext(node, add_spaces)
superscript_to_plaintext(node, add_spaces)
footnotes_to_plaintext(node, add_spaces)
parts = [node.text] + list(
chain(*([c.text, c.tail] for c in node.getchildren())))
final_text = ''
for part in filter(bool, parts):
final_text = _combine_with_space(final_text, part, add_spaces)
return final_text.strip()
_tag_black_list = ('PRTPAGE', )
def get_node_text_tags_preserved(xml_node):
"""Get the body of an XML node as a string, avoiding a specific blacklist
of bad tags."""
xml_node = deepcopy(xml_node)
etree.strip_tags(xml_node, *_tag_black_list)
# Remove the wrapping tag
node_text = xml_node.text or ''
node_text += ''.join(etree.tounicode(child) for child in xml_node)
node_text = HTMLParser().unescape(node_text)
return node_text
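# --- Added example (editor's sketch, not part of the original module) ---
# Subscript markup (E tags with T='52'/'54') is flattened into the _{...} plain-text
# form before the node text is joined; assumes lxml and this module are importable.
if __name__ == "__main__":
    demo = etree.fromstring('<P>Hello <E T="52">2</E> world</P>')
    print(get_node_text(demo))  # Hello _{2} world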
| 33.309211
| 78
| 0.661268
| 876
| 0.172883
| 0
| 0
| 765
| 0.150977
| 0
| 0
| 1,676
| 0.330768
|
3f450a61b8e2b1852d0f1a4d826ca4c04fcbb6db
| 10,638
|
py
|
Python
|
aiida/orm/implementation/querybuilder.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | 1
|
2019-07-31T04:08:13.000Z
|
2019-07-31T04:08:13.000Z
|
aiida/orm/implementation/querybuilder.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/orm/implementation/querybuilder.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Backend query implementation classes"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import abc
import six
from aiida.common import exceptions
from aiida.common.lang import abstractclassmethod, type_check
from aiida.common.exceptions import InputValidationError
__all__ = ('BackendQueryBuilder',)
@six.add_metaclass(abc.ABCMeta)
class BackendQueryBuilder(object):
"""Backend query builder interface"""
# pylint: disable=invalid-name,too-many-public-methods,useless-object-inheritance
outer_to_inner_schema = None
inner_to_outer_schema = None
def __init__(self, backend):
"""
:param backend: the backend
"""
from . import backends
type_check(backend, backends.Backend)
self._backend = backend
self.inner_to_outer_schema = dict()
self.outer_to_inner_schema = dict()
@abc.abstractmethod
def Node(self):
"""
Decorated as a property, returns the implementation for DbNode.
It needs to return a subclass of sqlalchemy.Base, which means that for different ORM's
a corresponding dummy-model must be written.
"""
@abc.abstractmethod
def Link(self):
"""
A property, decorated with @property. Returns the implementation for the DbLink
"""
@abc.abstractmethod
def Computer(self):
"""
A property, decorated with @property. Returns the implementation for the Computer
"""
@abc.abstractmethod
def User(self):
"""
A property, decorated with @property. Returns the implementation for the User
"""
@abc.abstractmethod
def Group(self):
"""
A property, decorated with @property. Returns the implementation for the Group
"""
@abc.abstractmethod
def AuthInfo(self):
"""
A property, decorated with @property. Returns the implementation for the AuthInfo
"""
@abc.abstractmethod
def Comment(self):
"""
A property, decorated with @property. Returns the implementation for the Comment
"""
@abc.abstractmethod
def Log(self):
"""
A property, decorated with @property. Returns the implementation for the Log
"""
@abc.abstractmethod
def table_groups_nodes(self):
"""
A property, decorated with @property. Returns the implementation for the many-to-many
relationship between group and nodes.
"""
@property
def AiidaNode(self):
"""
A property, decorated with @property. Returns the implementation for the AiiDA-class for Node
"""
from aiida.orm import Node
return Node
@abc.abstractmethod
def get_session(self):
"""
:returns: a valid session, an instance of sqlalchemy.orm.session.Session
"""
@abc.abstractmethod
def modify_expansions(self, alias, expansions):
"""
Modify names of projections if ** was specified.
This is important for the schema having attributes in a different table.
"""
@abstractclassmethod
def get_filter_expr_from_attributes(cls, operator, value, attr_key, column=None, column_name=None, alias=None): # pylint: disable=too-many-arguments
"""
        Returns a valid SQLAlchemy expression.
:param operator: The operator provided by the user ('==', '>', ...)
:param value: The value to compare with, e.g. (5.0, 'foo', ['a','b'])
:param str attr_key:
The path to that attribute as a tuple of values.
I.e. if that attribute I want to filter by is the 2nd element in a list stored under the
key 'mylist', this is ('mylist', '2').
:param column: Optional, an instance of sqlalchemy.orm.attributes.InstrumentedAttribute or
:param str column_name: The name of the column, and the backend should get the InstrumentedAttribute.
:param alias: The aliased class.
:returns: An instance of sqlalchemy.sql.elements.BinaryExpression
"""
@classmethod
def get_corresponding_properties(cls, entity_table, given_properties, mapper):
"""
This method returns a list of updated properties for a given list of properties.
If there is no update for the property, the given property is returned in the list.
"""
if entity_table in mapper.keys():
res = list()
for given_property in given_properties:
res.append(cls.get_corresponding_property(entity_table, given_property, mapper))
return res
return given_properties
@classmethod
def get_corresponding_property(cls, entity_table, given_property, mapper):
"""
This method returns an updated property for a given a property.
If there is no update for the property, the given property is returned.
"""
try:
# Get the mapping for the specific entity_table
property_mapping = mapper[entity_table]
try:
# Get the mapping for the specific property
return property_mapping[given_property]
except KeyError:
# If there is no mapping, the property remains unchanged
return given_property
except KeyError:
            # If it doesn't exist, it means that the given_property remains unchanged
return given_property
@classmethod
def get_filter_expr_from_column(cls, operator, value, column):
"""
        A method that returns a valid SQLAlchemy expression.
:param operator: The operator provided by the user ('==', '>', ...)
:param value: The value to compare with, e.g. (5.0, 'foo', ['a','b'])
:param column: an instance of sqlalchemy.orm.attributes.InstrumentedAttribute or
:returns: An instance of sqlalchemy.sql.elements.BinaryExpression
"""
# Label is used because it is what is returned for the
# 'state' column by the hybrid_column construct
# Remove when https://github.com/PyCQA/pylint/issues/1931 is fixed
# pylint: disable=no-name-in-module,import-error
from sqlalchemy.sql.elements import Cast, Label
from sqlalchemy.orm.attributes import InstrumentedAttribute, QueryableAttribute
from sqlalchemy.sql.expression import ColumnClause
from sqlalchemy.types import String
if not isinstance(column, (Cast, InstrumentedAttribute, QueryableAttribute, Label, ColumnClause)):
raise TypeError('column ({}) {} is not a valid column'.format(type(column), column))
database_entity = column
if operator == '==':
expr = database_entity == value
elif operator == '>':
expr = database_entity > value
elif operator == '<':
expr = database_entity < value
elif operator == '>=':
expr = database_entity >= value
elif operator == '<=':
expr = database_entity <= value
elif operator == 'like':
# the like operator expects a string, so we cast to avoid problems
# with fields like UUID, which don't support the like operator
expr = database_entity.cast(String).like(value)
elif operator == 'ilike':
expr = database_entity.ilike(value)
elif operator == 'in':
expr = database_entity.in_(value)
else:
raise InputValidationError('Unknown operator {} for filters on columns'.format(operator))
return expr
@abc.abstractmethod
def get_projectable_attribute(self, alias, column_name, attrpath, cast=None, **kwargs):
pass
@abc.abstractmethod
def get_aiida_res(self, key, res):
"""
        Some instances returned by the ORM (Django or SA) need to be converted
        to AiiDA instances (e.g. nodes)
:param key: the key that this entry would be returned with
:param res: the result returned by the query
:returns: an aiida-compatible instance
"""
@abc.abstractmethod
def yield_per(self, query, batch_size):
"""
:param int batch_size: Number of rows to yield per step
        Yields *batch_size* rows at a time
:returns: a generator
"""
@abc.abstractmethod
def count(self, query):
"""
:returns: the number of results
"""
@abc.abstractmethod
def first(self, query):
"""
Executes query in the backend asking for one instance.
:returns: One row of aiida results
"""
@abc.abstractmethod
def iterall(self, query, batch_size, tag_to_index_dict):
"""
:return: An iterator over all the results of a list of lists.
"""
@abc.abstractmethod
def iterdict(self, query, batch_size, tag_to_projected_properties_dict, tag_to_alias_map):
"""
:returns: An iterator over all the results of a list of dictionaries.
"""
@abc.abstractmethod
def get_column_names(self, alias):
"""
Return the column names of the given table (alias).
"""
def get_column(self, colname, alias): # pylint: disable=no-self-use
"""
Return the column for a given projection.
"""
try:
return getattr(alias, colname)
except AttributeError:
raise exceptions.InputValidationError("{} is not a column of {}\n"
"Valid columns are:\n"
"{}".format(
colname,
alias,
'\n'.join(alias._sa_class_manager.mapper.c.keys()) # pylint: disable=protected-access
))
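# --- Added illustration (editor's sketch, not part of the original module) ---
# The kind of SQLAlchemy expressions get_filter_expr_from_column builds for a few
# operators; the column names are invented, and ``column()`` yields a ColumnClause,
# which the isinstance check above accepts:
#
#     from sqlalchemy import String
#     from sqlalchemy.sql import column
#     column('uuid').cast(String).like('ab12%')   # the 'like' branch
#     column('id').in_([1, 2, 3])                 # the 'in' branch
#     column('ctime') >= 1500000000               # comparison operators map directly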
| 36.682759
| 153
| 0.601805
| 9,605
| 0.902895
| 0
| 0
| 9,637
| 0.905903
| 0
| 0
| 5,646
| 0.530739
|
3f45b952c9fbaad033d9a0d8b00c659fec74f672
| 675
|
py
|
Python
|
qiskit/util.py
|
alejomonbar/qiskit-terra
|
207fe593f6f616b0d55b43afe4451dcaa672871a
|
[
"Apache-2.0"
] | null | null | null |
qiskit/util.py
|
alejomonbar/qiskit-terra
|
207fe593f6f616b0d55b43afe4451dcaa672871a
|
[
"Apache-2.0"
] | null | null | null |
qiskit/util.py
|
alejomonbar/qiskit-terra
|
207fe593f6f616b0d55b43afe4451dcaa672871a
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=wildcard-import,unused-wildcard-import
"""Common utilities for Qiskit."""
# Deprecated: for backwards compatibility to be removed in a future release
from qiskit.utils import *
| 35.526316
| 77
| 0.764444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 631
| 0.934815
|
3f46011535915198f4025241d624f246d85211f4
| 1,049
|
py
|
Python
|
examples/plot_spirals.py
|
zblz/gammapy
|
49539f25886433abeedc8852387ab4cd73977006
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plot_spirals.py
|
zblz/gammapy
|
49539f25886433abeedc8852387ab4cd73977006
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plot_spirals.py
|
zblz/gammapy
|
49539f25886433abeedc8852387ab4cd73977006
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Plot Milky Way spiral arm models.
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from gammapy.astro.population.spatial import ValleeSpiral, FaucherSpiral
vallee_spiral = ValleeSpiral()
faucher_spiral = FaucherSpiral()
#theta = np.arange(0, 720)
radius = np.arange(2.1, 20, 0.1)
for spiralarm_index in range(4):
# Plot Vallee spiral
x, y = vallee_spiral.xy_position(radius=radius, spiralarm_index=spiralarm_index)
name = vallee_spiral.spiralarms[spiralarm_index]
plt.plot(x, y, label=name)
# Plot Faucher spiral
x, y = faucher_spiral.xy_position(radius=radius, spiralarm_index=spiralarm_index)
name = faucher_spiral.spiralarms[spiralarm_index]
plt.plot(x, y, ls='-.', label='Faucher ' + name)
plt.plot(vallee_spiral.bar['x'], vallee_spiral.bar['y'])
plt.xlim(-10, 10)
plt.ylim(-10, 10)
plt.legend(ncol=2)
filename = 'valee_spiral.pdf'
print('Writing {0}'.format(filename))
plt.savefig(filename)
| 28.351351
| 85
| 0.737846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 227
| 0.216397
|
3f464d492cb70cfcafd85a5cef1d4df43430ab0b
| 7,462
|
py
|
Python
|
pytc/fitters/bayesian.py
|
jharman25/pytc
|
d9ccde3f04e35a3d821ff37a4ad42e62a048d4ac
|
[
"Unlicense"
] | 20
|
2017-04-27T16:30:03.000Z
|
2021-08-12T19:42:05.000Z
|
pytc/fitters/bayesian.py
|
jharman25/pytc
|
d9ccde3f04e35a3d821ff37a4ad42e62a048d4ac
|
[
"Unlicense"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
Lib/site-packages/pytc/fitters/bayesian.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 6
|
2016-06-23T00:54:21.000Z
|
2020-05-19T05:24:20.000Z
|
__description__ = \
"""
Fitter subclass for performing Bayesian (MCMC) fits.
"""
__author__ = "Michael J. Harms"
__date__ = "2017-05-10"
from .base import Fitter
import emcee, corner
import numpy as np
import scipy.optimize as optimize
import multiprocessing
class BayesianFitter(Fitter):
"""
"""
def __init__(self,num_walkers=100,initial_walker_spread=1e-4,ml_guess=True,
num_steps=100,burn_in=0.1,num_threads=1):
"""
Initialize the bayesian fitter
Parameters
----------
num_walkers : int > 0
how many markov chains to have in the analysis
initial_walker_spread : float
each walker is initialized with parameters sampled from normal
distributions with mean equal to the initial guess and a standard
deviation of guess*initial_walker_spread
ml_guess : bool
if true, do an ML optimization to get the initial guess
        num_steps : int
number of steps to run the markov chains
burn_in : float between 0 and 1
fraction of samples to discard from the start of the run
num_threads : int or `"max"`
number of threads to use. if `"max"`, use the total number of
cpus. [NOT YET IMPLEMENTED]
"""
Fitter.__init__(self)
self._num_walkers = num_walkers
self._initial_walker_spread = initial_walker_spread
self._ml_guess = ml_guess
self._num_steps = num_steps
self._burn_in = burn_in
self._num_threads = num_threads
if self._num_threads == "max":
self._num_threads = multiprocessing.cpu_count()
        if not (isinstance(self._num_threads, int) and self._num_threads > 0):
err = "num_threads must be 'max' or a positive integer\n"
raise ValueError(err)
if self._num_threads != 1:
err = "multithreading has not yet been (fully) implemented.\n"
raise NotImplementedError(err)
self._success = None
self.fit_type = "bayesian"
def ln_prior(self,param):
"""
Log prior of fit parameters. Priors are uniform between bounds and
set to -np.inf outside of bounds.
Parameters
----------
param : array of floats
parameters to fit
Returns
-------
float value for log of priors.
"""
        # If a parameter falls outside of the bounds, make the prior -infinity
if np.sum(param < self._bounds[0,:]) > 0 or np.sum(param > self._bounds[1,:]) > 0:
return -np.inf
# otherwise, uniform
return 0.0
def ln_prob(self,param):
"""
Posterior probability of model parameters.
Parameters
----------
param : array of floats
parameters to fit
Returns
-------
float value for log posterior proability
"""
        # Calculate prior. If not finite, this solution has a log
        # likelihood of -infinity
ln_prior = self.ln_prior(param)
if not np.isfinite(ln_prior):
return -np.inf
        # Calculate likelihood. If not finite, this solution has a log
        # likelihood of -infinity
ln_like = self.ln_like(param)
if not np.isfinite(ln_like):
return -np.inf
# log posterior is log prior plus log likelihood
return ln_prior + ln_like
def fit(self,model,parameters,bounds,y_obs,y_err=None,param_names=None):
"""
Fit the parameters.
Parameters
----------
model : callable
model to fit. model should take "parameters" as its only argument.
this should (usually) be GlobalFit._y_calc
parameters : array of floats
parameters to be optimized. usually constructed by GlobalFit._prep_fit
bounds : list
list of two lists containing lower and upper bounds
y_obs : array of floats
observations in an concatenated array
y_err : array of floats or None
standard deviation of each observation. if None, each observation
is assigned an error of 1/num_obs
param_names : array of str
names of parameters. If None, parameters assigned names p0,p1,..pN
"""
self._model = model
self._y_obs = y_obs
# Convert the bounds (list of lower and upper lists) into a 2d numpy array
self._bounds = np.array(bounds)
# If no error is specified, assign the error as 1/N, identical for all
# points
self._y_err = y_err
if y_err is None:
self._y_err = np.array([1/len(self._y_obs) for i in range(len(self._y_obs))])
if param_names is None:
self._param_names = ["p{}".format(i) for i in range(len(parameters))]
else:
self._param_names = param_names[:]
        # Make initial guess (ML or just whatever the parameters sent in were)
if self._ml_guess:
fn = lambda *args: -self.weighted_residuals(*args)
ml_fit = optimize.least_squares(fn,x0=parameters,bounds=self._bounds)
self._initial_guess = np.copy(ml_fit.x)
else:
self._initial_guess = np.copy(parameters)
# Create walker positions
# Size of perturbation in parameter depends on the scale of the parameter
perturb_size = self._initial_guess*self._initial_walker_spread
ndim = len(parameters)
pos = [self._initial_guess + np.random.randn(ndim)*perturb_size
for i in range(self._num_walkers)]
# Sample using walkers
self._fit_result = emcee.EnsembleSampler(self._num_walkers, ndim, self.ln_prob,
threads=self._num_threads)
self._fit_result.run_mcmc(pos, self._num_steps)
# Create list of samples
to_discard = int(round(self._burn_in*self._num_steps,0))
self._samples = self._fit_result.chain[:,to_discard:,:].reshape((-1,ndim))
self._lnprob = self._fit_result.lnprobability[:,:].reshape(-1)
# Get mean and standard deviation
self._estimate = np.mean(self._samples,axis=0)
self._stdev = np.std(self._samples,axis=0)
# Calculate 95% confidence intervals
self._ninetyfive = []
lower = int(round(0.025*self._samples.shape[0],0))
upper = int(round(0.975*self._samples.shape[0],0))
for i in range(self._samples.shape[1]):
nf = np.sort(self._samples[:,i])
self._ninetyfive.append([nf[lower],nf[upper]])
self._ninetyfive = np.array(self._ninetyfive)
self._success = True
@property
def fit_info(self):
"""
Information about the Bayesian run.
"""
output = {}
output["Num walkers"] = self._num_walkers
output["Initial walker spread"] = self._initial_walker_spread
output["Use ML guess"] = self._ml_guess
output["Num steps"] = self._num_steps
output["Burn in"] = self._burn_in
output["Final sample number"] = len(self._samples[:,0])
output["Num threads"] = self._num_threads
return output
@property
def samples(self):
"""
Bayesian samples.
"""
return self._samples
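# --- Added sketch (editor's illustration, not part of the original module) ---
# The uniform prior in ln_prior is only a bounds check: 0.0 inside the box,
# -inf outside.  The bounds and trial vectors below are made-up numbers.
if __name__ == "__main__":
    demo_bounds = np.array([[0.0, -5.0], [10.0, 5.0]])  # row 0: lower, row 1: upper
    for trial in (np.array([3.0, 1.0]), np.array([12.0, 1.0])):
        outside = np.sum(trial < demo_bounds[0, :]) > 0 or np.sum(trial > demo_bounds[1, :]) > 0
        print(-np.inf if outside else 0.0)  # prints 0.0 then -inf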
| 32.163793
| 90
| 0.598767
| 7,196
| 0.964353
| 0
| 0
| 643
| 0.08617
| 0
| 0
| 3,422
| 0.45859
|
3f473e7173cd4e6d679a1656ee0296fc204724d2
| 166
|
py
|
Python
|
code/taskB/models.py
|
nft-appraiser/nft-appraiser-api
|
6d6495049851afd3d9bfc6969d0e1c9bc430dc81
|
[
"MIT"
] | null | null | null |
code/taskB/models.py
|
nft-appraiser/nft-appraiser-api
|
6d6495049851afd3d9bfc6969d0e1c9bc430dc81
|
[
"MIT"
] | null | null | null |
code/taskB/models.py
|
nft-appraiser/nft-appraiser-api
|
6d6495049851afd3d9bfc6969d0e1c9bc430dc81
|
[
"MIT"
] | null | null | null |
from django.db import models
class TaskB_table(models.Model):
img = models.ImageField(upload_to='taskB/', default='defo')
    pred_price = models.FloatField()
| 23.714286
| 63
| 0.728916
| 134
| 0.807229
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.084337
|
3f47e5cac2344784ba9a8fd0999bd621214986ec
| 669
|
py
|
Python
|
DesignPatterns/FactoryPattern/SimpleFactory/autoFactory.py
|
Py-Himanshu-Patel/Learn-Python
|
47a50a934cabcce3b1cbdd4c88141a51f21d3a05
|
[
"MIT"
] | null | null | null |
DesignPatterns/FactoryPattern/SimpleFactory/autoFactory.py
|
Py-Himanshu-Patel/Learn-Python
|
47a50a934cabcce3b1cbdd4c88141a51f21d3a05
|
[
"MIT"
] | null | null | null |
DesignPatterns/FactoryPattern/SimpleFactory/autoFactory.py
|
Py-Himanshu-Patel/Learn-Python
|
47a50a934cabcce3b1cbdd4c88141a51f21d3a05
|
[
"MIT"
] | null | null | null |
from inspect import isclass, isabstract, getmembers
import autos
def isconcrete(obj):
return isclass(obj) and not isabstract(obj)
class AutoFactory:
vehicles = {} # { car model name: class for the car}
def __init__(self):
self.load_autos()
def load_autos(self):
classes = getmembers(autos, isconcrete)
for name, _type in classes:
if isclass(_type) and issubclass(_type, autos.AbstractAuto):
self.vehicles.update([[name, _type]])
def create_instance(self, carname):
if carname in self.vehicles:
return self.vehicles[carname]()
return autos.NullCar(carname)
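# --- Added usage sketch (editor's illustration, not part of the original module) ---
# Concrete model names depend on whatever the ``autos`` package defines, so the
# string below is hypothetical; unknown names fall back to autos.NullCar.
if __name__ == "__main__":
    factory = AutoFactory()
    print(sorted(factory.vehicles))              # all concrete AbstractAuto subclasses found
    car = factory.create_instance('ChevyVolt')   # NullCar('ChevyVolt') if not registered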
| 25.730769
| 72
| 0.647235
| 530
| 0.792227
| 0
| 0
| 0
| 0
| 0
| 0
| 38
| 0.056801
|
3f48698c2248b56650a5e482a06629cf79f5bbbd
| 9,023
|
py
|
Python
|
Costa Rican Household Poverty Level Prediction/tens.py
|
hautan/train_tf
|
0946c7a497703f13c156de9f0135296fd91127ee
|
[
"Apache-2.0"
] | null | null | null |
Costa Rican Household Poverty Level Prediction/tens.py
|
hautan/train_tf
|
0946c7a497703f13c156de9f0135296fd91127ee
|
[
"Apache-2.0"
] | null | null | null |
Costa Rican Household Poverty Level Prediction/tens.py
|
hautan/train_tf
|
0946c7a497703f13c156de9f0135296fd91127ee
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# We must always import the relevant libraries for our problem at hand. NumPy and TensorFlow are required for this example.
# https://www.kaggle.com/c/costa-rican-household-poverty-prediction/data#_=_
import numpy as np
np.set_printoptions(threshold='nan')
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
def toInt(x):
if x == 'yes':
return 1
else:
if x == 'no':
return 0
else:
return x
costa_rica_household = pd.read_csv('data/train.csv')
#x1 =
costa_rica_household.describe()
#x1["v2a1"]
costa_rica_household.head()
list(costa_rica_household.dtypes)
#costa_rica_household = costa_rica_household.fillna(0)
costa_rica_household = costa_rica_household.fillna(costa_rica_household.mean())
#costa_rica_household["idhogar"] = costa_rica_household["idhogar"].apply(lambda x: int(x, 16))
#costa_rica_household["dependency"] = costa_rica_household["dependency"].apply(lambda x: toInt(x))
#costa_rica_household["edjefe"] = costa_rica_household["edjefe"].apply(lambda x: toInt(x))//edjefa
#costa_rica_household.loc[costa_rica_household['dependency'] == "'<='"]
#v1 = costa_rica_household[costa_rica_household['dependency'].apply(lambda x: type(x) == str)]['dependency']
#col_name = costa_rica_household.columns
#print(list(col_name))
#costa_rica_household[["age", "SQBage", "agesq", "r4h1", "r4h2"]]
cols_to_norm = ['v2a1', 'hacdor', 'rooms', 'hacapo', 'v14a', 'refrig', 'v18q', 'v18q1',
'tamhog', 'tamviv', 'escolari', 'rez_esc', 'hhsize', 'paredblolad', 'paredzocalo', 'paredpreb', 'pareddes', 'paredmad', 'paredzinc', 'paredfibras',
'paredother', 'pisomoscer', 'pisocemento', 'pisoother', 'pisonatur', 'pisonotiene', 'pisomadera', 'techozinc', 'techoentrepiso', 'techocane', 'techootro',
'cielorazo', 'abastaguadentro', 'abastaguafuera', 'abastaguano', 'public', 'planpri', 'noelec', 'coopele', 'sanitario1', 'sanitario2', 'sanitario3',
'sanitario5', 'sanitario6', 'energcocinar1', 'energcocinar2', 'energcocinar3', 'energcocinar4', 'elimbasu1', 'elimbasu2', 'elimbasu3', 'elimbasu4',
'elimbasu5', 'elimbasu6', 'epared1', 'epared2', 'epared3', 'etecho1', 'etecho2', 'etecho3', 'eviv1', 'eviv2', 'eviv3', 'dis', 'male', 'female',
'estadocivil1', 'estadocivil2', 'estadocivil3', 'estadocivil4', 'estadocivil5', 'estadocivil6', 'estadocivil7', 'parentesco1', 'parentesco2',
'parentesco3', 'parentesco4', 'parentesco5', 'parentesco6', 'parentesco7', 'parentesco8', 'parentesco9', 'parentesco10', 'parentesco11',
'parentesco12', 'hogar_nin', 'hogar_adul', 'hogar_mayor', 'hogar_total', 'meaneduc', 'instlevel1',
'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5', 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9', 'bedrooms', 'overcrowding', 'tipovivi1',
'tipovivi2', 'tipovivi3', 'tipovivi4', 'tipovivi5', 'computer', 'television', 'mobilephone', 'qmobilephone', 'lugar1', 'lugar2', 'lugar3', 'lugar4',
'lugar5', 'lugar6', 'area1', 'area2', 'SQBescolari', 'SQBhogar_total', 'SQBedjefe', 'SQBhogar_nin', 'SQBovercrowding', 'SQBdependency',
'SQBmeaned', 'agesq']
cat_cols_to_norm = ['r4h1', 'r4h2', 'r4h3', 'r4m1', 'r4m2', 'r4m3', 'r4t1', 'r4t2', 'r4t3']
cols_of_interest = ['v2a1', 'hacdor', 'rooms', 'hacapo', 'v14a', 'refrig', 'v18q', 'v18q1', 'r4h1', 'r4h2', 'r4h3', 'r4m1', 'r4m2', 'r4m3', 'r4t1', 'r4t2', 'r4t3',
'tamhog', 'tamviv', 'escolari', 'rez_esc', 'hhsize', 'paredblolad', 'paredzocalo', 'paredpreb', 'pareddes', 'paredmad', 'paredzinc', 'paredfibras',
'paredother', 'pisomoscer', 'pisocemento', 'pisoother', 'pisonatur', 'pisonotiene', 'pisomadera', 'techozinc', 'techoentrepiso', 'techocane', 'techootro',
'cielorazo', 'abastaguadentro', 'abastaguafuera', 'abastaguano', 'public', 'planpri', 'noelec', 'coopele', 'sanitario1', 'sanitario2', 'sanitario3',
'sanitario5', 'sanitario6', 'energcocinar1', 'energcocinar2', 'energcocinar3', 'energcocinar4', 'elimbasu1', 'elimbasu2', 'elimbasu3', 'elimbasu4',
'elimbasu5', 'elimbasu6', 'epared1', 'epared2', 'epared3', 'etecho1', 'etecho2', 'etecho3', 'eviv1', 'eviv2', 'eviv3', 'dis', 'male', 'female',
'estadocivil1', 'estadocivil2', 'estadocivil3', 'estadocivil4', 'estadocivil5', 'estadocivil6', 'estadocivil7', 'parentesco1', 'parentesco2',
'parentesco3', 'parentesco4', 'parentesco5', 'parentesco6', 'parentesco7', 'parentesco8', 'parentesco9', 'parentesco10', 'parentesco11',
'parentesco12', 'hogar_nin', 'hogar_adul', 'hogar_mayor', 'hogar_total', 'meaneduc', 'instlevel1',
'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5', 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9', 'bedrooms', 'overcrowding', 'tipovivi1',
'tipovivi2', 'tipovivi3', 'tipovivi4', 'tipovivi5', 'computer', 'television', 'mobilephone', 'qmobilephone', 'lugar1', 'lugar2', 'lugar3', 'lugar4',
'lugar5', 'lugar6', 'area1', 'area2', 'SQBescolari', 'SQBhogar_total', 'SQBedjefe', 'SQBhogar_nin', 'SQBovercrowding', 'SQBdependency',
'SQBmeaned', 'agesq']
#costa_rica_household[cols_to_norm] = costa_rica_household[cols_to_norm].apply(lambda x: (x - x.min())/(x.max() - x.min()))
#costa_rica_household[cat_cols_to_norm] = costa_rica_household[cat_cols_to_norm].apply(lambda x: (x - x.min())/(x.max() - x.min()))
costa_rica_household[cols_of_interest] = costa_rica_household[cols_of_interest].apply(lambda x: (x - x.min())/(x.max() - x.min()))
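# Added worked example (editor's illustration, not part of the original script):
# the lambda above is plain column-wise min-max scaling onto [0, 1], e.g.
# >>> s = pd.Series([2, 4, 10])
# >>> ((s - s.min()) / (s.max() - s.min())).tolist()
# [0.0, 0.25, 1.0]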
feat_cols = []
for col_name in cols_to_norm:
col_name = tf.feature_column.numeric_column(col_name)
feat_cols.append(col_name)
age_range_count = [1,2,3,4,5,7]
r4h1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h1'), boundaries=age_range_count)
r4h2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h2'), boundaries=age_range_count)
r4h3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h3'), boundaries=age_range_count)
crossed_r4h = tf.feature_column.crossed_column([r4h1_bucket, r4h2_bucket, r4h3_bucket], 100)
#fc = [r4h1_bucket, r4h2_bucket, r4h3_bucket, crossed_r4h]
r4m1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m1'), boundaries=age_range_count)
r4m2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m2'), boundaries=age_range_count)
r4m3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m3'), boundaries=age_range_count)
crossed_r4m = tf.feature_column.crossed_column([r4m1_bucket, r4m2_bucket, r4m3_bucket], 100)
r4t1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t1'), boundaries=age_range_count)
r4t2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t2'), boundaries=age_range_count)
r4t3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t3'), boundaries=age_range_count)
crossed_r4t = tf.feature_column.crossed_column([r4t1_bucket, r4t2_bucket, r4t3_bucket], 100)
feat_cols.extend([r4h1_bucket, r4h2_bucket, r4h3_bucket, crossed_r4h, r4m1_bucket, r4m2_bucket, r4m3_bucket, crossed_r4m, r4t1_bucket, r4t2_bucket, r4t3_bucket, crossed_r4t])
len(feat_cols)
feat_cols[138]
estimator = tf.estimator.LinearClassifier(feature_columns=feat_cols, n_classes=4)
#costa_rica_household[(costa_rica_household.Target == 4)]
x_data = costa_rica_household.drop('Id', axis=1).drop('edjefa', axis=1).drop('idhogar', axis=1).drop('dependency', axis=1).drop('Target', axis=1)
#x_data['idhogar']
#x_data.describe()
#x_data.head()
labels = costa_rica_household['Target']
labels.head()
from sklearn.model_selection import train_test_split
X_train, X_eval, y_train, y_eval = train_test_split(x_data, labels, test_size=0.3, random_state=101)
print(X_train.shape, y_eval.shape)
input_func = tf.estimator.inputs.pandas_input_fn(x=X_train, y=y_train, batch_size=10, num_epochs=100, shuffle=True)
estimator.train(input_fn=input_func,steps=1000)
eval_input_func = tf.estimator.inputs.pandas_input_fn(x=X_eval, y=y_eval, batch_size=10, num_epochs=1, shuffle=False)
eval_metrics = estimator.evaluate(input_fn=eval_input_func)
print('Eval metrics')
print(eval_metrics)
pred_input_func = tf.estimator.inputs.pandas_input_fn(x=X_eval, shuffle=False)
predictions = []
for predict in estimator.predict(input_fn=pred_input_func):
predictions.append(predict)
predictions
#categorical_columun_voc = tf.feature_column.embedding_column(categorical_columun_voc, 4)
dnn_classifier = tf.estimator.DNNClassifier(hidden_units=[10, 10, 10], feature_columns=feat_cols, n_classes=2)
dnn_classifier.train(input_fn=input_func,steps=1000)
dnn_eval_metrics = dnn_classifier.evaluate(input_fn=eval_input_func)
dnn_eval_metrics
| 62.659722
| 174
| 0.715283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,456
| 0.493849
|
3f4b0ed4eea9580bec7a5e2d579164110301a866
| 4,095
|
py
|
Python
|
DTL_tests/unittests/test_api.py
|
rocktavious/DevToolsLib
|
117200c91a3361e04f7c8e07d2ed4999bbcfc469
|
[
"MIT"
] | 1
|
2015-03-23T18:52:12.000Z
|
2015-03-23T18:52:12.000Z
|
DTL_tests/unittests/test_api.py
|
rocktavious/DevToolsLib
|
117200c91a3361e04f7c8e07d2ed4999bbcfc469
|
[
"MIT"
] | null | null | null |
DTL_tests/unittests/test_api.py
|
rocktavious/DevToolsLib
|
117200c91a3361e04f7c8e07d2ed4999bbcfc469
|
[
"MIT"
] | 2
|
2017-05-21T12:50:41.000Z
|
2021-10-17T03:32:45.000Z
|
import os
import time
import unittest
from DTL.api import *
class TestCaseApiUtils(unittest.TestCase):
def setUp(self):
apiUtils.synthesize(self, 'mySynthesizeVar', None)
self.bit = apiUtils.BitTracker.getBit(self)
def test_wildcardToRe(self):
self.assertEquals(apiUtils.wildcardToRe('c:\CIG\main\*.*'),
'(?i)c\\:\\\\CIG\\\\main\\\\[^\\\\]*\\.[^\\\\]*$')
self.assertEquals(apiUtils.wildcardToRe('c:\CIG\main\*.*'),
apiUtils.wildcardToRe('c:/CIG/main/*.*'))
def test_synthesize(self):
self.assertIn('_mySynthesizeVar', self.__dict__)
self.assertTrue(hasattr(self, 'mySynthesizeVar'))
self.assertTrue(hasattr(self, 'getMySynthesizeVar'))
self.assertTrue(hasattr(self, 'setMySynthesizeVar'))
self.assertEqual(self.getMySynthesizeVar(), self.mySynthesizeVar)
def test_getClassName(self):
self.assertEqual(apiUtils.getClassName(self), 'TestCaseApiUtils')
def test_bittracker(self):
self.assertEqual(apiUtils.BitTracker.getBit(self), self.bit)
class TestCaseDotifyDict(unittest.TestCase):
def setUp(self):
self.dotifydict = DotifyDict({'one':{'two':{'three':'value'}}})
def test_dotifydict(self):
self.assertEquals(self.dotifydict.one.two, {'three':'value'})
self.dotifydict.one.two.update({'three':3,'four':4})
self.assertEquals(self.dotifydict.one.two.four, 4)
self.assertEquals(self.dotifydict.one, self.dotifydict.one)
self.assertIn('two.three', (self.dotifydict.one))
self.assertEquals(str(self.dotifydict), "DotifyDict(datadict={'one': DotifyDict(datadict={'two': DotifyDict(datadict={'four': 4, 'three': 3})})})")
self.assertEquals(self.dotifydict.one.two, eval(str(self.dotifydict.one.two)))
class TestCasePath(unittest.TestCase):
def setUp(self):
self.filepath = Path.getTempPath()
def test_path(self):
temp_path = Path.getTempPath()
self.assertEquals(self.filepath, temp_path)
self.assertEquals(self.filepath.name, temp_path.name)
self.assertEquals(self.filepath.parent, temp_path.parent)
self.assertIn(self.filepath.parent.parent.name, self.filepath)
myPathSepTest = Path('c:\\Users/krockman/documents').join('mytest')
self.assertEquals(myPathSepTest, os.path.join('c:','Users','krockman','documents','mytest'))
self.assertEquals({'TestKey', myPathSepTest},{'TestKey',os.path.join('c:','Users','krockman','documents','mytest')})
class TestCaseDocument(unittest.TestCase):
def setUp(self):
self.doc = Document({'Testing':'min'})
self.doc.filepath = Path.getTempPath().join('document.dat')
def test_document(self):
self.assertEquals(self.doc.filepath, Path.getTempPath().join('document.dat'))
self.assertEquals(self.doc, eval(str(self.doc)))
self.doc.save()
self.assertTrue(self.doc.filepath.exists())
def tearDown(self):
self.doc.filepath.remove()
class TestCaseVersion(unittest.TestCase):
def setUp(self):
self.version = Version('2.0.5.Beta')
def test_version(self):
self.assertEquals(self.version,(2,0,5,'Beta'))
self.assertEquals(self.version,'2.0.5.Beta')
self.assertEquals(self.version,eval(str(self.version)))
self.version.update({'status':VersionStatus.Gold})
self.assertNotEquals(self.version,(2,0,5,'Beta'))
class TestCaseDecorators(unittest.TestCase):
@Safe
def test_safe(self):
1/0
@Timer
def test_timer(self, timer):
for i in range(5):
time.sleep(2)
timer.newLap(i)
@Profile
def test_profile(self):
for i in range(5):
(1 / 20 * 5 - 10 + 15) == 1
def main():
unittest.main(verbosity=2)
if __name__ == '__main__':
main()
| 35.301724
| 155
| 0.616361
| 3,867
| 0.944322
| 0
| 0
| 265
| 0.064713
| 0
| 0
| 610
| 0.148962
|
3f4bd6114512c0dce72c018cd5c68157e1b63e0a
| 2,840
|
py
|
Python
|
src/yellow_ball/src/ball.py
|
AndyHUI711/ELEC3210-Group7
|
08e5d9a7566447349a33ef577499ac2edbb9d6c3
|
[
"IJG"
] | 1
|
2021-12-16T09:57:44.000Z
|
2021-12-16T09:57:44.000Z
|
src/yellow_ball/src/ball.py
|
AndyHUI711/ELEC3210-Group7
|
08e5d9a7566447349a33ef577499ac2edbb9d6c3
|
[
"IJG"
] | null | null | null |
src/yellow_ball/src/ball.py
|
AndyHUI711/ELEC3210-Group7
|
08e5d9a7566447349a33ef577499ac2edbb9d6c3
|
[
"IJG"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
import cv2
import math
import rospy
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import Bool
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist
bridge = CvBridge()
laser_scan_on = True
def auto_mode_callback(msg):
global laser_scan_on
laser_scan_on = msg.data
def image_callback(msg):
global cv2_img
try:
cv2_img = bridge.imgmsg_to_cv2(msg, "bgr8")
cv2_img = cv2.flip(cv2_img, 1)
hsv = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2HSV)
lower_yellow = np.array([20, 100, 100])
upper_yellow = np.array([30, 255, 255])
mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
#find ball
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
if len(contours) < 1 or laser_scan_on:
return
#locate ball
for c in contours:
M = cv2.moments(c)
cX = float(M["m10"]/M["m00"])
cY = float(M["m01"]/M["m00"])
rX = int(M["m10"]/M["m00"])
rY = int(M["m01"]/M["m00"])
radius = int(math.sqrt(cv2.contourArea(c)/math.pi))
h,w = cv2_img.shape[:2]
(ideal_X, ideal_Y) = (w/2, h-(20 + radius))
verticle_diff = cY-ideal_Y
angle_diff = cX-ideal_X
pub = rospy.Publisher('/vrep/cmd_vel', Twist, queue_size=10)
twist = Twist()
#linear
if verticle_diff <= -50:
twist.linear.x = 1.1
elif (verticle_diff > -50) & (verticle_diff < 0):
twist.linear.x = 0.5
elif verticle_diff >= 20:
twist.linear.x = -0.6
elif (verticle_diff <20) & (verticle_diff > 5):
twist.linear.x = -0.3
else:
twist.linear.x = 0
#angular
if angle_diff >= 30:
twist.angular.z = -1
elif (angle_diff < 30) & (angle_diff > 10):
twist.angular.z = -0.5
elif angle_diff <= -30:
twist.angular.z = 1
elif (angle_diff > -30) & (angle_diff < -10):
twist.angular.z = 0.5
else:
twist.angular.z = 0
pub.publish(twist)
copy_img = cv2_img.copy()
cv2.drawContours(copy_img, contours, -1, (0, 0, 255), 2)
cv2.circle(copy_img, (rX, rY), 3, (255, 0, 0), -1)
cv2.putText(copy_img, "centroid", (rX - 25, rY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
except CvBridgeError as err:
print(err)
def main():
rospy.init_node('ball', anonymous=True)
rospy.Subscriber('/vrep/laser_switch', Bool, auto_mode_callback)
rospy.Subscriber('/vrep/image', Image, image_callback)
rospy.spin()
if __name__ == '__main__':
main()
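# --- Added worked example (editor's illustration, not part of the original node) ---
# How image moments give the blob centroid used in image_callback: for a small
# binary mask the centroid is the intensity-weighted mean pixel coordinate.
# >>> demo = np.zeros((5, 5), np.uint8); demo[1:4, 2:5] = 255
# >>> M = cv2.moments(demo)
# >>> (M['m10'] / M['m00'], M['m01'] / M['m00'])
# (3.0, 2.0)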
| 28.979592
| 107
| 0.560915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.06338
|
3f4d396e7dff26260074f0fb74d95a3f3b759b61
| 7,358
|
py
|
Python
|
dlkit/json_/authentication/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 2
|
2018-02-23T12:16:11.000Z
|
2020-10-08T17:54:24.000Z
|
dlkit/json_/authentication/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 87
|
2017-04-21T18:57:15.000Z
|
2021-12-13T19:43:57.000Z
|
dlkit/json_/authentication/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 1
|
2018-03-01T16:44:25.000Z
|
2018-03-01T16:44:25.000Z
|
"""JSON implementations of authentication queries."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from .. import utilities
from ..osid import queries as osid_queries
from ..primitives import Id
from ..utilities import get_registry
from dlkit.abstract_osid.authentication import queries as abc_authentication_queries
from dlkit.abstract_osid.osid import errors
class AgentQuery(abc_authentication_queries.AgentQuery, osid_queries.OsidObjectQuery):
"""This is the query for searching agents.
Each method specifies an ``AND`` term while multiple invocations of
the same method produce a nested ``OR``.
The following example returns agents whose display name begins with
"Tom" and whose "login name" is "tom" or "tjcoppet" in an agent
record specified by ``companyAgentType``.
Agent Query query = session.getAgentQuery();
query.matchDisplayName("Tom*", wildcardStringMatchType, true);
companyAgentQuery = query.getAgentQueryRecord(companyAgentType);
companyAgentQuery.matchLoginName("tom");
companyAgentQuery = query.getAgentQueryRecord(companyAgentType);
companyAgentQuery.matchLoginName("tjcoppet");
AgentList agentList = session.getAgentsByQuery(query);
"""
def __init__(self, runtime):
self._namespace = 'authentication.Agent'
self._runtime = runtime
record_type_data_sets = get_registry('AGENT_RECORD_TYPES', runtime)
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_queries.OsidObjectQuery.__init__(self, runtime)
@utilities.arguments_not_none
def match_resource_id(self, agency_id, match):
"""Sets the resource ``Id`` for this query.
arg: agency_id (osid.id.Id): a resource ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``agency_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('resourceId', str(agency_id), match)
def clear_resource_id_terms(self):
"""Clears the resource ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('resourceId')
resource_id_terms = property(fdel=clear_resource_id_terms)
def supports_resource_query(self):
"""Tests if a ``ResourceQuery`` is available.
return: (boolean) - ``true`` if a resource query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_resource_query(self):
"""Gets the query for a resource.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.resource.ResourceQuery) - the resource query
raise: Unimplemented - ``supports_resource_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_resource_query()`` is ``true``.*
"""
raise errors.Unimplemented()
resource_query = property(fget=get_resource_query)
@utilities.arguments_not_none
def match_any_resource(self, match):
"""Matches agents with any resource.
arg: match (boolean): ``true`` if to match agents with a
resource, ``false`` to match agents with no resource
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_resource_terms(self):
"""Clears the resource terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
resource_terms = property(fdel=clear_resource_terms)
@utilities.arguments_not_none
def match_agency_id(self, agency_id, match):
"""Sets the agency ``Id`` for this query.
arg: agency_id (osid.id.Id): an agency ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: NullArgument - ``agency_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_bin_id
self._add_match('assignedAgencyIds', str(agency_id), match)
def clear_agency_id_terms(self):
"""Clears the agency ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_bin_id_terms
self._clear_terms('assignedAgencyIds')
agency_id_terms = property(fdel=clear_agency_id_terms)
def supports_agency_query(self):
"""Tests if an ``AgencyQuery`` is available.
return: (boolean) - ``true`` if an agency query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_agency_query(self):
"""Gets the query for an agency.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.authentication.AgencyQuery) - the agency query
raise: Unimplemented - ``supports_agency_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_agency_query()`` is ``true``.*
"""
raise errors.Unimplemented()
agency_query = property(fget=get_agency_query)
def clear_agency_terms(self):
"""Clears the agency terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('agency')
agency_terms = property(fdel=clear_agency_terms)
@utilities.arguments_not_none
def get_agent_query_record(self, agent_record_type):
"""Gets the agent query record corresponding to the given ``Agent`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
arg: agent_record_type (osid.type.Type): an agent record type
return: (osid.authentication.records.AgentQueryRecord) - the
agent query record
raise: NullArgument - ``agent_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(agent_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
| 36.425743
| 98
| 0.671786
| 6,658
| 0.904865
| 0
| 0
| 2,296
| 0.312041
| 0
| 0
| 4,940
| 0.671378
|
3f4d7a70b7445e8fd4a01a87b193501aed45d294
| 2,944
|
py
|
Python
|
PyStellar/stellar/Git/service/git_commit_service.py
|
psgstellar/Stellar
|
947d4b3d9d6b9c74d4c9ebd29683793a8d86fad2
|
[
"Apache-2.0"
] | 3
|
2021-01-24T17:07:55.000Z
|
2021-02-20T20:11:13.000Z
|
PyStellar/stellar/Git/service/git_commit_service.py
|
psgstellar/Stellar
|
947d4b3d9d6b9c74d4c9ebd29683793a8d86fad2
|
[
"Apache-2.0"
] | 61
|
2021-01-10T12:59:01.000Z
|
2021-06-24T09:19:20.000Z
|
PyStellar/stellar/Git/service/git_commit_service.py
|
psgstellar/Stellar
|
947d4b3d9d6b9c74d4c9ebd29683793a8d86fad2
|
[
"Apache-2.0"
] | 1
|
2021-01-14T05:23:32.000Z
|
2021-01-14T05:23:32.000Z
|
import requests
import dateutil.parser
import pytz
from Git.dao.git_dao import GitOwnerRepo
class GitCommitCheckService:
"""Github Public 저장소 커밋 기록 가져오기"""
@classmethod
def git_public_request(cls, request):
"""Commit 기록 요청"""
owner = request.GET['owner']
repo = request.GET['repo']
token = request.GET['token']
if request.GET.get('since', '') and request.GET.get('until', ''):
since = request.GET['since']
until = request.GET['until']
r = requests.get(f'https://api.github.com/repos/{owner}/{repo}/commits?my_client_id={owner}&since={since}&until={until}', headers={'Authorization': 'token '+token})
elif request.GET.get('since', ''):
since = request.GET['since']
r = requests.get(f'https://api.github.com/repos/{owner}/{repo}/commits?my_client_id={owner}&since={since}', headers={'Authorization': 'token '+token})
elif request.GET.get('until', ''):
until = request.GET['until']
r = requests.get(f'https://api.github.com/repos/{owner}/{repo}/commits?my_client_id={owner}&until={until}', headers={'Authorization': 'token '+token})
else:
r = requests.get(f'https://api.github.com/repos/{owner}/{repo}/commits?my_client_id={owner}', headers={'Authorization': 'token '+token})
data = r.json()
commit_json = None
commit_info = [None] * 4
        if isinstance(data, list):
            if data:
local_timezone = pytz.timezone('Asia/Seoul')
commit_json = []
for i in data:
for k, v in i.items():
if k == 'commit':
commit_info[1] = v['message']
commit_info[2] = (dateutil.parser.parse(v['author']['date'])).replace(tzinfo=pytz.utc).astimezone(local_timezone)
elif k == 'author':
commit_info[0] = v['login']
elif k == 'html_url':
commit_info[3] = v
commit_json.append({'username': commit_info[0],
'message': commit_info[1],
'date': commit_info[2],
'url': commit_info[3]})
else:
            commit_json = [{'username': owner, 'message': 'Invalid token or repository info', 'date': None, 'url': None}]
return commit_json
@classmethod
def git_commit_insert(cls, commit_list):
""" 깃 커밋 리스트를 디비에 저장"""
list_tuple = []
for i in commit_list:
list_tuple.append(tuple(i.values()))
insert_commit = GitOwnerRepo()
return_json = insert_commit.insert_git_commit(list_tuple)
return return_json
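# Illustrative sketch (not part of the original service): the four request branches in
# git_public_request above differ only in which optional query parameters are present.
# A compact equivalent, with fetch_commits / owner / repo / token as placeholder names:
def fetch_commits(owner, repo, token, since=None, until=None):
    # Build the optional query parameters once instead of branching per combination.
    params = {'my_client_id': owner}
    if since:
        params['since'] = since
    if until:
        params['until'] = until
    r = requests.get(f'https://api.github.com/repos/{owner}/{repo}/commits',
                     params=params,
                     headers={'Authorization': 'token ' + token})
    return r.json()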
| 43.940299
| 176
| 0.52038
| 2,902
| 0.967979
| 0
| 0
| 2,801
| 0.93429
| 0
| 0
| 840
| 0.280187
|
3f4ed1f83045dc59c913f12ff649d24264b9e68d
| 10,199
|
py
|
Python
|
apps/jetbrains/jetbrains.py
|
HansKlokkenspel/knausj_talon
|
af9254f9b5be73187573f113a42c905146c0aabd
|
[
"Unlicense"
] | null | null | null |
apps/jetbrains/jetbrains.py
|
HansKlokkenspel/knausj_talon
|
af9254f9b5be73187573f113a42c905146c0aabd
|
[
"Unlicense"
] | null | null | null |
apps/jetbrains/jetbrains.py
|
HansKlokkenspel/knausj_talon
|
af9254f9b5be73187573f113a42c905146c0aabd
|
[
"Unlicense"
] | null | null | null |
import os
import os.path
import requests
import time
from pathlib import Path
from talon import ctrl, ui, Module, Context, actions, clip
import tempfile
# Courtesy of https://github.com/anonfunc/talon-user/blob/master/apps/jetbrains.py
extendCommands = []
# Each IDE gets its own port, as otherwise you wouldn't be able
# to run two at the same time and switch between them.
# Note that MPS and IntelliJ ultimate will conflict...
port_mapping = {
"com.google.android.studio": 8652,
"com.jetbrains.AppCode": 8655,
"com.jetbrains.CLion": 8657,
"com.jetbrains.datagrip": 8664,
"com.jetbrains.goland-EAP": 8659,
"com.jetbrains.goland": 8659,
"com.jetbrains.intellij-EAP": 8653,
"com.jetbrains.intellij.ce": 8654,
"com.jetbrains.intellij": 8653,
"com.jetbrains.PhpStorm": 8662,
"com.jetbrains.pycharm": 8658,
"com.jetbrains.rider": 8660,
"com.jetbrains.rubymine": 8661,
"com.jetbrains.WebStorm": 8663,
"google-android-studio": 8652,
"idea64.exe": 8653,
"IntelliJ IDEA": 8653,
"jetbrains-appcode": 8655,
"jetbrains-clion": 8657,
"jetbrains-datagrip": 8664,
"jetbrains-goland-eap": 8659,
"jetbrains-goland": 8659,
"jetbrains-idea-ce": 8654,
"jetbrains-idea-eap": 8653,
"jetbrains-idea": 8653,
"jetbrains-phpstorm": 8662,
"jetbrains-pycharm-ce": 8658,
"jetbrains-pycharm": 8658,
"jetbrains-rider": 8660,
"jetbrains-rubymine": 8661,
"jetbrains-studio": 8652,
"jetbrains-webstorm": 8663,
"PyCharm": 8658,
"pycharm64.exe": 8658,
"webstorm64.exe": 8663,
}
select_verbs_map = {
"clear": ["action EditorBackSpace"],
"collapse": ["action CollapseRegion"],
"comment": ["action CommentByLineComment"],
"copy": ["action EditorCopy"],
"cut": ["action EditorCut"],
"drag down": ["action MoveLineDown"],
"drag up": ["action MoveLineUp"],
"expand": ["action ExpandRegion"],
"indent": ["action EditorIndentLineOrSelection"],
"refactor": ["action Refactorings.QuickListPopupAction"],
"rename": ["action RenameElement"],
"replace": ["action EditorPaste"],
"select": [],
"unindent": ["action EditorUnindentSelection"],
}
movement_verbs_map = {
"fix": ["action ShowIntentionActions"],
"go": [],
"paste": ["action EditorPaste"],
}
def set_extend(*commands):
def set_inner(_):
global extendCommands
extendCommands = commands
return set_inner
def _get_nonce(port, file_prefix):
file_name = file_prefix + str(port)
try:
with open(os.path.join(tempfile.gettempdir(), file_name), "r") as fh:
return fh.read()
except FileNotFoundError as e:
try:
home = str(Path.home())
with open(os.path.join(home, file_name), "r") as fh:
return fh.read()
except FileNotFoundError as eb:
print(f"Could not find {file_name} in tmp or home")
return None
except IOError as e:
print(e)
return None
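# Illustrative example (assumed port 8653): _get_nonce(8653, ".vcidea_") first looks for a
# file named ".vcidea_8653" in the temp directory and, failing that, in the home directory.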
def send_idea_command(cmd):
print("Sending {}".format(cmd))
active_app = ui.active_app()
bundle = active_app.bundle or active_app.name
port = port_mapping.get(bundle, None)
nonce = _get_nonce(port, ".vcidea_") or _get_nonce(port, "vcidea_")
print(f"sending {bundle} {port} {nonce}")
if port and nonce:
response = requests.get(
"http://localhost:{}/{}/{}".format(port, nonce, cmd), timeout=(0.05, 3.05)
)
response.raise_for_status()
return response.text
def get_idea_location():
return send_idea_command("location").split()
def idea_commands(commands):
command_list = commands.split(",")
print("executing jetbrains", commands)
global extendCommands
extendCommands = command_list
for cmd in command_list:
if cmd:
send_idea_command(cmd.strip())
time.sleep(0.1)
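# Illustrative example (port and nonce values are placeholders):
# idea_commands("action EditorCopy, goto 10 0") issues two GET requests such as
# http://localhost:8653/<nonce>/action EditorCopy and http://localhost:8653/<nonce>/goto 10 0,
# sleeping 0.1 s between them.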
ctx = Context()
mod = Module()
mod.list("select_verbs", desc="Verbs for selecting in the IDE")
mod.list("movement_verbs", desc="Verbs for navigating the IDE")
@mod.action_class
class Actions:
def idea(commands: str):
"""Send a command to Jetbrains product"""
idea_commands(commands)
def idea_select(select_verb: str, commands: str):
"""Do a select command, then the specified commands"""
command_list = ",".join(commands.split(",") + select_verbs_map[select_verb])
print(command_list)
idea_commands(command_list)
def idea_movement(movement_verb: str, commands: str):
"""Do a select movement, then the specified commands"""
command_list = ",".join(commands.split(",") + movement_verbs_map[movement_verb])
print(command_list)
idea_commands(command_list)
def idea_grab(times: int):
"""Copies specified number of words to the left"""
old_clip = clip.get()
try:
original_line, original_column = get_idea_location()
for _ in range(times):
send_idea_command("action EditorSelectWord")
send_idea_command("action EditorCopy")
send_idea_command("goto {} {}".format(original_line, original_column))
send_idea_command("action EditorPaste")
finally:
clip.set(old_clip)
global extendCommands
extendCommands = []
def extend_action(number: str):
"""Repeat previous actions up to number of times"""
global extendCommands
count = max(int(number), 1)
for _ in range(count):
for cmd in extendCommands:
send_idea_command(cmd)
def set_extended_actions(commands: str):
"""Adds specified commands to the list of commands to repeat"""
set_extend(commands.split(","))
ctx.matches = r"""
app: /jetbrains/
app: IntelliJ IDEA
app: idea64.exe
app: PyCharm
app: PyCharm64.exe
app: pycharm64.exe
app: webstorm64.exe
"""
@ctx.action_class("user")
class user_actions:
def tab_jump(number: int):
if number < 10:
actions.user.idea("action GoToTab{}".format(number))
def perform_selection_action(verb: str):
"""Performs selection action defined for context"""
acts = select_verbs_map[verb]
for act in acts:
            actions.user.idea(act)
def perform_movement_action(verb: str):
"""Performs movement action defined for context"""
acts = movement_verbs_map[verb]
for act in acts:
            actions.user.idea(act)
def select_next_occurrence(verbs: str, text: str):
actions.user.idea_select(verbs, "find next {}".format(text))
def select_previous_occurrence(verbs: str, text: str):
actions.user.idea_select(verbs, "find prev {}".format(text))
def move_next_occurrence(verbs: str, text: str):
actions.user.idea_movement(
verbs, "find next {}, action EditorRight".format(text)
)
def move_previous_occurrence(verbs: str, text: str):
actions.user.idea_select(verbs, "find prev {}, action EditorRight".format(text))
def go_to_line(verb: str, line: int):
actions.user.idea_movement(verb, "goto {} 0".format(line))
def go_to_line_end(verb: str, line: int):
actions.user.idea_movement(verb, "goto {} 9999".format(line))
def select_word(verb: str):
actions.user.idea_select(verb, "action EditorSelectWord")
def select_whole_line(verb: str, line: int):
actions.user.idea_select(
verb, "goto {} 0, action EditorSelectLine".format(line)
)
def select_current_line(verb: str):
actions.user.idea_select(
verb, "action EditorLineStart, action EditorLineEndWithSelection"
)
def select_line(verb: str, line: int):
actions.user.idea_select(
verb,
"goto {} 0, action EditorLineStart, action EditorLineEndWithSelection".format(
line
),
)
def select_until_line(verb: str, line: int):
actions.user.idea_select(verb, "extend {}".format(line))
def select_range(verb: str, line_start: int, line_end: int):
actions.user.idea_select(verb, "range {} {}".format(line_start, line_end))
def select_way_left(verb: str):
actions.user.idea_select(verb, "action EditorLineStartWithSelection")
def select_way_right(verb: str):
actions.user.idea_select(verb, "action EditorLineEndWithSelection")
def select_way_up(verb: str):
actions.user.idea_select(verb, "action EditorTextStartWithSelection")
def select_way_down(verb: str):
actions.user.idea_select(verb, "action EditorTextEndWithSelection")
def select_camel_left(verb: str):
actions.user.idea_select(
verb, "action EditorPreviousWordInDifferentHumpsModeWithSelection"
)
def select_camel_right(verb: str):
actions.user.idea_select(
verb, "action EditorNextWordInDifferentHumpsModeWithSelection"
)
def select_all(verb: str):
actions.user.idea_select(verb, "action $SelectAll")
def select_left(verb: str):
actions.user.idea_select(verb, "action EditorLeftWithSelection")
def select_right(verb: str):
actions.user.idea_select(verb, "action EditorRightWithSelection")
def select_up(verb: str):
actions.user.idea_select(verb, "action EditorUpWithSelection")
def select_down(verb: str):
actions.user.idea_select(verb, "action EditorDownWithSelection")
def select_word_left(verb: str):
actions.user.idea_select(verb, "action EditorPreviousWordWithSelection")
def select_word_right(verb: str):
actions.user.idea_select(verb, "action EditorNextWordWithSelection")
def move_camel_left(verb: str):
actions.user.idea_movement(
verb, "action EditorPreviousWordInDifferentHumpsMode"
)
def move_camel_right(verb: str):
actions.user.idea_movement(verb, "action EditorNextWordInDifferentHumpsMode")
def line_clone(line: int):
actions.user.idea("clone {}".format(line))
ctx.lists["user.selection_verbs"] = select_verbs_map.keys()
ctx.lists["user.navigation_verbs"] = movement_verbs_map.keys()
| 31.772586
| 90
| 0.654574
| 5,803
| 0.568977
| 0
| 0
| 5,847
| 0.573291
| 0
| 0
| 3,368
| 0.330228
|
3f4f261effbec9ffc0f629f4f48d599f4fe3ee02
| 752
|
py
|
Python
|
be/model/db_conn.py
|
CharlesDDDD/bookstore
|
4052a06f5162100f14c4b762f058204792ceb3c3
|
[
"Apache-2.0"
] | null | null | null |
be/model/db_conn.py
|
CharlesDDDD/bookstore
|
4052a06f5162100f14c4b762f058204792ceb3c3
|
[
"Apache-2.0"
] | null | null | null |
be/model/db_conn.py
|
CharlesDDDD/bookstore
|
4052a06f5162100f14c4b762f058204792ceb3c3
|
[
"Apache-2.0"
] | null | null | null |
from be.table.user import User
from be.table.user_store import User_Store
from be.table.store import Store
class DBConn:
def user_id_exist(self, user_id):
        row = User.query.filter(User.user_id == user_id).first()
        return row is not None
def book_id_exist(self, store_id, book_id):
        row = Store.query.filter(Store.store_id == store_id, Store.book_id == book_id).first()
        return row is not None
def store_id_exist(self, store_id):
        row = User_Store.query.filter(User_Store.store_id == store_id).first()
        return row is not None
| 26.857143
| 94
| 0.610372
| 642
| 0.853723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3f4f714fbc65f277fd1dc4334716ace380650956
| 22,334
|
py
|
Python
|
lib/roi_data_rel/fast_rcnn_rel.py
|
champon1020/TRACE
|
8ed0aed87e153af66f02502887a4de0d39867209
|
[
"MIT"
] | 34
|
2021-08-19T05:59:58.000Z
|
2022-03-26T09:26:54.000Z
|
lib/roi_data_rel/fast_rcnn_rel.py
|
champon1020/TRACE
|
8ed0aed87e153af66f02502887a4de0d39867209
|
[
"MIT"
] | 8
|
2021-09-15T05:27:23.000Z
|
2022-02-27T12:38:03.000Z
|
lib/roi_data_rel/fast_rcnn_rel.py
|
champon1020/TRACE
|
8ed0aed87e153af66f02502887a4de0d39867209
|
[
"MIT"
] | 6
|
2021-09-16T10:51:38.000Z
|
2022-03-05T22:48:54.000Z
|
# Adapted by Ji Zhang, 2019
#
# Based on Detectron.pytorch/lib/roi_data/fast_rcnn.py
# Original license text:
# --------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Construct minibatches for Fast R-CNN training. Handles the minibatch blobs
that are specific to Fast R-CNN. Other blobs that are generic to RPN, etc.
are handled by their respective roi_data modules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import numpy.random as npr
import logging
from core.config import cfg
import utils_rel.boxes_rel as box_utils_rel
import utils.blob as blob_utils
import utils.fpn as fpn_utils
logger = logging.getLogger(__name__)
def add_rel_blobs(blobs, im_scales, roidb):
"""Add blobs needed for training Fast R-CNN style models."""
# Sample training RoIs from each image and append them to the blob lists
for im_i, entry in enumerate(roidb):
frcn_blobs = _sample_pairs(entry, im_scales[im_i], im_i)
for k, v in frcn_blobs.items():
blobs[k].append(v)
# Concat the training blob lists into tensors
for k, v in blobs.items():
if isinstance(v, list) and len(v) > 0:
blobs[k] = np.concatenate(v)
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
_add_rel_multilevel_rois(blobs)
return True
def _sample_pairs(roidb, im_scale, batch_idx):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
fg_pairs_per_image = cfg.TRAIN.FG_REL_SIZE_PER_IM
    pairs_per_image = int(cfg.TRAIN.FG_REL_SIZE_PER_IM / cfg.TRAIN.FG_REL_FRACTION)  # need many more pairs since the candidate pair space is quadratic
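    # Worked example (illustrative config values): with FG_REL_SIZE_PER_IM = 512 and
    # FG_REL_FRACTION = 0.25, pairs_per_image = int(512 / 0.25) = 2048 candidate pairs.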
max_pair_overlaps = roidb['max_pair_overlaps']
if cfg.MODEL.MULTI_RELATION:
prd_gt_overlaps = roidb['prd_gt_overlaps'].toarray()
prd_class_num = prd_gt_overlaps.shape[1]
gt_pair_inds, gt_pair_class = np.where(prd_gt_overlaps > 1.0 - 1e-4)
fg_pair_inds, fg_pair_class = np.where((prd_gt_overlaps >= cfg.TRAIN.FG_THRESH) &
(prd_gt_overlaps <= 1.0 - 1e-4))
hash_gt_pair_inds = prd_class_num * gt_pair_inds + gt_pair_class
hash_fg_pair_inds = prd_class_num * fg_pair_inds + fg_pair_class
fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, hash_gt_pair_inds.size + hash_fg_pair_inds.size)
if hash_fg_pair_inds.size > 0 and fg_pairs_per_this_image > hash_gt_pair_inds.size:
hash_fg_pair_inds = npr.choice(
hash_fg_pair_inds, size=(fg_pairs_per_this_image - hash_gt_pair_inds.size), replace=False)
hash_fg_pair_inds = np.append(hash_fg_pair_inds, hash_gt_pair_inds)
elif fg_pairs_per_this_image <= hash_gt_pair_inds.size:
hash_gt_pair_inds = npr.choice(
hash_gt_pair_inds, size=fg_pairs_per_this_image, replace=False)
hash_fg_pair_inds = hash_gt_pair_inds
else:
hash_fg_pair_inds = hash_gt_pair_inds
blob_dict = {}
if cfg.MODEL.USE_BG:
bg_pair_inds, bg_pair_class_inds = np.where((prd_gt_overlaps < cfg.TRAIN.BG_THRESH_HI))
hash_bg_pair_inds = prd_class_num * bg_pair_inds + bg_pair_class_inds
bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image
bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, hash_bg_pair_inds.size)
if hash_bg_pair_inds.size > 0:
hash_bg_pair_inds = npr.choice(
hash_bg_pair_inds, size=bg_pairs_per_this_image, replace=False)
hash_keep_pair_inds = np.append(hash_fg_pair_inds, hash_bg_pair_inds)
multi_prd_labels = np.zeros(hash_keep_pair_inds.size, dtype=np.int32)
multi_prd_labels[:hash_fg_pair_inds.size] = 1.0 #fg_multi_prd_labels
keep_pair_inds = np.append(hash_fg_pair_inds // prd_class_num, hash_bg_pair_inds // prd_class_num)
keep_pair_class = np.append(hash_fg_pair_inds % prd_class_num, hash_bg_pair_inds % prd_class_num)
else:
            multi_prd_labels = np.ones(hash_fg_pair_inds.size, dtype=np.int32)  # all kept pairs are foreground
            keep_pair_inds = hash_fg_pair_inds // prd_class_num
            keep_pair_class = hash_fg_pair_inds % prd_class_num
blob_dict['multi_prd_labels_int32'] = multi_prd_labels.astype(np.int32, copy=False)
blob_dict['keep_pair_class_int32'] = keep_pair_class.astype(np.int32, copy=False)
blob_dict['fg_size'] = np.array([hash_fg_pair_inds.size], dtype=np.int32)
else:
gt_pair_inds = np.where(max_pair_overlaps > 1.0 - 1e-4)[0]
fg_pair_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps <= 1.0 - 1e-4))[0]
fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, gt_pair_inds.size + fg_pair_inds.size)
# Sample foreground regions without replacement
if fg_pair_inds.size > 0 and fg_pairs_per_this_image > gt_pair_inds.size:
fg_pair_inds = npr.choice(
fg_pair_inds, size=(fg_pairs_per_this_image - gt_pair_inds.size), replace=False)
fg_pair_inds = np.append(fg_pair_inds, gt_pair_inds)
elif fg_pairs_per_this_image <= gt_pair_inds.size:
gt_pair_inds = npr.choice(
gt_pair_inds, size=fg_pairs_per_this_image, replace=False)
fg_pair_inds = gt_pair_inds
else:
fg_pair_inds = gt_pair_inds
# Label is the class each RoI has max overlap with
fg_prd_labels = roidb['max_prd_classes'][fg_pair_inds]
blob_dict = dict(
fg_prd_labels_int32=fg_prd_labels.astype(np.int32, copy=False))
if cfg.MODEL.USE_BG:
bg_pair_inds = np.where((max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image
bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, bg_pair_inds.size)
# Sample foreground regions without replacement
if bg_pair_inds.size > 0:
bg_pair_inds = npr.choice(
bg_pair_inds, size=bg_pairs_per_this_image, replace=False)
# logger.info('{} : {}'.format(fg_pair_inds.size, bg_pair_inds.size))
keep_pair_inds = np.append(fg_pair_inds, bg_pair_inds)
all_prd_labels = np.zeros(keep_pair_inds.size, dtype=np.int32)
all_prd_labels[:fg_pair_inds.size] = fg_prd_labels + 1 # class should start from 1
else:
keep_pair_inds = fg_pair_inds
all_prd_labels = fg_prd_labels
blob_dict['all_prd_labels_int32'] = all_prd_labels.astype(np.int32, copy=False)
blob_dict['fg_size'] = np.array([fg_pair_inds.size], dtype=np.int32) # this is used to check if there is at least one fg to learn
sampled_sbj_boxes = roidb['sbj_boxes'][keep_pair_inds]
sampled_obj_boxes = roidb['obj_boxes'][keep_pair_inds]
sampled_all_boxes = roidb['all_boxes']
det_labels = roidb['det_labels']
sampled_sbj_inds = roidb['sbj_id'][keep_pair_inds]
sampled_obj_inds = roidb['obj_id'][keep_pair_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois = sampled_sbj_boxes * im_scale
sampled_obj_rois = sampled_obj_boxes * im_scale
sampled_all_rois = sampled_all_boxes * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((keep_pair_inds.shape[0], 1))
all_boxes_repeated_batch_idx = batch_idx * blob_utils.ones((sampled_all_boxes.shape[0], 1))
sampled_sbj_rois = np.hstack((repeated_batch_idx, sampled_sbj_rois))
sampled_obj_rois = np.hstack((repeated_batch_idx, sampled_obj_rois))
sampled_all_rois = np.hstack((all_boxes_repeated_batch_idx, sampled_all_rois))
int_repeated_batch_idx = batch_idx * np.ones((keep_pair_inds.shape[0], 1), dtype=np.int)
blob_dict['sbj_inds'] = np.hstack((repeated_batch_idx, sampled_sbj_inds.reshape(-1, 1)))
blob_dict['obj_inds'] = np.hstack((repeated_batch_idx, sampled_obj_inds.reshape(-1, 1)))
blob_dict['sbj_rois'] = sampled_sbj_rois
blob_dict['obj_rois'] = sampled_obj_rois
blob_dict['det_rois'] = sampled_all_rois
blob_dict['det_labels'] = det_labels
sampled_rel_rois = box_utils_rel.rois_union(sampled_sbj_rois, sampled_obj_rois)
blob_dict['rel_rois'] = sampled_rel_rois
if cfg.MODEL.USE_SPATIAL_FEAT:
sampled_spt_feat = box_utils_rel.get_spt_features(
sampled_sbj_boxes, sampled_obj_boxes, roidb['width'], roidb['height'])
blob_dict['spt_feat'] = sampled_spt_feat
if cfg.MODEL.USE_FREQ_BIAS:
sbj_labels = roidb['max_sbj_classes'][keep_pair_inds]
obj_labels = roidb['max_obj_classes'][keep_pair_inds]
blob_dict['all_sbj_labels_int32'] = sbj_labels.astype(np.int32, copy=False)
blob_dict['all_obj_labels_int32'] = obj_labels.astype(np.int32, copy=False)
if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
nodes_per_image = cfg.MODEL.NODE_SAMPLE_SIZE
max_sbj_overlaps = roidb['max_sbj_overlaps']
max_obj_overlaps = roidb['max_obj_overlaps']
# sbj
# Here a naturally existing assumption is, each positive sbj should have at least one positive obj
sbj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0]
sbj_pos_obj_pos_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
sbj_pos_obj_neg_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_obj_overlaps < cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
if sbj_pos_pair_pos_inds.size > 0:
sbj_pos_pair_pos_inds = npr.choice(
sbj_pos_pair_pos_inds,
size=int(min(nodes_per_image, sbj_pos_pair_pos_inds.size)),
replace=False)
if sbj_pos_obj_pos_pair_neg_inds.size > 0:
sbj_pos_obj_pos_pair_neg_inds = npr.choice(
sbj_pos_obj_pos_pair_neg_inds,
size=int(min(nodes_per_image, sbj_pos_obj_pos_pair_neg_inds.size)),
replace=False)
sbj_pos_pair_neg_inds = sbj_pos_obj_pos_pair_neg_inds
if nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size > 0 and sbj_pos_obj_neg_pair_neg_inds.size > 0:
sbj_pos_obj_neg_pair_neg_inds = npr.choice(
sbj_pos_obj_neg_pair_neg_inds,
size=int(min(nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size, sbj_pos_obj_neg_pair_neg_inds.size)),
replace=False)
sbj_pos_pair_neg_inds = np.append(sbj_pos_pair_neg_inds, sbj_pos_obj_neg_pair_neg_inds)
sbj_pos_inds = np.append(sbj_pos_pair_pos_inds, sbj_pos_pair_neg_inds)
binary_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32)
binary_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = 1
blob_dict['binary_labels_sbj_pos_int32'] = binary_labels_sbj_pos.astype(np.int32, copy=False)
prd_pos_labels_sbj_pos = roidb['max_prd_classes'][sbj_pos_pair_pos_inds]
prd_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32)
prd_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = prd_pos_labels_sbj_pos + 1
blob_dict['prd_labels_sbj_pos_int32'] = prd_labels_sbj_pos.astype(np.int32, copy=False)
sbj_labels_sbj_pos = roidb['max_sbj_classes'][sbj_pos_inds] + 1
# 1. set all obj labels > 0
obj_labels_sbj_pos = roidb['max_obj_classes'][sbj_pos_inds] + 1
# 2. find those negative obj
max_obj_overlaps_sbj_pos = roidb['max_obj_overlaps'][sbj_pos_inds]
obj_neg_inds_sbj_pos = np.where(max_obj_overlaps_sbj_pos < cfg.TRAIN.FG_THRESH)[0]
obj_labels_sbj_pos[obj_neg_inds_sbj_pos] = 0
blob_dict['sbj_labels_sbj_pos_int32'] = sbj_labels_sbj_pos.astype(np.int32, copy=False)
blob_dict['obj_labels_sbj_pos_int32'] = obj_labels_sbj_pos.astype(np.int32, copy=False)
# this is for freq bias in RelDN
blob_dict['sbj_labels_sbj_pos_fg_int32'] = roidb['max_sbj_classes'][sbj_pos_inds].astype(np.int32, copy=False)
blob_dict['obj_labels_sbj_pos_fg_int32'] = roidb['max_obj_classes'][sbj_pos_inds].astype(np.int32, copy=False)
sampled_sbj_boxes_sbj_pos = roidb['sbj_boxes'][sbj_pos_inds]
sampled_obj_boxes_sbj_pos = roidb['obj_boxes'][sbj_pos_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois_sbj_pos = sampled_sbj_boxes_sbj_pos * im_scale
sampled_obj_rois_sbj_pos = sampled_obj_boxes_sbj_pos * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((sbj_pos_inds.shape[0], 1))
sampled_sbj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_sbj_rois_sbj_pos))
sampled_obj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_obj_rois_sbj_pos))
blob_dict['sbj_rois_sbj_pos'] = sampled_sbj_rois_sbj_pos
blob_dict['obj_rois_sbj_pos'] = sampled_obj_rois_sbj_pos
sampled_rel_rois_sbj_pos = box_utils_rel.rois_union(sampled_sbj_rois_sbj_pos, sampled_obj_rois_sbj_pos)
blob_dict['rel_rois_sbj_pos'] = sampled_rel_rois_sbj_pos
_, inds_unique_sbj_pos, inds_reverse_sbj_pos = np.unique(
sampled_sbj_rois_sbj_pos, return_index=True, return_inverse=True, axis=0)
assert inds_reverse_sbj_pos.shape[0] == sampled_sbj_rois_sbj_pos.shape[0]
blob_dict['inds_unique_sbj_pos'] = inds_unique_sbj_pos
blob_dict['inds_reverse_sbj_pos'] = inds_reverse_sbj_pos
if cfg.MODEL.USE_SPATIAL_FEAT:
sampled_spt_feat_sbj_pos = box_utils_rel.get_spt_features(
sampled_sbj_boxes_sbj_pos, sampled_obj_boxes_sbj_pos, roidb['width'], roidb['height'])
blob_dict['spt_feat_sbj_pos'] = sampled_spt_feat_sbj_pos
# obj
# Here a naturally existing assumption is, each positive obj should have at least one positive sbj
obj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0]
obj_pos_sbj_pos_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
obj_pos_sbj_neg_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_sbj_overlaps < cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
if obj_pos_pair_pos_inds.size > 0:
obj_pos_pair_pos_inds = npr.choice(
obj_pos_pair_pos_inds,
size=int(min(nodes_per_image, obj_pos_pair_pos_inds.size)),
replace=False)
if obj_pos_sbj_pos_pair_neg_inds.size > 0:
obj_pos_sbj_pos_pair_neg_inds = npr.choice(
obj_pos_sbj_pos_pair_neg_inds,
size=int(min(nodes_per_image, obj_pos_sbj_pos_pair_neg_inds.size)),
replace=False)
obj_pos_pair_neg_inds = obj_pos_sbj_pos_pair_neg_inds
if nodes_per_image - obj_pos_sbj_pos_pair_neg_inds.size > 0 and obj_pos_sbj_neg_pair_neg_inds.size:
obj_pos_sbj_neg_pair_neg_inds = npr.choice(
obj_pos_sbj_neg_pair_neg_inds,
size=int(min(nodes_per_image - obj_pos_sbj_pos_pair_neg_inds.size, obj_pos_sbj_neg_pair_neg_inds.size)),
replace=False)
obj_pos_pair_neg_inds = np.append(obj_pos_pair_neg_inds, obj_pos_sbj_neg_pair_neg_inds)
obj_pos_inds = np.append(obj_pos_pair_pos_inds, obj_pos_pair_neg_inds)
binary_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32)
binary_labels_obj_pos[:obj_pos_pair_pos_inds.size] = 1
blob_dict['binary_labels_obj_pos_int32'] = binary_labels_obj_pos.astype(np.int32, copy=False)
prd_pos_labels_obj_pos = roidb['max_prd_classes'][obj_pos_pair_pos_inds]
prd_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32)
prd_labels_obj_pos[:obj_pos_pair_pos_inds.size] = prd_pos_labels_obj_pos + 1
blob_dict['prd_labels_obj_pos_int32'] = prd_labels_obj_pos.astype(np.int32, copy=False)
obj_labels_obj_pos = roidb['max_obj_classes'][obj_pos_inds] + 1
# 1. set all sbj labels > 0
sbj_labels_obj_pos = roidb['max_sbj_classes'][obj_pos_inds] + 1
# 2. find those negative sbj
max_sbj_overlaps_obj_pos = roidb['max_sbj_overlaps'][obj_pos_inds]
sbj_neg_inds_obj_pos = np.where(max_sbj_overlaps_obj_pos < cfg.TRAIN.FG_THRESH)[0]
sbj_labels_obj_pos[sbj_neg_inds_obj_pos] = 0
blob_dict['sbj_labels_obj_pos_int32'] = sbj_labels_obj_pos.astype(np.int32, copy=False)
blob_dict['obj_labels_obj_pos_int32'] = obj_labels_obj_pos.astype(np.int32, copy=False)
# this is for freq bias in RelDN
blob_dict['sbj_labels_obj_pos_fg_int32'] = roidb['max_sbj_classes'][obj_pos_inds].astype(np.int32, copy=False)
blob_dict['obj_labels_obj_pos_fg_int32'] = roidb['max_obj_classes'][obj_pos_inds].astype(np.int32, copy=False)
sampled_sbj_boxes_obj_pos = roidb['sbj_boxes'][obj_pos_inds]
sampled_obj_boxes_obj_pos = roidb['obj_boxes'][obj_pos_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois_obj_pos = sampled_sbj_boxes_obj_pos * im_scale
sampled_obj_rois_obj_pos = sampled_obj_boxes_obj_pos * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((obj_pos_inds.shape[0], 1))
sampled_sbj_rois_obj_pos = np.hstack((repeated_batch_idx, sampled_sbj_rois_obj_pos))
sampled_obj_rois_obj_pos = np.hstack((repeated_batch_idx, sampled_obj_rois_obj_pos))
blob_dict['sbj_rois_obj_pos'] = sampled_sbj_rois_obj_pos
blob_dict['obj_rois_obj_pos'] = sampled_obj_rois_obj_pos
sampled_rel_rois_obj_pos = box_utils_rel.rois_union(sampled_sbj_rois_obj_pos, sampled_obj_rois_obj_pos)
blob_dict['rel_rois_obj_pos'] = sampled_rel_rois_obj_pos
_, inds_unique_obj_pos, inds_reverse_obj_pos = np.unique(
sampled_obj_rois_obj_pos, return_index=True, return_inverse=True, axis=0)
assert inds_reverse_obj_pos.shape[0] == sampled_obj_rois_obj_pos.shape[0]
blob_dict['inds_unique_obj_pos'] = inds_unique_obj_pos
blob_dict['inds_reverse_obj_pos'] = inds_reverse_obj_pos
if cfg.MODEL.USE_SPATIAL_FEAT:
sampled_spt_feat_obj_pos = box_utils_rel.get_spt_features(
sampled_sbj_boxes_obj_pos, sampled_obj_boxes_obj_pos, roidb['width'], roidb['height'])
blob_dict['spt_feat_obj_pos'] = sampled_spt_feat_obj_pos
return blob_dict
def _add_rel_multilevel_rois(blobs):
"""By default training RoIs are added for a single feature map level only.
When using FPN, the RoIs must be distributed over different FPN levels
according the level assignment heuristic (see: modeling.FPN.
map_rois_to_fpn_levels).
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
def _distribute_rois_over_fpn_levels(rois_blob_names):
"""Distribute rois over the different FPN levels."""
# Get target level for each roi
# Recall blob rois are in (batch_idx, x1, y1, x2, y2) format, hence take
# the box coordinates from columns 1:5
lowest_target_lvls = None
for rois_blob_name in rois_blob_names:
target_lvls = fpn_utils.map_rois_to_fpn_levels(
blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max)
if lowest_target_lvls is None:
lowest_target_lvls = target_lvls
else:
lowest_target_lvls = np.minimum(lowest_target_lvls, target_lvls)
for rois_blob_name in rois_blob_names:
# Add per FPN level roi blobs named like: <rois_blob_name>_fpn<lvl>
fpn_utils.add_multilevel_roi_blobs(
blobs, rois_blob_name, blobs[rois_blob_name], lowest_target_lvls, lvl_min,
lvl_max)
_distribute_rois_over_fpn_levels(['sbj_rois'])
_distribute_rois_over_fpn_levels(['obj_rois'])
_distribute_rois_over_fpn_levels(['rel_rois'])
_distribute_rois_over_fpn_levels(['det_rois'])
if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
_distribute_rois_over_fpn_levels(['sbj_rois_sbj_pos'])
_distribute_rois_over_fpn_levels(['obj_rois_sbj_pos'])
_distribute_rois_over_fpn_levels(['rel_rois_sbj_pos'])
_distribute_rois_over_fpn_levels(['sbj_rois_obj_pos'])
_distribute_rois_over_fpn_levels(['obj_rois_obj_pos'])
_distribute_rois_over_fpn_levels(['rel_rois_obj_pos'])
| 57.266667
| 138
| 0.69392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,254
| 0.190472
|
3f4fa826c96bbe11c4299dd6bc861d0f137a3106
| 5,957
|
py
|
Python
|
src/mysql/tables.py
|
katerina7479/sooty-shearwater
|
1319a6f55443a73e50d265286746edd722404649
|
[
"MIT"
] | null | null | null |
src/mysql/tables.py
|
katerina7479/sooty-shearwater
|
1319a6f55443a73e50d265286746edd722404649
|
[
"MIT"
] | null | null | null |
src/mysql/tables.py
|
katerina7479/sooty-shearwater
|
1319a6f55443a73e50d265286746edd722404649
|
[
"MIT"
] | null | null | null |
import time
import re
from src.core.tables import Table, MigrationTable
from src.core.constraints import Index
class MysqlTable(Table):
@staticmethod
def _join_cols(cols):
'''Join and escape a list'''
return ', '.join(['`%s`' % i for i in cols])
@staticmethod
def _join_conditionals(row_dict):
'''Create a joined conditional statement for updates
return escaped string of `key`=val, `key`='val' for dictionary
'''
equalities = []
for key, val in row_dict.items():
temp = '`{}`='.format(key)
if isinstance(val, (int, float)):
temp += '{}'.format(val)
elif isinstance(val, str):
temp += '\'{}\''.format(val)
else:
raise TypeError('Value %s, type %s not recognised as a number or string' % (val, type(val)))
equalities.append(temp)
return ', '.join(equalities)
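    # Illustrative example (assumed values): _join_conditionals({'id': 3, 'name': 'bob'})
    # returns "`id`=3, `name`='bob'" -- numbers stay unquoted, strings are quoted.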
@staticmethod
def _qualify(table, cols):
'''Qualify, join and escape the list'''
return ', '.join(['`{}`.`{}`'.format(table, c) for c in cols])
@staticmethod
def _equals(cols, new_table, new_cols):
'''Qualify, join and equate'''
return ', '.join('`{}`=`{}`.`{}`'.format(cols[i], new_table, new_cols[i]) for i in range(len(cols)))
def insert_row(self, row_dict):
"""Add a row to the table"""
sql = self.commands.insert_row(
self.name,
self._join_cols(row_dict.keys()),
self._join_values(row_dict.values())
)
self.execute(sql)
return self.db.last_row
def get_column_definition(self, column_name):
'''Get the sql column definition
Selects the column type, and YES or NO from the column, IS NULLABLE.
That's enough information to re-create the column.
'''
sql = self.commands.column_definition(self.db.name, self.name, column_name)
ans = self.execute(sql)[0]
if ans[1] == 'NO':
return '{} NOT NULL'.format(ans[0])
else:
return ans[0]
def rename_column(self, old_name, new_name):
'''Rename a column'''
self.execute(self.commands.rename_column(
self.name,
old_name,
new_name,
self.get_column_definition(old_name))
)
@property
def create_statement(self):
"""Get table create statement"""
query = self.commands.get_table_create_statement(self.name)
if self.db.table_exists(self.name):
statement = self.execute(query)[0][1]
            statement = re.sub(r'\s+', ' ', statement)
return statement
raise ValueError('Table does not exist, no create statement')
@property
def indexes(self):
"""Return list of indexes"""
indexes = self.execute(self.commands.get_indexes(self.name))
return [Index(tup[0], tup[2], tup[1], tup[4]) for tup in indexes]
class MySqlMigrationTable(MysqlTable, MigrationTable):
def create_from_source(self):
"""Create new table like source_table"""
create_statement = self.source.create_statement.replace(
'CREATE TABLE `{}`'.format(self.source.name),
'CREATE TABLE `{}`'
)
self.create_from_statement(create_statement)
def _trigger_name(self, method_type):
'Create trigger name'
name = 'migration_trigger_{}_{}'.format(method_type, self.source.name)
return name[:self.db.config['MAX_LENGTH_NAME']]
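    # Illustrative example (assumed source table 'orders'): _trigger_name('insert') returns
    # 'migration_trigger_insert_orders', truncated to the configured MAX_LENGTH_NAME.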
def create_insert_trigger(self):
'''Set insert Triggers.
'NEW' and 'OLD' are mysql references
see https://dev.mysql.com/doc/refman/5.0/en/create-trigger.html
'''
sql = self.commands.insert_trigger(
self._trigger_name('insert'),
self.source.name,
self.name,
self._join_cols(self.intersection.dest_columns),
self._qualify('NEW', self.intersection.origin_columns))
self.execute(sql)
def create_delete_trigger(self):
'''Set delete triggers
'NEW' and 'OLD' are mysql references
see https://dev.mysql.com/doc/refman/5.0/en/create-trigger.html
'''
sql = self.commands.delete_trigger(
self._trigger_name('delete'),
self.source.name,
self.name,
self.primary_key_column)
self.execute(sql)
def create_update_trigger(self):
'''Set update triggers
'NEW' and 'OLD' are mysql references
see https://dev.mysql.com/doc/refman/5.0/en/create-trigger.html
'''
sql = self.commands.update_trigger(
self._trigger_name('update'),
self.source.name,
self.name,
self._equals(self.intersection.dest_columns, 'NEW', self.intersection.origin_columns),
self.primary_key_column
)
self.execute(sql)
def rename_tables(self):
'Rename the tables'
self.delete_triggers()
retries = 0
source_name, archive_name, migrate_name = self.source.name, self.source.archive_name, self.name
while True:
try:
self.execute(self.commands.rename_table(source_name, archive_name, migrate_name))
break
except Exception as e:
retries += 1
if retries > self.db.config['MAX_RENAME_RETRIES']:
self.create_triggers()
return False
# TODO: make sure this is a Lock wait timeout error before retrying
print('Rename retry %d, error: %s' % (retries, e))
                time.sleep(self.db.config['RETRY_SLEEP_TIME'])
self.name, self.source.name = self.source.name, self.archive_name
print("Rename complete!")
return True
| 35.041176
| 108
| 0.583347
| 5,840
| 0.980359
| 0
| 0
| 1,777
| 0.298305
| 0
| 0
| 1,518
| 0.254826
|
3f508ed942c873013d3f6f30b02515c9088ebbfe
| 2,418
|
py
|
Python
|
CvZoneCompetition.py
|
MoranLeven/CvZomeCompetition
|
f28dc81ad4139902d831c34649ff3996106a2496
|
[
"Apache-2.0"
] | null | null | null |
CvZoneCompetition.py
|
MoranLeven/CvZomeCompetition
|
f28dc81ad4139902d831c34649ff3996106a2496
|
[
"Apache-2.0"
] | null | null | null |
CvZoneCompetition.py
|
MoranLeven/CvZomeCompetition
|
f28dc81ad4139902d831c34649ff3996106a2496
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
from time import sleep
import random
length_min = 80  # Minimum width of a detected bounding rectangle
height_min = 80  # Minimum height of a detected bounding rectangle
offset = 6  # Allowed error (in pixels) around the counting line
pos_linha = 550  # y coordinate of the counting line
delay = 60  # FPS of the video
detect = []
cars = 0
def paste_center (x, y, w, h):
x1 = int (w / 2)
y1 = int (h / 2)
cx = x + x1
cy = y + y1
return cx, cy
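# Illustrative example: paste_center(10, 20, 100, 50) returns (60, 45), the centre of the box.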
cap = cv2.VideoCapture ("DRONE-SURVEILLANCE-CONTEST-VIDEO.mp4")
cap.set (3,500)
cap.set (4,500)
subtractor = cv2.bgsegm.createBackgroundSubtractorMOG ()
while True:
ret, frame1 = cap.read ()
time = float(1 / delay)
sleep(time)
gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3,3), 10)
img_sub = subtractor.apply(blur)
dilate = cv2.dilate(img_sub, np.ones ((5,5)))
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
dilated = cv2.morphologyEx(dilate, cv2. MORPH_CLOSE, kernel)
dilated = cv2.morphologyEx(dilated, cv2. MORPH_CLOSE, kernel)
contour, h = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.line(frame1, (25, pos_linha), (1900, pos_linha), (255,0,0), 3)
for (i, c) in enumerate(contour):
(x, y, w, h) = cv2.boundingRect(c)
validate_contour = (w >= length_min) and (h >= height_min)
if not validate_contour:
continue
cv2.rectangle(frame1, (x, y), (x + w, y + h), (0,255,0), 2)
center = paste_center (x, y, w, h)
detect.append(center)
        cv2.circle(frame1, center, 4, (0, 0, 255), -1)
cv2.putText(frame1,str(random.randint(1,200)),(x,y),cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2)
for (x, y) in detect:
if y <(pos_linha + offset) and y> (pos_linha-offset):
cars += 1
cv2.line(frame1, (25, pos_linha), (1200, pos_linha), (0,127,255), 3)
cv2.putText(frame1, str (random.randint (1,200)), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,255), 2)
detect.remove((x, y))
print("car is detected:" + str (cars))
cv2.putText(frame1, "Moran 11", (850, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 5)
cv2.putText(frame1, str(cars), (1700, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 5)
cv2.imshow("Surveillance Video", frame1)
if cv2.waitKey (10) == 27:
break
cv2.destroyAllWindows ()
cap.release ()
| 33.123288
| 118
| 0.611249
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 184
| 0.076096
|
3f520d921ea722830523cfc9c9d0a7a9e7da7bf2
| 1,048
|
py
|
Python
|
setup.py
|
jiinus/django-db-prefix
|
2fae11b30dc339f3d4318a97900cf7cc4ff2cd51
|
[
"BSD-3-Clause"
] | 11
|
2016-12-17T20:01:43.000Z
|
2022-02-24T16:35:36.000Z
|
setup.py
|
jiinus/django-db-prefix
|
2fae11b30dc339f3d4318a97900cf7cc4ff2cd51
|
[
"BSD-3-Clause"
] | 4
|
2018-09-08T23:44:01.000Z
|
2021-09-22T06:33:16.000Z
|
setup.py
|
jiinus/django-db-prefix
|
2fae11b30dc339f3d4318a97900cf7cc4ff2cd51
|
[
"BSD-3-Clause"
] | 11
|
2015-11-13T09:26:58.000Z
|
2021-12-20T11:51:44.000Z
|
# -*- coding: utf-8 -*-
import os.path
from distutils.core import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name='django-db-prefix',
version='1.0',
keywords='django database',
author=u'Ben Slavin <benjamin.slavin@gmail.com>, Denilson Sá <denilsonsa@gmail.com>',
packages=['django_db_prefix'],
url='https://github.com/denilsonsa/django-db-prefix',
license='BSD licence, see LICENCE',
description='Allow specification of a global, per-app or per-model database table name prefix.',
long_description=read('README.md'),
requires=[
'Django',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Database',
]
)
| 29.942857
| 100
| 0.625954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 601
| 0.572927
|
3f52461202339d3dfdcf90633c36a5d40fb4c967
| 2,104
|
py
|
Python
|
pyleecan/Methods/Slot/HoleUD/build_geometry.py
|
mjfwest/pyleecan
|
6946c863bea62d13f100def2d3f905c9de8721d0
|
[
"Apache-2.0"
] | 1
|
2020-10-19T09:01:00.000Z
|
2020-10-19T09:01:00.000Z
|
pyleecan/Methods/Slot/HoleUD/build_geometry.py
|
mjfwest/pyleecan
|
6946c863bea62d13f100def2d3f905c9de8721d0
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Methods/Slot/HoleUD/build_geometry.py
|
mjfwest/pyleecan
|
6946c863bea62d13f100def2d3f905c9de8721d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from numpy import arcsin, arctan, cos, exp, array, angle, pi
from numpy import imag as np_imag
from scipy.optimize import fsolve
from ....Classes.Segment import Segment
from ....Classes.SurfLine import SurfLine
from ....Classes.Arc1 import Arc1
from ....Methods import ParentMissingError
from ....Functions.labels import HOLEV_LAB, HOLEM_LAB
def build_geometry(self, alpha=0, delta=0, is_simplified=False):
"""Compute the curve (Segment) needed to plot the Hole.
The ending point of a curve is the starting point of the next curve in
the list
Parameters
----------
self : HoleUD
A HoleUD object
alpha : float
Angle to rotate the slot (Default value = 0) [rad]
delta : complex
Complex to translate the slot (Default value = 0)
is_simplified : bool
True to avoid line superposition (not used)
Returns
-------
surf_list: list
List of SurfLine needed to draw the Hole
"""
surf_list = self.surf_list
# Get correct label for surfaces
lam_label = self.parent.get_label()
R_id, surf_type = self.get_R_id()
vent_label = lam_label + "_" + surf_type + "_R" + str(R_id) + "-T"
mag_label = lam_label + "_" + HOLEM_LAB + "_R" + str(R_id) + "-T"
# Update surface labels
hole_id = 0
mag_id = 0
for surf in surf_list:
if HOLEM_LAB in surf.label:
key = "magnet_" + str(mag_id)
if key in self.magnet_dict and self.magnet_dict[key] is not None:
surf.label = mag_label + str(mag_id) + "-S0"
mag_id += 1
else: # Magnet disabled or not defined
surf.label = vent_label + str(hole_id) + "-S0"
hole_id += 1
elif HOLEV_LAB in surf.label:
surf.label = vent_label + str(hole_id) + "-S0"
hole_id += 1
# Apply the transformations
return_list = list()
for surf in surf_list:
return_list.append(surf.copy())
return_list[-1].rotate(alpha)
return_list[-1].translate(delta)
return return_list
| 30.941176
| 77
| 0.620722
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 732
| 0.347909
|
3f52ca8d87119aa7ada69b18dd59026206c97a21
| 2,861
|
py
|
Python
|
tardis/tardis_portal/auth/localdb_auth.py
|
nrmay/mytardis
|
34a460cde6a847c66a6ec3725182c09dc9167bd5
|
[
"Apache-2.0"
] | null | null | null |
tardis/tardis_portal/auth/localdb_auth.py
|
nrmay/mytardis
|
34a460cde6a847c66a6ec3725182c09dc9167bd5
|
[
"Apache-2.0"
] | null | null | null |
tardis/tardis_portal/auth/localdb_auth.py
|
nrmay/mytardis
|
34a460cde6a847c66a6ec3725182c09dc9167bd5
|
[
"Apache-2.0"
] | null | null | null |
'''
Local DB Authentication module.
.. moduleauthor:: Gerson Galang <gerson.galang@versi.edu.au>
'''
import logging
from django.contrib.auth.models import User, Group
from django.contrib.auth.backends import ModelBackend
from tardis.tardis_portal.auth.interfaces import AuthProvider, GroupProvider, UserProvider
logger = logging.getLogger(__name__)
auth_key = u'localdb'
auth_display_name = u'Local DB'
_modelBackend = ModelBackend()
class DjangoAuthBackend(AuthProvider):
"""Authenticate against Django's Model Backend.
"""
def authenticate(self, request):
"""authenticate a user, this expect the user will be using
form based auth and the *username* and *password* will be
passed in as **POST** variables.
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
"""
username = request.POST['username']
password = request.POST['password']
if not username or not password:
return None
return _modelBackend.authenticate(username, password)
def get_user(self, user_id):
try:
user = User.objects.get(username=user_id)
except User.DoesNotExist:
user = None
return user
class DjangoGroupProvider(GroupProvider):
name = u'django_group'
def getGroups(self, user):
"""return an iteration of the available groups.
"""
groups = user.groups.all()
return [g.id for g in groups]
def getGroupById(self, id):
"""return the group associated with the id::
{"id": 123,
"display": "Group Name",}
"""
groupObj = Group.objects.get(id=id)
if groupObj:
return {'id': id, 'display': groupObj.name}
return None
def searchGroups(self, **filter):
result = []
groups = Group.objects.filter(**filter)
for g in groups:
users = [u.username for u in User.objects.filter(groups=g)]
result += [{'id': g.id,
'display': g.name,
'members': users}]
return result
class DjangoUserProvider(UserProvider):
name = u'django_user'
def getUserById(self, id):
"""
return the user dictionary in the format of::
{"id": 123,
"first_name": "John",
"last_name": "Smith",
"email": "john@example.com"}
"""
try:
userObj = User.objects.get(username=id)
return {'id': id,
'first_name': userObj.first_name,
'last_name': userObj.last_name,
'email': userObj.email}
except User.DoesNotExist:
return None
django_user = DjangoUserProvider.name
django_group = DjangoGroupProvider.name
| 26.009091
| 90
| 0.595246
| 2,328
| 0.813702
| 0
| 0
| 0
| 0
| 0
| 0
| 961
| 0.335897
|
3f53cdf77e0b0d349cd123391fe47e0189614f36
| 29,318
|
py
|
Python
|
src/app.py
|
hubmapconsortium/search-api
|
21900c9ba5d353ab075d4b2cc217085b85d555b1
|
[
"MIT"
] | null | null | null |
src/app.py
|
hubmapconsortium/search-api
|
21900c9ba5d353ab075d4b2cc217085b85d555b1
|
[
"MIT"
] | 248
|
2020-02-27T20:45:25.000Z
|
2022-03-30T19:12:58.000Z
|
src/app.py
|
sennetconsortium/search-api
|
01a5c0ab8ec6abd147e5b04477ba10f80fedfdc3
|
[
"MIT"
] | 1
|
2022-02-03T19:49:55.000Z
|
2022-02-03T19:49:55.000Z
|
import os
import time
from pathlib import Path
from flask import Flask, jsonify, abort, request, Response, Request
import concurrent.futures
import threading
import requests
import logging
import ast
from urllib.parse import urlparse
from flask import current_app as app
from urllib3.exceptions import InsecureRequestWarning
from yaml import safe_load
# Local modules
from elasticsearch.indexer import Indexer
from libs.assay_type import AssayType
# HuBMAP commons
from hubmap_commons.hm_auth import AuthHelper
# Set logging format and level (default is warning)
# All the API logging is forwarded to the uWSGI server and gets written into the log file `uwsgi-entity-api.log`
# Log rotation is handled via logrotate on the host system with a configuration file
# Do NOT handle log file and rotation via the Python logging to avoid issues with multi-worker processes
logging.basicConfig(format='[%(asctime)s] %(levelname)s in %(module)s:%(lineno)d: %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
# Specify the absolute path of the instance folder and use the config file relative to the instance path
app = Flask(__name__, instance_path=os.path.join(os.path.abspath(os.path.dirname(__file__)), 'instance'), instance_relative_config=True)
app.config.from_pyfile('app.cfg')
# load the index configurations and set the default
INDICES = safe_load((Path(__file__).absolute().parent / 'instance/search-config.yaml').read_text())
DEFAULT_INDEX_WITHOUT_PREFIX = INDICES['default_index']
logger.debug("############ INDICES config LOADED")
logger.debug(INDICES)
# Remove trailing slash / from URL base to avoid "//" caused by config with trailing slash
DEFAULT_ELASTICSEARCH_URL = INDICES['indices'][DEFAULT_INDEX_WITHOUT_PREFIX]['elasticsearch']['url'].strip('/')
DEFAULT_ENTITY_API_URL = INDICES['indices'][DEFAULT_INDEX_WITHOUT_PREFIX]['document_source_endpoint'].strip('/')
# Suppress InsecureRequestWarning warning when requesting status on https with ssl cert verify disabled
requests.packages.urllib3.disable_warnings(category = InsecureRequestWarning)
####################################################################################################
## Register error handlers
####################################################################################################
# Error handler for 400 Bad Request with custom error message
@app.errorhandler(400)
def http_bad_request(e):
return jsonify(error=str(e)), 400
# Error handler for 401 Unauthorized with custom error message
@app.errorhandler(401)
def http_unauthorized(e):
return jsonify(error=str(e)), 401
# Error handler for 403 Forbidden with custom error message
@app.errorhandler(403)
def http_forbidden(e):
return jsonify(error=str(e)), 403
# Error handler for 500 Internal Server Error with custom error message
@app.errorhandler(500)
def http_internal_server_error(e):
return jsonify(error=str(e)), 500
####################################################################################################
## AuthHelper initialization
####################################################################################################
# Initialize AuthHelper class and ensure singleton
try:
if AuthHelper.isInitialized() == False:
auth_helper_instance = AuthHelper.create(app.config['APP_CLIENT_ID'],
app.config['APP_CLIENT_SECRET'])
logger.info("Initialized AuthHelper class successfully :)")
else:
auth_helper_instance = AuthHelper.instance()
except Exception:
msg = "Failed to initialize the AuthHelper class"
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
####################################################################################################
## Default route
####################################################################################################
@app.route('/', methods = ['GET'])
def index():
return "Hello! This is HuBMAP Search API service :)"
####################################################################################################
## Assay type API
####################################################################################################
@app.route('/assaytype', methods = ['GET'])
def assaytypes():
primary = None
simple = False
for key, val in request.args.items():
if key == 'primary':
primary = val.lower() == "true"
elif key == 'simple':
simple = val.lower() == "true"
else:
abort(400, f'invalid request parameter {key}')
if primary is None:
name_l = [name for name in AssayType.iter_names()]
else:
name_l = [name for name in AssayType.iter_names(primary=primary)]
if simple:
return jsonify(result=name_l)
else:
return jsonify(result=[AssayType(name).to_json() for name in name_l])
@app.route('/assaytype/<name>', methods = ['GET'])
@app.route('/assayname', methods = ['POST'])
def assayname(name=None):
if name is None:
request_json_required(request)
try:
name = request.json['name']
except Exception:
abort(400, 'request contains no "name" field')
try:
return jsonify(AssayType(name).to_json())
except Exception as e:
abort(400, str(e))
####################################################################################################
## API
####################################################################################################
# Both HTTP GET and HTTP POST can be used to execute search with body against ElasticSearch REST API.
# general search uses the DEFAULT_INDEX
@app.route('/search', methods = ['GET', 'POST'])
def search():
# Always expect a json body
request_json_required(request)
logger.info("======search with no index provided======")
    logger.info("default_index: " + DEFAULT_INDEX_WITHOUT_PREFIX)
# Determine the target real index in Elasticsearch to be searched against
# Use the DEFAULT_INDEX_WITHOUT_PREFIX since /search doesn't take any index
target_index = get_target_index(request, DEFAULT_INDEX_WITHOUT_PREFIX)
# get URL for that index
es_url = INDICES['indices'][DEFAULT_INDEX_WITHOUT_PREFIX]['elasticsearch']['url'].strip('/')
# Return the elasticsearch resulting json data as json string
return execute_query('_search', request, target_index, es_url)
# Both HTTP GET and HTTP POST can be used to execute search with body against ElasticSearch REST API.
# Note: the index in the URL is not the real index in Elasticsearch, it's that index without prefix
@app.route('/<index_without_prefix>/search', methods = ['GET', 'POST'])
def search_by_index(index_without_prefix):
# Always expect a json body
request_json_required(request)
# Make sure the requested index in URL is valid
validate_index(index_without_prefix)
logger.info("======requested index_without_prefix======")
logger.info(index_without_prefix)
# Determine the target real index in Elasticsearch to be searched against
target_index = get_target_index(request, index_without_prefix)
# get URL for that index
es_url = INDICES['indices'][index_without_prefix]['elasticsearch']['url'].strip('/')
# Return the elasticsearch resulting json data as json string
return execute_query('_search', request, target_index, es_url)
# HTTP GET can be used to execute search with body against ElasticSearch REST API.
@app.route('/count', methods = ['GET'])
def count():
# Always expect a json body
request_json_required(request)
logger.info("======count with no index provided======")
# Determine the target real index in Elasticsearch to be searched against
target_index = get_target_index(request, DEFAULT_INDEX_WITHOUT_PREFIX)
# get URL for that index
es_url = INDICES['indices'][DEFAULT_INDEX_WITHOUT_PREFIX]['elasticsearch']['url'].strip('/')
# Return the elasticsearch resulting json data as json string
return execute_query('_count', request, target_index, es_url)
# HTTP GET can be used to execute search with body against ElasticSearch REST API.
# Note: the index in the URL is not the real index in Elasticsearch, it's that index without prefix
@app.route('/<index_without_prefix>/count', methods = ['GET'])
def count_by_index(index_without_prefix):
# Always expect a json body
request_json_required(request)
# Make sure the requested index in URL is valid
validate_index(index_without_prefix)
logger.info("======requested index_without_prefix======")
logger.info(index_without_prefix)
# Determine the target real index in Elasticsearch to be searched against
target_index = get_target_index(request, index_without_prefix)
# get URL for that index
es_url = INDICES['indices'][index_without_prefix]['elasticsearch']['url'].strip('/')
# Return the elasticsearch resulting json data as json string
return execute_query('_count', request, target_index, es_url)
# Get a list of indices
@app.route('/indices', methods = ['GET'])
def indices():
# Return the resulting json data as json string
result = {
"indices": get_filtered_indices()
}
return jsonify(result)
# Get the status of Elasticsearch cluster by calling the health API
# This shows the connection status and the cluster health status (if connected)
@app.route('/status', methods = ['GET'])
def status():
response_data = {
# Use strip() to remove leading and trailing spaces, newlines, and tabs
'version': ((Path(__file__).absolute().parent.parent / 'VERSION').read_text()).strip(),
'build': ((Path(__file__).absolute().parent.parent / 'BUILD').read_text()).strip(),
'elasticsearch_connection': False
}
target_url = DEFAULT_ELASTICSEARCH_URL + '/_cluster/health'
#target_url = app.config['ELASTICSEARCH_URL'] + '/_cluster/health'
resp = requests.get(url = target_url)
if resp.status_code == 200:
response_data['elasticsearch_connection'] = True
# If connected, we also get the cluster health status
status_dict = resp.json()
# Add new key
response_data['elasticsearch_status'] = status_dict['status']
return jsonify(response_data)
# This reindex function will also reindex Collection and Upload
# in addition to the Dataset, Donor, Sample entities
@app.route('/reindex/<uuid>', methods=['PUT'])
def reindex(uuid):
    # Reindexing an individual document doesn't require the token to belong
# to the HuBMAP-Data-Admin group
# since this is being used by entity-api and ingest-api too
token = get_user_token(request.headers)
try:
indexer = init_indexer(token)
threading.Thread(target=indexer.reindex, args=[uuid]).start()
# indexer.reindex(uuid) # for non-thread
logger.info(f"Started to reindex uuid: {uuid}")
except Exception as e:
logger.exception(e)
internal_server_error(e)
return f"Request of reindexing {uuid} accepted", 202
# Live reindex without first deleting and recreating the indices
# This just deletes the old document and add the latest document of each entity (if still available)
@app.route('/reindex-all', methods=['PUT'])
def reindex_all():
# The token needs to belong to the HuBMAP-Data-Admin group
# to be able to trigger a live reindex for all documents
token = get_user_token(request.headers, admin_access_required = True)
saved_request = request.headers
logger.debug(saved_request)
try:
indexer = init_indexer(token)
threading.Thread(target=reindex_all_uuids, args=[indexer, token]).start()
logger.info('Started live reindex all')
except Exception as e:
logger.exception(e)
internal_server_error(e)
return 'Request of live reindex all documents accepted', 202
####################################################################################################
## Internal Functions Used By API
####################################################################################################
# Throws error for 400 Bad Request with message
def bad_request_error(err_msg):
abort(400, description = err_msg)
# Throws error for 401 Unauthorized with message
def unauthorized_error(err_msg):
abort(401, description = err_msg)
# Throws error for 403 Forbidden with message
def forbidden_error(err_msg):
abort(403, description = err_msg)
# Throws error for 500 Internal Server Error with message
def internal_server_error(err_msg):
abort(500, description = err_msg)
# Get user information dict based on the http request (headers)
# `group_required` is a boolean, when True, 'hmgroupids' is in the output
def get_user_info_for_access_check(request, group_required):
return auth_helper_instance.getUserInfoUsingRequest(request, group_required)
"""
Parse the token from the Authorization header
Parameters
----------
request_headers: request.headers
The http request headers
admin_access_required : bool
If the token is required to belong to the HuBMAP-Data-Admin group, default to False
Returns
-------
str
The token string if valid
"""
def get_user_token(request_headers, admin_access_required = False):
# Get user token from Authorization header
# getAuthorizationTokens() also handles MAuthorization header but we are not using that here
try:
user_token = auth_helper_instance.getAuthorizationTokens(request_headers)
except Exception:
msg = "Failed to parse the Authorization token by calling commons.auth_helper.getAuthorizationTokens()"
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
internal_server_error(msg)
# The user_token is flask.Response on error
if isinstance(user_token, Response):
# The Response.data returns binary string, need to decode
unauthorized_error(user_token.data.decode())
if admin_access_required:
# By now the token is already a valid token
# But we also need to ensure the user belongs to HuBMAP-Data-Admin group
# in order to execute the live reindex-all
# Return a 403 response if the user doesn't belong to HuBMAP-Data-Admin group
if not user_in_hubmap_data_admin_group(request):
forbidden_error("Access not granted")
return user_token
"""
Check if the user with token belongs to the HuBMAP-Data-Admin group
Parameters
----------
request : flask.request
The flask http request object that containing the Authorization header
with a valid Globus nexus token for checking group information
Returns
-------
bool
True if the user belongs to HuBMAP-Data-Admin group, otherwise False
"""
def user_in_hubmap_data_admin_group(request):
try:
        # The property 'hmgroupids' is ALWAYS in the output when using get_user_info()
        # and the token in the request is a nexus_token
user_info = get_user_info(request)
hubmap_data_admin_group_uuid = auth_helper_instance.groupNameToId('HuBMAP-Data-Admin')['uuid']
except Exception as e:
# Log the full stack trace, prepend a line with our message
logger.exception(e)
# If the token is not a nexus token, no group information available
# The commons.hm_auth.AuthCache would return a Response with 500 error message
# We treat such cases as the user not in the HuBMAP-Data-Admin group
return False
return (hubmap_data_admin_group_uuid in user_info['hmgroupids'])
"""
Get user information dict based on the http request (headers)
The result will be used by the trigger methods
Parameters
----------
request : Flask request object
The Flask request passed from the API endpoint
Returns
-------
dict
A dict containing all the user info
{
"scope": "urn:globus:auth:scope:nexus.api.globus.org:groups",
"name": "First Last",
"iss": "https://auth.globus.org",
"client_id": "21f293b0-5fa5-4ee1-9e0e-3cf88bd70114",
"active": True,
"nbf": 1603761442,
"token_type": "Bearer",
"aud": ["nexus.api.globus.org", "21f293b0-5fa5-4ee1-9e0e-3cf88bd70114"],
"iat": 1603761442,
"dependent_tokens_cache_id": "af2d5979090a97536619e8fbad1ebd0afa875c880a0d8058cddf510fc288555c",
"exp": 1603934242,
"sub": "c0f8907a-ec78-48a7-9c85-7da995b05446",
"email": "email@pitt.edu",
"username": "username@pitt.edu",
"hmscopes": ["urn:globus:auth:scope:nexus.api.globus.org:groups"],
}
"""
def get_user_info(request):
# `group_required` is a boolean, when True, 'hmgroupids' is in the output
user_info = auth_helper_instance.getUserInfoUsingRequest(request, True)
logger.debug("======get_user_info()======")
logger.debug(user_info)
# It returns error response when:
# - invalid header or token
# - token is valid but not nexus token, can't find group info
if isinstance(user_info, Response):
# Bubble up the actual error message from commons
# The Response.data returns binary string, need to decode
msg = user_info.get_data().decode()
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
raise Exception(msg)
return user_info
# Always expect a json body
def request_json_required(request):
if not request.is_json:
bad_request_error("A JSON body and appropriate Content-Type header are required")
# We'll need to verify the requested index in URL is valid
def validate_index(index_without_prefix):
separator = ','
#indices = get_filtered_indices()
indices = INDICES['indices'].keys()
if index_without_prefix not in indices:
bad_request_error(f"Invalid index name. Use one of the following: {separator.join(indices)}")
# Determine the target real index in Elasticsearch based on the request header and given index (without prefix)
# The Authorization header with globus token is optional
# Case #1: Authorization header is missing, default to use the `hm_public_<index_without_prefix>`.
# Case #2: Authorization header with valid token, but the member doesn't belong to the HuBMAP-Read group, direct the call to `hm_public_<index_without_prefix>`.
# Case #3: Authorization header is present but with an invalid or expired token, return 401 (if someone is sending a token, they might be expecting more than public stuff).
# Case #4: Authorization header is present with a valid token that has the group access, direct the call to `hm_consortium_<index_without_prefix>`.
def get_target_index(request, index_without_prefix):
# Case #1 and #2
target_index = INDICES['indices'][index_without_prefix]['public']
# Keys in request.headers are case insensitive
if 'Authorization' in request.headers:
# user_info is a dict
user_info = get_user_info_for_access_check(request, True)
logger.info("======user_info======")
logger.info(user_info)
# Case #3
if isinstance(user_info, Response):
# Notify the client with 401 error message
unauthorized_error("The globus token in the HTTP 'Authorization: Bearer <globus-token>' header is either invalid or expired.")
# Otherwise, we check user_info['hmgroupids'] list
# Key 'hmgroupids' presents only when group_required is True
else:
# Case #4
if app.config['GLOBUS_HUBMAP_READ_GROUP_UUID'] in user_info['hmgroupids']:
#target_index = app.config['PRIVATE_INDEX_PREFIX'] + index_without_prefix
target_index = INDICES['indices'][index_without_prefix]['private']
return target_index
# Make a call to Elasticsearch
def execute_query(query_against, request, index, es_url, query=None):
supported_query_against = ['_search', '_count']
separator = ','
if query_against not in supported_query_against:
bad_request_error(f"Query against '{query_against}' is not supported by Search API. Use one of the following: {separator.join(supported_query_against)}")
# Determine the target real index in Elasticsearch to be searched against
#index = get_target_index(request, index_without_prefix)
#target_url = app.config['ELASTICSEARCH_URL'] + '/' + target_index + '/' + query_against
#es_url = INDICES['indices'][index_without_prefix]['elasticsearch']['url'].strip('/')
logger.debug('es_url')
logger.debug(es_url)
logger.debug(type(es_url))
# use the index es connection
target_url = es_url + '/' + index + '/' + query_against
logger.debug("Target url: " + target_url)
if query is None:
# Parse incoming json string into json data(python dict object)
json_data = request.get_json()
# All we need to do is to simply pass the search json to elasticsearch
# The request json may contain "access_group" in this case
# Will also pass through the query string in URL
target_url = target_url + get_query_string(request.url)
# Make a request with json data
# The use of json parameter converts python dict to json string and adds content-type: application/json automatically
else:
json_data = query
logger.debug(json_data)
resp = requests.post(url=target_url, json=json_data)
logger.debug("==========response==========")
logger.debug(resp)
try:
return jsonify(resp.json())
except Exception as e:
logger.debug(e)
        raise
# Get the query string from the original request
def get_query_string(url):
query_string = ''
parsed_url = urlparse(url)
logger.debug("======parsed_url======")
logger.debug(parsed_url)
# Add the ? at beginning of the query string if not empty
    if parsed_url.query:
query_string = '?' + parsed_url.query
return query_string
# Get a list of entity uuids via entity-api for a given entity type:
# Collection, Donor, Sample, Dataset, Submission. Case-insensitive.
def get_uuids_by_entity_type(entity_type, token):
entity_type = entity_type.lower()
request_headers = create_request_headers_for_auth(token)
# Use different entity-api endpoint for Collection
if entity_type == 'collection':
#url = app.config['ENTITY_API_URL'] + "/collections?property=uuid"
url = DEFAULT_ENTITY_API_URL + "/collections?property=uuid"
else:
#url = app.config['ENTITY_API_URL'] + "/" + entity_type + "/entities?property=uuid"
url = DEFAULT_ENTITY_API_URL + "/" + entity_type + "/entities?property=uuid"
response = requests.get(url, headers = request_headers, verify = False)
if response.status_code != 200:
internal_server_error("get_uuids_by_entity_type() failed to make a request to entity-api for entity type: " + entity_type)
uuids_list = response.json()
return uuids_list
# Create a dict with HTTP Authorization header with Bearer token
def create_request_headers_for_auth(token):
auth_header_name = 'Authorization'
auth_scheme = 'Bearer'
headers_dict = {
# Don't forget the space between scheme and the token value
auth_header_name: auth_scheme + ' ' + token
}
return headers_dict
def get_uuids_from_es(index, es_url):
uuids = []
size = 10_000
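    # Page through the index with from/size pagination, fetching up to `size` ids per request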
query = {
"size": size,
"from": len(uuids),
"_source": ["_id"],
"query": {
"bool": {
"must": [],
"filter": [
{
"match_all": {}
}
],
"should": [],
"must_not": []
}
}
}
end_of_list = False
while not end_of_list:
logger.debug("Searching ES for uuids...")
logger.debug(es_url)
resp = execute_query('_search', None, index, es_url, query)
logger.debug('Got a response from ES...')
ret_obj = resp.get_json()
uuids.extend(hit['_id'] for hit in ret_obj.get('hits').get('hits'))
total = ret_obj.get('hits').get('total').get('value')
if total <= len(uuids):
end_of_list = True
else:
query['from'] = len(uuids)
return uuids
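# Build an Indexer from the indices config, the app's client credentials and the caller's token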
def init_indexer(token):
return Indexer(
INDICES,
app.config['APP_CLIENT_ID'],
app.config['APP_CLIENT_SECRET'],
token
)
def reindex_all_uuids(indexer, token):
with app.app_context():
try:
logger.info("############# Reindex Live Started #############")
start = time.time()
# Make calls to entity-api to get a list of uuids for each entity type
donor_uuids_list = get_uuids_by_entity_type("donor", token)
sample_uuids_list = get_uuids_by_entity_type("sample", token)
dataset_uuids_list = get_uuids_by_entity_type("dataset", token)
upload_uuids_list = get_uuids_by_entity_type("upload", token)
public_collection_uuids_list = get_uuids_by_entity_type("collection", token)
logger.debug("merging sets into a one list...")
# Merge into a big list that with no duplicates
all_entities_uuids = set(donor_uuids_list + sample_uuids_list + dataset_uuids_list + upload_uuids_list + public_collection_uuids_list)
es_uuids = []
#for index in ast.literal_eval(app.config['INDICES']).keys():
logger.debug("looping through the indices...")
logger.debug(INDICES['indices'].keys())
index_names = get_all_indice_names()
logger.debug(index_names)
for index in index_names.keys():
all_indices = index_names[index]
# get URL for that index
es_url = INDICES['indices'][index]['elasticsearch']['url'].strip('/')
for actual_index in all_indices:
es_uuids.extend(get_uuids_from_es(actual_index, es_url))
es_uuids = set(es_uuids)
logger.debug("looping through the UUIDs...")
            # Remove entities found in Elasticsearch but no longer in neo4j
for uuid in es_uuids:
if uuid not in all_entities_uuids:
                    logger.debug(f"Entity of uuid: {uuid} found in Elasticsearch but no longer in neo4j. Delete it from Elasticsearch.")
indexer.delete(uuid)
logger.debug("Starting multi-thread reindexing ...")
            # Reindex in multi-threading mode for:
# - each public collection
# - each upload, only add to the hm_consortium_entities index (private index of the default)
# - each donor and its descendants in the tree
futures_list = []
results = []
with concurrent.futures.ThreadPoolExecutor() as executor:
public_collection_futures_list = [executor.submit(indexer.index_public_collection, uuid, reindex = True) for uuid in public_collection_uuids_list]
upload_futures_list = [executor.submit(indexer.index_upload, uuid, reindex = True) for uuid in upload_uuids_list]
donor_futures_list = [executor.submit(indexer.index_tree, uuid) for uuid in donor_uuids_list]
# Append the above three lists into one
futures_list = public_collection_futures_list + upload_futures_list + donor_futures_list
for f in concurrent.futures.as_completed(futures_list):
logger.debug(f.result())
end = time.time()
logger.info(f"############# Live Reindex-All Completed. Total time used: {end - start} seconds. #############")
except Exception as e:
logger.error(e)
# Get a dict mapping each index key to its actual public and private index names
def get_all_indice_names():
all_names = {}
try:
indices = INDICES['indices'].keys()
for i in indices:
index_info = {}
index_names = []
public_index = INDICES['indices'][i]['public']
private_index = INDICES['indices'][i]['private']
index_names.append(public_index)
index_names.append(private_index)
index_info[i] = index_names
all_names.update(index_info)
except Exception as e:
raise e
return all_names
# Get a list of filtered Elasticsearch indices to expose to end users without the prefix
def get_filtered_indices():
# just get all the defined index keys from the yml file
indices = INDICES['indices'].keys()
return list(indices)
# For local development/testing
if __name__ == "__main__":
try:
app.run(host='0.0.0.0', port="5005")
except Exception as e:
print("Error during starting debug server.")
print(str(e))
logger.error(e, exc_info=True)
print("Error during startup check the log file for further information")
| 38.934927
| 167
| 0.653728
| 0
| 0
| 0
| 0
| 6,882
| 0.234736
| 0
| 0
| 14,782
| 0.504195
|
3f55d1d3db5efaf77627369621529da7de9da985
| 149
|
py
|
Python
|
wordpress/apps.py
|
2e2a/django-wordpress
|
5417d98128ea6ad4308b250fdee65226e7deb628
|
[
"BSD-3-Clause"
] | 1
|
2021-12-03T19:55:27.000Z
|
2021-12-03T19:55:27.000Z
|
wordpress/apps.py
|
2e2a/django-wordpress
|
5417d98128ea6ad4308b250fdee65226e7deb628
|
[
"BSD-3-Clause"
] | null | null | null |
wordpress/apps.py
|
2e2a/django-wordpress
|
5417d98128ea6ad4308b250fdee65226e7deb628
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class WordpressAppConfig(AppConfig):
name = 'wordpress'
default_auto_field = 'django.db.models.AutoField'
| 24.833333
| 53
| 0.771812
| 113
| 0.758389
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.261745
|
3f5944acb466684ca6235f591cf5e26e8e10c295
| 2,613
|
py
|
Python
|
libs/libgmp/libgmp.py
|
wrobelda/craft-blueprints-kde
|
366f460cecd5baebdf3a695696767c8c0e5e7c7e
|
[
"BSD-2-Clause"
] | 14
|
2017-09-04T09:01:03.000Z
|
2022-01-04T20:09:00.000Z
|
libs/libgmp/libgmp.py
|
wrobelda/craft-blueprints-kde
|
366f460cecd5baebdf3a695696767c8c0e5e7c7e
|
[
"BSD-2-Clause"
] | 14
|
2017-12-15T08:11:22.000Z
|
2020-12-29T19:11:13.000Z
|
libs/libgmp/libgmp.py
|
wrobelda/craft-blueprints-kde
|
366f460cecd5baebdf3a695696767c8c0e5e7c7e
|
[
"BSD-2-Clause"
] | 19
|
2017-09-05T19:16:21.000Z
|
2020-10-18T12:46:06.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018 Łukasz Wojniłowicz <lukasz.wojnilowicz@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import info
class subinfo(info.infoclass):
def setTargets(self):
self.targets["6.1.2"] = "https://gmplib.org/download/gmp/gmp-6.1.2.tar.bz2"
self.targetDigests['6.1.2'] = (['5275bb04f4863a13516b2f39392ac5e272f5e1bb8057b18aec1c9b79d73d8fb2'], CraftHash.HashAlgorithm.SHA256)
self.targetInstSrc["6.1.2"] = "gmp-6.1.2"
self.defaultTarget = "6.1.2"
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = None
if CraftCore.compiler.isMinGW():
self.buildDependencies["dev-utils/msys"] = None
from Package.AutoToolsPackageBase import *
from Package.VirtualPackageBase import *
class PackageAutoTools(AutoToolsPackageBase):
def __init__(self, **args):
AutoToolsPackageBase.__init__(self)
self.subinfo.options.package.withCompiler = False
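        # Build GMP as shared libraries (no static archives) with the C++ interface enabled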
self.subinfo.options.configure.args = "--disable-static --enable-shared --enable-cxx "
self.subinfo.options.useShadowBuild = False
if not CraftCore.compiler.isMSVC():
class Package(PackageAutoTools):
def __init__(self):
PackageAutoTools.__init__(self)
else:
class Package(VirtualPackageBase):
def __init__(self):
VirtualPackageBase.__init__(self)
| 42.836066
| 140
| 0.738615
| 1,095
| 0.418738
| 0
| 0
| 0
| 0
| 0
| 0
| 1,572
| 0.601147
|
3f599143770c118307d670eee5b87e03976f168c
| 9,712
|
py
|
Python
|
scluster/aws_create_resources.py
|
dorgun/ncluster
|
20ba95fb7250a5f7239d704b01bf468a57e8fb7b
|
[
"MIT"
] | null | null | null |
scluster/aws_create_resources.py
|
dorgun/ncluster
|
20ba95fb7250a5f7239d704b01bf468a57e8fb7b
|
[
"MIT"
] | null | null | null |
scluster/aws_create_resources.py
|
dorgun/ncluster
|
20ba95fb7250a5f7239d704b01bf468a57e8fb7b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Creates resources
# This script creates VPC/security group/keypair if not already present
import logging
import os
import sys
import time
from . import aws_util as u
from . import util
DRYRUN = False
DEBUG = True
# Names of Amazon resources that are created. These settings are fixed across
# all runs, and correspond to resources created once per user per region.
PUBLIC_TCP_RANGES = [
22, # ssh
(8888, 8899), # ipython notebook ports
6379, # redis port
(6006, 6016) # tensorboard ports
]
PUBLIC_UDP_RANGES = [(60000, 61000)] # mosh ports
logger = logging.getLogger(__name__)
def network_setup():
    """Creates VPC if it doesn't already exist, configures it for public
    internet access, returns vpc, security_group"""
ec2 = u.get_ec2_resource()
client = u.get_ec2_client()
existing_vpcs = u.get_vpc_dict()
zones = u.get_zones()
# create VPC from scratch. Remove this if default VPC works well enough.
vpc_name = u.get_vpc_name()
if u.get_vpc_name() in existing_vpcs:
logger.info("Reusing VPC " + vpc_name)
vpc = existing_vpcs[vpc_name]
else:
logger.info("Creating VPC " + vpc_name)
vpc = ec2.create_vpc(CidrBlock='192.168.0.0/16')
# enable DNS on the VPC
local_response = vpc.modify_attribute(EnableDnsHostnames={"Value": True})
assert u.is_good_response(local_response)
local_response = vpc.modify_attribute(EnableDnsSupport={"Value": True})
assert u.is_good_response(local_response)
vpc.create_tags(Tags=u.create_name_tags(vpc_name))
vpc.wait_until_available()
gateways = u.get_gateway_dict(vpc)
gateway_name = u.get_gateway_name()
if gateway_name in gateways:
logger.info("Reusing gateways " + gateway_name)
else:
logger.info("Creating internet gateway " + gateway_name)
ig = ec2.create_internet_gateway()
ig.attach_to_vpc(VpcId=vpc.id)
ig.create_tags(Tags=u.create_name_tags(gateway_name))
# check that attachment succeeded
attach_state = u.extract_attr_for_match(ig.attachments, State=-1, VpcId=vpc.id)
assert attach_state == 'available', "vpc %s is in state %s" % (vpc.id, attach_state)
route_table = vpc.create_route_table()
route_table_name = u.get_route_table_name()
route_table.create_tags(Tags=u.create_name_tags(route_table_name))
dest_cidr = '0.0.0.0/0'
route_table.create_route(DestinationCidrBlock=dest_cidr, GatewayId=ig.id)
assert len(zones) <= 16 # for cidr/20 to fit into cidr/16
ip = 0
for zone in zones:
cidr_block = '192.168.%d.0/20' % (ip,)
ip += 16
logging.info("Creating subnet %s in zone %s" % (cidr_block, zone))
subnet = vpc.create_subnet(CidrBlock=cidr_block, AvailabilityZone=zone)
subnet.create_tags(Tags=[{'Key': 'Name', 'Value': f'{vpc_name}-subnet'}, {'Key': 'Region', 'Value': zone}])
local_response = client.modify_subnet_attribute(MapPublicIpOnLaunch={'Value': True}, SubnetId=subnet.id)
assert u.is_good_response(local_response)
u.wait_until_available(subnet)
assert subnet.map_public_ip_on_launch, "Subnet doesn't enable public IP by default, why?"
route_table.associate_with_subnet(SubnetId=subnet.id)
existing_security_groups = u.get_security_group_dict(vpc.id)
security_group_name = u.get_security_group_name()
if security_group_name in existing_security_groups:
logger.info("Reusing security group " + security_group_name)
security_group = existing_security_groups[security_group_name]
assert security_group.vpc_id == vpc.id, f"Found security group {security_group} " \
f"attached to {security_group.vpc_id} but expected {vpc.id}"
else:
logging.info("Creating security group " + security_group_name)
security_group = ec2.create_security_group(
GroupName=security_group_name, Description=security_group_name,
VpcId=vpc.id)
cidr_ip = os.environ.get('SCLUSTER_SECURITY_GROUP_CidrIp', '0.0.0.0/0')
security_group.create_tags(Tags=u.create_name_tags(security_group_name))
# allow ICMP access for public ping
security_group.authorize_ingress(
CidrIp='0.0.0.0/0',
IpProtocol='icmp',
FromPort=-1,
ToPort=-1
)
# open public ports
# always include SSH port which is required for basic functionality
assert 22 in PUBLIC_TCP_RANGES, "Must enable SSH access"
for port in PUBLIC_TCP_RANGES:
if util.is_iterable(port):
assert len(port) == 2
from_port, to_port = port
else:
from_port, to_port = port, port
response = security_group.authorize_ingress(
IpProtocol="tcp",
CidrIp=cidr_ip,
FromPort=from_port,
ToPort=to_port
)
assert u.is_good_response(response)
for port in PUBLIC_UDP_RANGES:
if util.is_iterable(port):
assert len(port) == 2
from_port, to_port = port
else:
from_port, to_port = port, port
response = security_group.authorize_ingress(IpProtocol="udp",
CidrIp=cidr_ip,
FromPort=from_port,
ToPort=to_port)
assert u.is_good_response(response)
return vpc, security_group
def keypair_setup():
"""Creates keypair if necessary, saves private key locally, returns contents
of private key file."""
os.system('mkdir -p ' + u.PRIVATE_KEY_LOCATION)
keypair_name = u.get_keypair_name()
keypair = u.get_keypair_dict().get(keypair_name, None)
keypair_fn = u.get_keypair_fn()
if keypair:
print("Reusing keypair " + keypair_name)
# check that local pem file exists and is readable
assert os.path.exists(
keypair_fn), "Keypair %s exists, but corresponding .pem file %s is not found, delete keypair %s through " \
"console and run again to recreate keypair/.pem together" % (
keypair_name, keypair_fn, keypair_name)
keypair_contents = open(keypair_fn).read()
assert len(keypair_contents) > 0
else:
print("Creating keypair " + keypair_name)
ec2 = u.get_ec2_resource()
assert not os.path.exists(
keypair_fn), "previous keypair exists, delete it with 'sudo rm %s' and also delete corresponding " \
"keypair through console" % (keypair_fn)
keypair = ec2.create_key_pair(KeyName=keypair_name)
open(keypair_fn, 'w').write(keypair.key_material)
os.system('chmod 400 ' + keypair_fn)
return keypair
def placement_group_setup(group_name):
    """Creates placement group if necessary. Returns the placement
    group object (existing or newly created)."""
existing_placement_groups = u.get_placement_group_dict()
group = existing_placement_groups.get(group_name, None)
if group:
assert group.state == 'available'
assert group.strategy == 'cluster'
print("Reusing group ", group.name)
return group
print("Creating group " + group_name)
ec2 = u.get_ec2_resource()
group = ec2.create_placement_group(GroupName=group_name, Strategy='cluster')
return group
def create_resources():
logger.info(f"Creating {u.get_prefix()} resources in region {u.get_region()}")
vpc, security_group = network_setup()
keypair_setup() # saves private key locally to keypair_fn
# create EFS
efss = u.get_efs_dict()
efs_name = u.get_efs_name()
efs_id = efss.get(efs_name, '')
if not efs_id:
logger.info("Creating EFS " + efs_name)
efs_id = u.create_efs(efs_name)
else:
logger.info("Reusing EFS " + efs_name)
efs_client = u.get_efs_client()
# create mount target for each subnet in the VPC
# added retries because efs is not immediately available
max_failures = 10
retry_interval_sec = 1
for subnet in vpc.subnets.all():
for retry_attempt in range(max_failures):
try:
sys.stdout.write("Creating efs mount target for %s ... " % (subnet.availability_zone,))
sys.stdout.flush()
response = efs_client.create_mount_target(
FileSystemId=efs_id,
SubnetId=subnet.id,
SecurityGroups=[security_group.id]
)
if u.is_good_response(response):
logger.info("success")
break
except Exception as e:
if 'already exists' in str(e): # ignore "already exists" errors
logger.info('already exists')
break
# Takes couple of seconds for EFS to come online, with
# errors like this:
# Creating efs mount target for us-east-1f ... Failed with An error occurred (IncorrectFileSystemLifeCycleState) when calling the CreateMountTarget operation: None, retrying in 1 sec
logger.info("Got %s, retrying in %s sec" % (str(e), retry_interval_sec))
time.sleep(retry_interval_sec)
else:
logger.info("Giving up.")
if __name__ == '__main__':
create_resources()
| 37.9375
| 198
| 0.631899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,638
| 0.271623
|
3f59a6465e2607784678cb918b686e6250106802
| 142
|
py
|
Python
|
ex005-antecessorSucessor/005.py
|
KaiqueCassal/cursoEmVideoPython
|
9d37563045091e4d558e283d47a5a49378e9df71
|
[
"MIT"
] | 1
|
2021-08-11T04:38:33.000Z
|
2021-08-11T04:38:33.000Z
|
ex005-antecessorSucessor/005.py
|
KaiqueCassal/cursoEmVideoPython
|
9d37563045091e4d558e283d47a5a49378e9df71
|
[
"MIT"
] | null | null | null |
ex005-antecessorSucessor/005.py
|
KaiqueCassal/cursoEmVideoPython
|
9d37563045091e4d558e283d47a5a49378e9df71
|
[
"MIT"
] | null | null | null |
num = int(input('Digite um número inteiro: '))
print(f'O número: {num}'
f'\nO antecessor: {num - 1}'
f'\nO sucessor: {num + 1}')
| 23.666667
| 46
| 0.56338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 102
| 0.708333
|
3f59d7981fb0df6af9168f9da7f7187aa7eb35ac
| 2,253
|
py
|
Python
|
homeassistant/components/ihc/binary_sensor.py
|
jasperro/core
|
26d7b2164e8a971506790ae5af06f31abdf278b5
|
[
"Apache-2.0"
] | 7
|
2019-02-07T14:14:12.000Z
|
2019-07-28T06:56:10.000Z
|
homeassistant/components/ihc/binary_sensor.py
|
jasperro/core
|
26d7b2164e8a971506790ae5af06f31abdf278b5
|
[
"Apache-2.0"
] | 6
|
2021-02-08T20:54:31.000Z
|
2022-03-12T00:50:43.000Z
|
homeassistant/components/ihc/binary_sensor.py
|
jasperro/core
|
26d7b2164e8a971506790ae5af06f31abdf278b5
|
[
"Apache-2.0"
] | 2
|
2020-04-19T13:35:24.000Z
|
2020-04-19T13:35:51.000Z
|
"""Support for IHC binary sensors."""
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import CONF_TYPE
from . import IHC_CONTROLLER, IHC_INFO
from .const import CONF_INVERTING
from .ihcdevice import IHCDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the IHC binary sensor platform."""
if discovery_info is None:
return
devices = []
for name, device in discovery_info.items():
ihc_id = device["ihc_id"]
product_cfg = device["product_cfg"]
product = device["product"]
# Find controller that corresponds with device id
ctrl_id = device["ctrl_id"]
ihc_key = f"ihc{ctrl_id}"
info = hass.data[ihc_key][IHC_INFO]
ihc_controller = hass.data[ihc_key][IHC_CONTROLLER]
sensor = IHCBinarySensor(
ihc_controller,
name,
ihc_id,
info,
product_cfg.get(CONF_TYPE),
product_cfg[CONF_INVERTING],
product,
)
devices.append(sensor)
add_entities(devices)
class IHCBinarySensor(IHCDevice, BinarySensorDevice):
"""IHC Binary Sensor.
    The associated IHC resource can be any input or output from an IHC product
    or function block, but it must be a boolean ON/OFF resource.
"""
def __init__(
self,
ihc_controller,
name,
ihc_id: int,
info: bool,
sensor_type: str,
inverting: bool,
product=None,
) -> None:
"""Initialize the IHC binary sensor."""
super().__init__(ihc_controller, name, ihc_id, info, product)
self._state = None
self._sensor_type = sensor_type
self.inverting = inverting
@property
def device_class(self):
"""Return the class of this sensor."""
return self._sensor_type
@property
def is_on(self):
"""Return true if the binary sensor is on/open."""
return self._state
def on_ihc_change(self, ihc_id, value):
"""IHC resource has changed."""
if self.inverting:
self._state = not value
else:
self._state = value
self.schedule_update_ha_state()
| 28.884615
| 74
| 0.624057
| 1,128
| 0.500666
| 0
| 0
| 233
| 0.103418
| 0
| 0
| 513
| 0.227696
|
3f5c9bb50fc14ea221608e07d43fdec0123aef80
| 1,009
|
py
|
Python
|
script/TuneLR.py
|
yipeiw/parameter_server
|
07cbfbf2dc727ee0787d7e66e58a1f7fd8333aff
|
[
"Apache-2.0"
] | null | null | null |
script/TuneLR.py
|
yipeiw/parameter_server
|
07cbfbf2dc727ee0787d7e66e58a1f7fd8333aff
|
[
"Apache-2.0"
] | null | null | null |
script/TuneLR.py
|
yipeiw/parameter_server
|
07cbfbf2dc727ee0787d7e66e58a1f7fd8333aff
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os.path as path
import sys
tmpDir = '../config/tmp/'
logDir = '../config/tmp/log/'
conffile = sys.argv[1]
runfile=sys.argv[2]
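# Learning rates to sweep; one config file and one log file are generated per value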
lr = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
fout = open(runfile, 'w')
fout.write("#!/bin/bash\n\n\n")
fws = {}
confname = path.splitext(path.basename(conffile))[0]
loglist = confname+'.meta.log'
fl = open(loglist, 'w')
for i in range(0, len(lr)):
filename = confname+'_'+str(lr[i])
tmpfile = path.join(tmpDir, filename+'.conf')
logfile = path.join(logDir, filename + '.txt')
fws[i] = open(tmpfile, 'w')
fout.write("echo \""+"./local.sh 1 4 "+tmpfile + " 2>"+logfile+'\"\n\n')
fout.write("./local.sh 1 4 "+tmpfile + " 2>"+logfile+'\n\n\n')
fl.write(logfile+'\n')
fout.close()
fl.close()
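# Copy the base config into each per-learning-rate config, overriding only the "eta" line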
for line in open(conffile):
if line.find("eta")==0:
for i in range(0, len(lr)):
output = "eta: "+str(lr[i]) + '\n'
fws[i].write(output)
else:
for i in range(0, len(lr)):
fws[i].write(line)
for i in range(0, len(lr)):
fws[i].close()
| 22.422222
| 73
| 0.607532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 201
| 0.199207
|
3f5ccedf65dad52ce01efa31808cc2b63ebe8af6
| 9,770
|
py
|
Python
|
pc.py
|
Omar8345/tic-tac-toe
|
e7fe6d9699bef3297227058defbb6d4ff35f10f4
|
[
"MIT"
] | null | null | null |
pc.py
|
Omar8345/tic-tac-toe
|
e7fe6d9699bef3297227058defbb6d4ff35f10f4
|
[
"MIT"
] | null | null | null |
pc.py
|
Omar8345/tic-tac-toe
|
e7fe6d9699bef3297227058defbb6d4ff35f10f4
|
[
"MIT"
] | 1
|
2022-02-10T17:47:27.000Z
|
2022-02-10T17:47:27.000Z
|
# Tic Tac Toe Game
# Original repository: (https://github.com/Omar8345/tic-tac-toe)
# Author: Omar Mostafa
# Date: 08/02/2022
# Version: 1.0
# Description: Tic Tac Toe Game made using Python Tkinter (Open Source)
# This game is a simple game that can be played with two players
# and can be played with a computer.
##### CODING STARTS HERE #####
# Importing the necessary libraries
from itertools import tee
import tkinter
import random
import time
from tkinter import messagebox
from numpy import empty
from time import sleep as sleep
try:
# Tkinter
window = tkinter.Tk()
window.title("Tic Tac Toe")
    window.resizable(0, 0)  # Disable window resizing so the 3x3 grid keeps its fixed size
# Window icon
    window.iconbitmap(r"img\XO.ico")
# Tkinter game buttons
# create 9 tkinter buttons
b1 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b1))
b1.grid(row=1, column=0)
b2 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b2))
b2.grid(row=1, column=1)
b3 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b3))
b3.grid(row=1, column=2)
b4 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b4))
b4.grid(row=2, column=0)
b5 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b5))
b5.grid(row=2, column=1)
b6 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b6))
b6.grid(row=2, column=2)
b7 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b7))
b7.grid(row=3, column=0)
b8 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b8))
b8.grid(row=3, column=1)
b9 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b9))
b9.grid(row=3, column=2)
# create a list to store the buttons
buttons = [b1, b2, b3, b4, b5, b6, b7, b8, b9]
# create a list to store the values of the buttons
values = []
# when button clicked, it puts X in the button
def btn_click(buttons):
# make clicked button disabled
buttons.config(state=tkinter.DISABLED)
# check if button contains O
if buttons['text'] == "O":
None
elif buttons['text'] == " ":
buttons.config(text="X")
buttons.config(bg="red")
elif buttons['text'] == "X":
None
else:
None
# check if 1st row is equal to X
if b1['text'] == "X" and b2['text'] == "X" and b3['text'] == "X":
print("X wins")
tkinter.messagebox.showinfo("Winner", "X wins")
# stop the game
window.destroy()
# check if 2nd row is equal to X
elif b4['text'] == "X" and b5['text'] == "X" and b6['text'] == "X":
print("X wins")
tkinter.messagebox.showinfo("Winner", "X wins")
# stop the game
window.destroy()
# check if 3rd row is equal to X
elif b7['text'] == "X" and b8['text'] == "X" and b9['text'] == "X":
print("X wins")
tkinter.messagebox.showinfo("Winner", "X wins")
# stop the game
window.destroy()
# check if 1st column is equal to X
elif b1['text'] == "X" and b4['text'] == "X" and b7['text'] == "X":
print("X wins")
tkinter.messagebox.showinfo("Winner", "X wins")
# stop the game
window.destroy()
# check if 2nd column is equal to X
elif b2['text'] == "X" and b5['text'] == "X" and b8['text'] == "X":
print("X wins")
tkinter.messagebox.showinfo("Winner", "X wins")
# stop the game
window.destroy()
# check if 3rd column is equal to X
elif b3['text'] == "X" and b6['text'] == "X" and b9['text'] == "X":
print("X wins")
tkinter.messagebox.showinfo("Winner", "X wins")
# stop the game
window.destroy()
# check if 1st diagonal is equal to X
elif b1['text'] == "X" and b5['text'] == "X" and b9['text'] == "X":
print("X wins")
tkinter.messagebox.showinfo("Winner", "X wins")
# stop the game
window.destroy()
# check if 2nd diagonal is equal to X
elif b3['text'] == "X" and b5['text'] == "X" and b7['text'] == "X":
print("X wins")
tkinter.messagebox.showinfo("Winner", "X wins")
# stop the game
window.destroy()
else:
emptybuttons = []
if b1['text'] == " ":
emptybuttons.append(b1)
if b2['text'] == " ":
emptybuttons.append(b2)
if b3['text'] == " ":
emptybuttons.append(b3)
            if b4['text'] == " ":
emptybuttons.append(b4)
            if b5['text'] == " ":
emptybuttons.append(b5)
if b6['text'] == " ":
emptybuttons.append(b6)
if b7['text'] == " ":
emptybuttons.append(b7)
if b8['text'] == " ":
emptybuttons.append(b8)
if b9['text'] == " ":
emptybuttons.append(b9)
# randomly select a button from the list
import random
random_button = random.choice(emptybuttons)
# change button text to O
random_button.config(text="O")
# make button disabled
random_button.config(state=tkinter.DISABLED)
# make O blue
random_button.config(bg="blue")
# clear the list
emptybuttons.clear()
# check if 1st row is equal to O
if b1['text'] == "O" and b2['text'] == "O" and b3['text'] == "O":
print("O wins")
# alert
tkinter.messagebox.showinfo("Winner", "O wins")
# stop the game
window.destroy()
# check if 2nd row is equal to O
elif b4['text'] == "O" and b5['text'] == "O" and b6['text'] == "O":
print("O wins")
tkinter.messagebox.showinfo("Winner", "O wins")
# stop the game
window.destroy()
# check if 3rd row is equal to O
elif b7['text'] == "O" and b8['text'] == "O" and b9['text'] == "O":
print("O wins")
tkinter.messagebox.showinfo("Winner", "O wins")
# stop the game
window.destroy()
# check if 1st column is equal to O
elif b1['text'] == "O" and b4['text'] == "O" and b7['text'] == "O":
print("O wins")
tkinter.messagebox.showinfo("Winner", "O wins")
# stop the game
window.destroy()
# check if 2nd column is equal to O
elif b2['text'] == "O" and b5['text'] == "O" and b8['text'] == "O":
print("O wins")
tkinter.messagebox.showinfo("Winner", "O wins")
# stop the game
window.destroy()
# check if 3rd column is equal to O
elif b3['text'] == "O" and b6['text'] == "O" and b9['text'] == "O":
print("O wins")
tkinter.messagebox.showinfo("Winner", "O wins")
# stop the game
window.destroy()
# check if 1st diagonal is equal to O
elif b1['text'] == "O" and b5['text'] == "O" and b9['text'] == "O":
print("O wins")
tkinter.messagebox.showinfo("Winner", "O wins")
# stop the game
window.destroy()
# check if 2nd diagonal is equal to O
elif b3['text'] == "O" and b5['text'] == "O" and b7['text'] == "O":
print("O wins")
tkinter.messagebox.showinfo("Winner", "O wins")
# stop the game
window.destroy()
            # check if the board is full (draw)
            elif b1['text'] != " " and b2['text'] != " " and b3['text'] != " " and b4['text'] != " " and b5['text'] != " " and b6['text'] != " " and b7['text'] != " " and b8['text'] != " " and b9['text'] != " ":
print("Draw")
tkinter.messagebox.showinfo("Winner", "Draw, Game Over!")
# stop the game
window.destroy()
except:
None
def run_game():
window.mainloop()
if __name__ == "__main__":
run_game()
else:
    print("If you ran the game using launcher.py or launcher.exe,")
sleep(1)
print('Ignore this message, thank you.')
print('------------------------------------------------------')
print("Error: This is a module and not a script.")
sleep(2)
print("Please run this module as a script.")
sleep(2)
print("If you actually did run it as a script, please report this bug.")
sleep(2)
print("Raise an issue on GitHub. More details:")
sleep(2)
print("__name__ != __main__")
sleep(2)
print(" __name__ does not equal __main__ and this was made to prevent errors.")
sleep(2)
print("If you are a developer and you are seeing this message, please report this bug and (if possible, more details).")
sleep(2)
    print("If you are not a developer and you are seeing this message, please report the details given above.")
sleep(2)
print("Thank you.")
sleep(2)
print("Omar Mostafa")
sleep(2)
    print("Hope you are in good health. Stay safe.")
sleep(1)
| 39.877551
| 125
| 0.523439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,604
| 0.368884
|
3f5d917c88eccde66a033389ff984f57d3efa801
| 2,336
|
py
|
Python
|
nvtabular/utils.py
|
deepyaman/NVTabular
|
b814b5ed9866be29d3c13fd00154965a3fec7fc0
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/utils.py
|
deepyaman/NVTabular
|
b814b5ed9866be29d3c13fd00154965a3fec7fc0
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/utils.py
|
deepyaman/NVTabular
|
b814b5ed9866be29d3c13fd00154965a3fec7fc0
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
try:
from numba import cuda
except ImportError:
cuda = None
try:
import psutil
except ImportError:
psutil = None
def _pynvml_mem_size(kind="total", index=0):
import pynvml
pynvml.nvmlInit()
size = None
if kind == "free":
size = int(pynvml.nvmlDeviceGetMemoryInfo(pynvml.nvmlDeviceGetHandleByIndex(index)).free)
elif kind == "total":
size = int(pynvml.nvmlDeviceGetMemoryInfo(pynvml.nvmlDeviceGetHandleByIndex(index)).total)
else:
raise ValueError("{0} not a supported option for device_mem_size.".format(kind))
pynvml.nvmlShutdown()
return size
def device_mem_size(kind="total", cpu=False):
# Use psutil (if available) for cpu mode
if cpu and psutil:
if kind == "total":
return psutil.virtual_memory().total
elif kind == "free":
return psutil.virtual_memory().free
elif cpu:
warnings.warn("Please install psutil for full cpu=True support.")
# Assume 1GB of memory
return int(1e9)
if kind not in ["free", "total"]:
raise ValueError("{0} not a supported option for device_mem_size.".format(kind))
try:
if kind == "free":
return int(cuda.current_context().get_memory_info()[0])
else:
return int(cuda.current_context().get_memory_info()[1])
except NotImplementedError:
if kind == "free":
# Not using NVML "free" memory, because it will not include RMM-managed memory
warnings.warn("get_memory_info is not supported. Using total device memory from NVML.")
size = _pynvml_mem_size(kind="total", index=0)
return size
def get_rmm_size(size):
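    # Round the size down to the nearest multiple of 256 bytes (the alignment RMM expects)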
return (size // 256) * 256
| 31.567568
| 99
| 0.675086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,009
| 0.431935
|
3f5e544534b4294729194a2befc9af168507a49c
| 1,908
|
py
|
Python
|
tests/segmentation/segmanagetest.py
|
j-h-m/Media-Journaling-Tool
|
4ab6961e2768dc002c9bbad182f83188631f01bd
|
[
"BSD-3-Clause"
] | null | null | null |
tests/segmentation/segmanagetest.py
|
j-h-m/Media-Journaling-Tool
|
4ab6961e2768dc002c9bbad182f83188631f01bd
|
[
"BSD-3-Clause"
] | null | null | null |
tests/segmentation/segmanagetest.py
|
j-h-m/Media-Journaling-Tool
|
4ab6961e2768dc002c9bbad182f83188631f01bd
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from maskgen import image_wrap
import numpy
from maskgen.segmentation.segmanage import select_region,segmentation_classification,convert_color
from tests.test_support import TestSupport
class SegManageTestCase(TestSupport):
def test_select_region(self):
img = numpy.zeros((500,500,3),dtype='uint8')
img_wrapper = image_wrap.ImageWrapper(img)
selector = numpy.zeros((500, 500, 3), dtype='uint8')
selector[30:40,30:40,:] = [200,200,100]
selector[130:140, 130:140, :] = [100, 200, 100]
selector_wrapper = image_wrap.ImageWrapper(selector)
result,rcolor = select_region(img_wrapper,selector_wrapper,convert_color('[200,200,100]'))
result = result.to_array()
self.assertTrue(numpy.all(result[30:40,30:40,3] == 255))
self.assertTrue(numpy.all(result[130:140, 130:140, 3] == 0))
self.assertEquals(rcolor,[200,200,100])
def test_select_region_anycolor(self):
img = numpy.zeros((500, 500, 3), dtype='uint8')
img_wrapper = image_wrap.ImageWrapper(img)
selector = numpy.zeros((500, 500, 3), dtype='uint8')
selector[30:40, 30:40, :] = [200, 200, 100]
selector[130:140, 130:140, :] = [100, 200, 100]
selector_wrapper = image_wrap.ImageWrapper(selector)
result,color = select_region(img_wrapper, selector_wrapper)
result = result.to_array()
self.assertTrue(numpy.all(result[30:40, 30:40, 3] != result[130:140, 130:140, 3]))
def test_segmentation_classification(self):
import os
filelocation = self.locateFile('./tests/data/classifications.csv')
self.assertEquals(segmentation_classification(os.path.dirname(filelocation),[100,100,200]),'other')
self.assertEquals(segmentation_classification(os.path.dirname(filelocation), [200,100,200]), 'house')
if __name__ == '__main__':
unittest.main()
| 44.372093
| 109
| 0.682914
| 1,654
| 0.866876
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 0.052935
|
3f5ea42854995b843b23cfd97be8ee560fd1c66b
| 6,932
|
py
|
Python
|
aimacode/tests/test_text.py
|
juandarr/AIND-planning
|
f74d41657d6f3d95a3b57ec4fd6e766d894d2f0d
|
[
"MIT"
] | null | null | null |
aimacode/tests/test_text.py
|
juandarr/AIND-planning
|
f74d41657d6f3d95a3b57ec4fd6e766d894d2f0d
|
[
"MIT"
] | null | null | null |
aimacode/tests/test_text.py
|
juandarr/AIND-planning
|
f74d41657d6f3d95a3b57ec4fd6e766d894d2f0d
|
[
"MIT"
] | null | null | null |
import pytest
import os
import random
from text import * # noqa
from utils import isclose, DataFile
def test_unigram_text_model():
flatland = DataFile("EN-text/flatland.txt").read()
wordseq = words(flatland)
P = UnigramTextModel(wordseq)
s, p = viterbi_segment('itiseasytoreadwordswithoutspaces', P)
assert s == [
'it', 'is', 'easy', 'to', 'read', 'words', 'without', 'spaces']
def test_shift_encoding():
code = shift_encode("This is a secret message.", 17)
assert code == 'Kyzj zj r jvtivk dvjjrxv.'
def test_shift_decoding():
flatland = DataFile("EN-text/flatland.txt").read()
ring = ShiftDecoder(flatland)
msg = ring.decode('Kyzj zj r jvtivk dvjjrxv.')
assert msg == 'This is a secret message.'
def test_rot13_encoding():
code = rot13('Hello, world!')
assert code == 'Uryyb, jbeyq!'
def test_rot13_decoding():
flatland = DataFile("EN-text/flatland.txt").read()
ring = ShiftDecoder(flatland)
msg = ring.decode(rot13('Hello, world!'))
assert msg == 'Hello, world!'
def test_counting_probability_distribution():
D = CountingProbDist()
for i in range(10000):
D.add(random.choice('123456'))
ps = [D[n] for n in '123456']
assert 1 / 7 <= min(ps) <= max(ps) <= 1 / 5
def test_ngram_models():
flatland = DataFile("EN-text/flatland.txt").read()
wordseq = words(flatland)
P1 = UnigramTextModel(wordseq)
P2 = NgramTextModel(2, wordseq)
P3 = NgramTextModel(3, wordseq)
# The most frequent entries in each model
assert P1.top(10) == [(2081, 'the'), (1479, 'of'), (1021, 'and'),
(1008, 'to'), (850, 'a'), (722, 'i'), (640, 'in'),
(478, 'that'), (399, 'is'), (348, 'you')]
assert P2.top(10) == [(368, ('of', 'the')), (152, ('to', 'the')),
(152, ('in', 'the')), (86, ('of', 'a')),
(80, ('it', 'is')),
(71, ('by', 'the')), (68, ('for', 'the')),
(68, ('and', 'the')), (62, ('on', 'the')),
(60, ('to', 'be'))]
assert P3.top(10) == [(30, ('a', 'straight', 'line')),
(19, ('of', 'three', 'dimensions')),
(16, ('the', 'sense', 'of')),
(13, ('by', 'the', 'sense')),
(13, ('as', 'well', 'as')),
(12, ('of', 'the', 'circles')),
(12, ('of', 'sight', 'recognition')),
(11, ('the', 'number', 'of')),
(11, ('that', 'i', 'had')), (11, ('so', 'as', 'to'))]
assert isclose(P1['the'], 0.0611, rel_tol=0.001)
assert isclose(P2['of', 'the'], 0.0108, rel_tol=0.01)
assert isclose(P3['', '', 'but'], 0.0, rel_tol=0.001)
assert isclose(P3['', '', 'but'], 0.0, rel_tol=0.001)
assert isclose(P3['so', 'as', 'to'], 0.000323, rel_tol=0.001)
assert P2.cond_prob.get(('went',)) is None
assert P3.cond_prob['in', 'order'].dictionary == {'to': 6}
def test_ir_system():
from collections import namedtuple
Results = namedtuple('IRResults', ['score', 'url'])
uc = UnixConsultant()
def verify_query(query, expected):
assert len(expected) == len(query)
for expected, (score, d) in zip(expected, query):
doc = uc.documents[d]
assert "{0:.2f}".format(
expected.score) == "{0:.2f}".format(score * 100)
assert os.path.basename(expected.url) == os.path.basename(doc.url)
return True
q1 = uc.query("how do I remove a file")
assert verify_query(q1, [
Results(76.83, "aimacode-data/MAN/rm.txt"),
Results(67.83, "aimacode-data/MAN/tar.txt"),
Results(67.79, "aimacode-data/MAN/cp.txt"),
Results(66.58, "aimacode-data/MAN/zip.txt"),
Results(64.58, "aimacode-data/MAN/gzip.txt"),
Results(63.74, "aimacode-data/MAN/pine.txt"),
Results(62.95, "aimacode-data/MAN/shred.txt"),
Results(57.46, "aimacode-data/MAN/pico.txt"),
Results(43.38, "aimacode-data/MAN/login.txt"),
Results(41.93, "aimacode-data/MAN/ln.txt"),
])
q2 = uc.query("how do I delete a file")
assert verify_query(q2, [
Results(75.47, "aimacode-data/MAN/diff.txt"),
Results(69.12, "aimacode-data/MAN/pine.txt"),
Results(63.56, "aimacode-data/MAN/tar.txt"),
Results(60.63, "aimacode-data/MAN/zip.txt"),
Results(57.46, "aimacode-data/MAN/pico.txt"),
Results(51.28, "aimacode-data/MAN/shred.txt"),
Results(26.72, "aimacode-data/MAN/tr.txt"),
])
q3 = uc.query("email")
assert verify_query(q3, [
Results(18.39, "aimacode-data/MAN/pine.txt"),
Results(12.01, "aimacode-data/MAN/info.txt"),
Results(9.89, "aimacode-data/MAN/pico.txt"),
Results(8.73, "aimacode-data/MAN/grep.txt"),
Results(8.07, "aimacode-data/MAN/zip.txt"),
])
q4 = uc.query("word count for files")
assert verify_query(q4, [
Results(128.15, "aimacode-data/MAN/grep.txt"),
Results(94.20, "aimacode-data/MAN/find.txt"),
Results(81.71, "aimacode-data/MAN/du.txt"),
Results(55.45, "aimacode-data/MAN/ps.txt"),
Results(53.42, "aimacode-data/MAN/more.txt"),
Results(42.00, "aimacode-data/MAN/dd.txt"),
Results(12.85, "aimacode-data/MAN/who.txt"),
])
q5 = uc.query("learn: date")
assert verify_query(q5, [])
q6 = uc.query("2003")
assert verify_query(q6, [
Results(14.58, "aimacode-data/MAN/pine.txt"),
Results(11.62, "aimacode-data/MAN/jar.txt"),
])
def test_words():
assert words("``EGAD!'' Edgar cried.") == ['egad', 'edgar', 'cried']
def test_canonicalize():
assert canonicalize("``EGAD!'' Edgar cried.") == 'egad edgar cried'
def test_translate():
text = 'orange apple lemon '
    func = lambda x: ('s ' + x) if x == ' ' else x
assert translate(text, func) == 'oranges apples lemons '
def test_bigrams():
assert bigrams('this') == ['th', 'hi', 'is']
assert bigrams(['this', 'is', 'a', 'test']) == [['this', 'is'], ['is', 'a'], ['a', 'test']]
# TODO: for .ipynb
"""
>>> P1.samples(20)
'you thought known but were insides of see in depend by us dodecahedrons just but i words are instead degrees'
>>> P2.samples(20)
'flatland well then can anything else more into the total destruction and circles teach others confine women must be added'
>>> P3.samples(20)
'flatland by edwin a abbott 1884 to the wake of a certificate from nature herself proving the equal sided triangle'
"""
if __name__ == '__main__':
pytest.main()
| 33.326923
| 124
| 0.542268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,420
| 0.349106
|
3f5f2e64673d1e50e2bb1b2fb203375596490210
| 5,983
|
py
|
Python
|
implementations/python3/pysatl/apdu_tool.py
|
sebastien-riou/SATL
|
b95d0e784d2e8e1384381d4d5b8b448d3d1798cf
|
[
"Apache-2.0"
] | 4
|
2020-05-13T10:13:55.000Z
|
2021-10-20T04:43:07.000Z
|
implementations/python3/pysatl/apdu_tool.py
|
TiempoSecure/SATL
|
b95d0e784d2e8e1384381d4d5b8b448d3d1798cf
|
[
"Apache-2.0"
] | 4
|
2020-07-22T16:06:31.000Z
|
2021-07-25T19:51:41.000Z
|
implementations/python3/pysatl/apdu_tool.py
|
TiempoSecure/SATL
|
b95d0e784d2e8e1384381d4d5b8b448d3d1798cf
|
[
"Apache-2.0"
] | 2
|
2019-05-12T21:15:00.000Z
|
2020-09-23T09:05:24.000Z
|
import re
import argparse
import os
import sys
import logging
import traceback
import pysatl
class EtsiTs101955(object):
COMMENT_MARKER = "REM"
COMMAND_MARKER = "CMD"
RESET_MARKER = "RST"
INIT_MARKER = "INI"
OFF_MARKER = "OFF"
def __init__(self, cmdHandler):
self._cmdHandler = cmdHandler
def runStream(self, scriptStream, *, line_cnt = 0):
lineBuf = ""
for line in scriptStream:
line_cnt += 1
if line in ["\n", "\r"]:
line = ""
elif len(line):
while line[-1] in ["\n", "\r"]: # remove end of line characters
line = line[:-1]
if 0 == len(line):
continue
if 0 == len(line):
continue
lineBreak = line[-1] == "\\"
if lineBreak:
lineBuf += line[:-1]
continue
line = lineBuf + line
lineBuf = ""
logging.debug("line %4d: '%s'" % (line_cnt, line))
if 0 == len(line):
continue
if line.startswith(EtsiTs101955.COMMENT_MARKER):
self._cmdHandler.comment(line[len(EtsiTs101955.COMMENT_MARKER):])
continue
tokens = line.split()
if tokens[0] == EtsiTs101955.RESET_MARKER:
self._cmdHandler.reset()
elif tokens[0] == EtsiTs101955.OFF_MARKER:
self._cmdHandler.off()
elif tokens[0] == EtsiTs101955.INIT_MARKER:
datstr = line[len(tokens[0]):]
dat = pysatl.Utils.ba(datstr)
self._cmdHandler.init(dat)
elif tokens[0] == EtsiTs101955.COMMAND_MARKER:
params = line[len(tokens[0]):]
cmd_params_pattern = re.compile(r"(.*)\[(.*)\]\s*\((.*)\)")
matchRes = cmd_params_pattern.match(params)
if matchRes is not None:
capdustr = matchRes.group(1)
leDatStr = matchRes.group(2).replace(" ","").replace("\t","").lower()
swStr = matchRes.group(3).replace(" ","").replace("\t","").lower()
else:
cmd_params_pattern = re.compile(r"(.*)\s*\((.*)\)")
matchRes = cmd_params_pattern.match(params)
capdustr = matchRes.group(1)
leDatStr = ""
swStr = matchRes.group(2)
swStr = swStr.replace(" ","").replace("\t","").lower()
capdu = pysatl.CAPDU.from_hexstr(capdustr)
rapdu = self._cmdHandler.apdu(capdu,leDatStr,swStr)
swlist = swStr.split(",")
swMatch = False
for sw in swlist:
swMatch |= rapdu.matchSW(sw)
if not swMatch:
raise Exception("RAPDU does not match any of the expected status word")
if not rapdu.matchDATA(leDatStr):
raise Exception("RAPDU does not match expected outgoing data")
else:
raise Exception("line %d, syntax not supported: '%s'"%(line_cnt,line))
def runFile(scriptFile, apduHandler):
tool = EtsiTs101955(apduHandler)
with open(scriptFile) as script:
tool.runStream(script)
class CmdHandler(object):
"""Base class for command handlers"""
def __init__(self):
pass
def apdu(self, capdu, leDatStr="", swStr=""):
dat = pysatl.Utils.ba(leDatStr.replace('x','0'))
sw=0
swStr = swStr.split(",")[0]
for i in range(0,len(swStr)):
d = swStr[i]
sw = (sw << 4) | int(d,16)
sw1=sw >> 8
sw2=sw & 0xFF
rapdu = pysatl.RAPDU(SW1=sw1,SW2=sw2, DATA=dat)
line = "CMD "
header_len = 4
lc=len(capdu.DATA)
if lc:
header_len = 5
if lc>255:
header_len = 7
else:
header_len = 5
if capdu.LE>256:
header_len = 7
dat = capdu.to_ba()
line += pysatl.Utils.hexstr(dat[:header_len])
if len(capdu.DATA) > 0:
line += " \\\n "
dat = capdu.DATA
while len(dat) > 16:
line += pysatl.Utils.hexstr(dat[0:16]) + " \\\n "
dat = dat[16:]
line += pysatl.Utils.hexstr(dat)
if len(rapdu.DATA) > 0:
line += " \\\n ["
dat = rapdu.DATA
while len(dat) > 16:
line += pysatl.Utils.hexstr(dat[0:16]) + " \\\n "
dat = dat[16:]
line += pysatl.Utils.hexstr(dat)
line += " ] \\\n"
elif capdu.LE > 0:
line += " []"
line += " ("+ pysatl.Utils.hexstr(rapdu.swBytes()) +")"
logging.info(line)
return rapdu
def reset(self):
logging.info("RST")
def init(self, dat):
logging.info("INIT "+pysatl.Utils.hexstr(dat))
def off(self):
logging.info("OFF")
def comment(self, msg):
logging.info("REM %s" % (msg))
class ApduTool(object):
"""ETSI TS 101 955 script player"""
def __init__(self, argv):
scriptname = os.path.basename(__file__)
parser = argparse.ArgumentParser(scriptname)
#TODO: pass argv to parser.
levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
parser.add_argument('--log-level', default='INFO', choices=levels)
parser.add_argument('--script', default="stdin", help='path to script', type=str)
options = parser.parse_args()
root = logging.getLogger()
root.setLevel(options.log_level)
if options.script == "stdin":
player = EtsiTs101955(CmdHandler())
player.runStream(sys.stdin)
else:
EtsiTs101955.runFile(options.script,CmdHandler())
if __name__ == "__main__":
ApduTool(sys.argv)
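# --- Hedged usage sketch (not part of the original module) ---
# Drives the ETSI TS 101 955 player above from an in-memory script: REM lines are
# comments, RST resets the card, and CMD lines carry "<C-APDU hex> (expected SW)"
# with optional "[expected data]". The default CmdHandler fabricates R-APDUs that
# satisfy the expectations, so no real card is needed; only pysatl must be installed.
def _example_run_stream():
    import io
    logging.basicConfig(level=logging.INFO)
    script = io.StringIO(
        "REM select a (hypothetical) applet\n"
        "RST\n"
        "CMD 80010000 (9000)\n"
    )
    EtsiTs101955(CmdHandler()).runStream(script)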
| 35.194118
| 91
| 0.503594
| 5,833
| 0.974929
| 0
| 0
| 0
| 0
| 0
| 0
| 621
| 0.103794
|
3f5f46e8ad8a46b0f24a67ae6817aebda546ccdc
| 11,979
|
py
|
Python
|
host-software/easyhid.py
|
kavka1983/key
|
9185c156fd40a1cab358b2464af4b27cacf71935
|
[
"MIT"
] | 1
|
2020-06-14T22:31:20.000Z
|
2020-06-14T22:31:20.000Z
|
host-software/easyhid.py
|
kavka1983/key
|
9185c156fd40a1cab358b2464af4b27cacf71935
|
[
"MIT"
] | null | null | null |
host-software/easyhid.py
|
kavka1983/key
|
9185c156fd40a1cab358b2464af4b27cacf71935
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 jem@seethis.link
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
import cffi
import ctypes.util
import platform
ffi = cffi.FFI()
ffi.cdef("""
struct hid_device_info {
char *path;
unsigned short vendor_id;
unsigned short product_id;
wchar_t *serial_number;
unsigned short release_number;
wchar_t *manufacturer_string;
wchar_t *product_string;
unsigned short usage_page;
unsigned short usage;
int interface_number;
struct hid_device_info *next;
};
typedef struct hid_device_ hid_device;
int hid_init(void);
int hid_exit(void);
struct hid_device_info* hid_enumerate(unsigned short, unsigned short);
void hid_free_enumeration (struct hid_device_info *devs);
hid_device* hid_open (unsigned short vendor_id, unsigned short product_id, const wchar_t *serial_number);
hid_device* hid_open_path (const char *path);
int hid_write (hid_device *device, const unsigned char *data, size_t length);
int hid_read_timeout (hid_device *dev, unsigned char *data, size_t length, int milliseconds);
int hid_read (hid_device *device, unsigned char *data, size_t length);
int hid_set_nonblocking (hid_device *device, int nonblock);
int hid_send_feature_report (hid_device *device, const unsigned char *data, size_t length);
int hid_get_feature_report (hid_device *device, unsigned char *data, size_t length);
void hid_close (hid_device *device);
int hid_get_manufacturer_string (hid_device *device, wchar_t *string, size_t maxlen);
int hid_get_product_string (hid_device *device, wchar_t *string, size_t maxlen);
int hid_get_serial_number_string (hid_device *device, wchar_t *string, size_t maxlen);
int hid_get_indexed_string (hid_device *device, int string_index, wchar_t *string, size_t maxlen);
const wchar_t* hid_error (hid_device *device);
""")
if "Windows" in platform.platform():
try:
hidapi = ffi.dlopen('hidapi.dll')
except:
hidapi = ffi.dlopen(ctypes.util.find_library('hidapi.dll'))
else:
try:
hidapi = ffi.dlopen('hidapi-libusb')
except:
hidapi = ffi.dlopen(ctypes.util.find_library('hidapi-libusb'))
def _c_to_py_str(val):
if val == ffi.NULL:
return None
new_val = ffi.string(val)
if type(new_val) == bytes or type(new_val) == bytearray:
return new_val.decode("utf-8")
else:
return new_val
class HIDException(Exception):
pass
class Device:
def __init__(self, cdata):
"""
"""
if cdata == ffi.NULL:
raise TypeError
self.path = _c_to_py_str(cdata.path)
self.vendor_id = cdata.vendor_id
self.product_id = cdata.product_id
self.release_number = cdata.release_number
self.manufacturer_string = _c_to_py_str(cdata.manufacturer_string)
self.product_string = _c_to_py_str(cdata.product_string)
self.serial_number = _c_to_py_str(cdata.serial_number)
self.usage_page = cdata.usage_page
self.usage = cdata.usage
self.interface_number = cdata.interface_number
self._device = None
self._is_open = False
def __del__(self):
self.close()
def open(self):
if self._is_open:
raise HIDException("Failed to open device: Device already open")
path = self.path.encode('utf-8')
dev = hidapi.hid_open_path(path)
if dev:
self._is_open = True
self._device = dev
else:
raise HIDException("Failed to open device")
def close(self):
"""
Closes the hid device
"""
if self._is_open:
self._is_open = False
hidapi.hid_close(self._device)
def description(self):
return self.info.description()
def write(self, data, report_id=0):
"""
Writes `bytes` to the hid device.
"""
if not self._is_open:
raise HIDException("Device not open")
write_data = bytearray([report_id]) + bytearray(data)
cdata = ffi.new("const unsigned char[]", bytes(write_data))
num_written = hidapi.hid_write(self._device, cdata, len(write_data))
if num_written < 0:
raise HIDException("Failed to write to HID device: " + str(num_written))
else:
return num_written
def read(self, size=64, timeout=None):
"""
Read from the hid device. Returns bytes read or None if no bytes read.
size: number of bytes to read
timeout: length to wait in milliseconds
"""
if not self._is_open:
raise HIDException("Device not open")
data = [0] * size
cdata = ffi.new("unsigned char[]", data)
bytes_read = 0
if timeout == None:
bytes_read = hidapi.hid_read(self._device, cdata, len(cdata))
else:
bytes_read = hidapi.hid_read_timeout(self._device, cdata, len(cdata), timeout)
if bytes_read < 0:
raise HIDException("Failed to read from HID device: " + str(bytes_read))
elif bytes_read == 0:
return None
else:
return bytearray(cdata)
def set_nonblocking(self, enable_nonblocking):
if not self._is_open:
raise HIDException("Device not open")
if type(enable_nonblocking) != bool:
raise TypeError
hidapi.hid_set_nonblocking(self._device, enable_nonblocking)
def is_open(self):
        return self._is_open
def is_connected(self):
"""
Checks if the USB device is still connected
"""
if self._is_open:
err = hidapi.hid_read_timeout(self._device, ffi.NULL, 0, 0)
if err == -1:
return False
else:
return True
else:
en = Enumeration(vid=self.vendor_id, pid=self.product_id).find(path=self.path)
if len(en) == 0:
return False
else:
return True
# int hid_send_feature_report (hid_device *device, const unsigned char *data, size_t length);
# def send_feature_report(self, data):
# cdata = ffi.new("const unsigned char[]", data)
# hidapi.hid_send_feature_report(self._device, cdata, length)
# pass
# def get_feature_report(self, size=64):
# hid_data = bytes([report_id]) + bytes(data)
# cdata = ffi.new("unsigned char[]", data)
# hidapi.hid_send_feature_report(self._device, cdata, length)
# pass
def get_error(self):
err_str = hidapi.hid_error(self._device)
if err_str == ffi.NULL:
return None
else:
return ffi.string(err_str)
def _get_prod_string_common(self, hid_fn):
max_len = 128
str_buf = ffi.new("wchar_t[]", bytearray(max_len).decode('utf-8'))
ret = hid_fn(self._device, str_buf, max_len)
if ret < 0:
            raise HIDException(self.get_error())
else:
assert(ret == 0)
return ffi.string(str_buf)
    # Probably don't need these except for get_indexed_string, since they won't
    # change from the values found in the enumeration
def get_manufacture_string(self):
"""
Get the manufacturer string of the device from its device descriptor
"""
return self._get_prod_string_common(hidapi.hid_get_manufacturer_string)
def get_product_string(self):
"""
Get the product string of the device from its device descriptor
"""
return self._get_prod_string_common(hidapi.hid_get_product_string)
def get_serial_number(self):
"""
Get the serial number string of the device from its device descriptor
"""
return self._get_prod_string_common(hidapi.hid_get_serial_number_string)
def get_indexed_string(self, index):
"""
Get the string with the given index from the device
"""
max_len = 128
str_buf = ffi.new("wchar_t[]", str(bytearray(max_len)))
ret = hidapi.hid_get_indexed_string(self._device, index, str_buf, max_len)
if ret < 0:
            raise HIDException(self.get_error())
elif ret == 0:
return None
else:
return ffi.string(str_buf).encode('utf-8')
def description(self):
return \
"""Device:
{} | {:x}:{:x} | {} | {} | {}
release_number: {}
usage_page: {}
usage: {}
interface_number: {}\
""".format(self.path,
self.vendor_id,
self.product_id,
self.manufacturer_string,
self.product_string,
self.serial_number,
self.release_number,
self.usage_page,
self.usage,
self.interface_number
)
class Enumeration:
def __init__(self, vid=0, pid=0):
self.device_list = _hid_enumerate(vid, pid)
def show(self):
for dev in self.device_list:
print(dev.description())
def find(self, vid=None, pid=None, serial=None, interface=None, \
path=None, release_number=None, manufacturer=None,
product=None, usage=None, usage_page=None):
"""
        Filter the devices in the HID enumeration list by the given attributes. This
        function is only aware of devices that were present when the object was created.
"""
result = []
for dev in self.device_list:
if vid and dev.vendor_id != vid:
continue
if pid and dev.product_id != pid:
continue
if serial and dev.serial_number != serial:
continue
if interface and dev.interface_number != interface:
continue
if path and dev.path != path:
continue
if manufacturer and dev.manufacturer_string != manufacturer:
continue
if product and dev.product_string != product:
continue
if release_number and dev.release_number != release_number:
continue
if usage and dev.usage != usage:
continue
if usage_page and dev.usage_page != usage_page:
continue
result.append(dev)
return result
def _hid_enumerate(vendor_id=0, product_id=0):
"""
Enumerates all the hid devices for VID:PID. Returns a list of `DeviceInfo`.
If vid is 0, then match any vendor id. Similarly, if pid is 0, match any
product id. If both are zero, enumerate all HID devices.
"""
start = hidapi.hid_enumerate(vendor_id, product_id)
result = []
    cur = start
# Copy everything into python list
while cur != ffi.NULL:
result.append(Device(cur))
cur = cur.next
# Free the C memory
hidapi.hid_free_enumeration(start)
return result
# def hid_open(vendor_id, product_id, serial=None):
# """
# """
# if serial == None:
# serial = ffi.NULL
# else:
# if type(serial) == bytes or type(serial) == bytearray:
# serial = serial.decode('utf-8')
# serial = ffi.new("wchar_t[]", serial)
# dev = hidapi.hid_open(vendor_id, product_id, serial)
# if dev:
# return Device(dev)
# else:
# None
if __name__ == "__main__":
# Examples
from easyhid import Enumeration
    # Stores an enumeration of all the connected USB HID devices
en = Enumeration()
# return a list of devices based on the search parameters
devices = en.find(manufacturer="Company", product="Widget", interface=3)
# print a description of the devices found
for dev in devices:
print(dev.description())
# open a device
dev.open()
# write some bytes to the device
dev.write(bytearray([0, 1, 2, 3]))
# read some bytes
print(dev.read())
# close a device
dev.close()
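    # --- Hedged continuation of the example (not in the original file) ---
    # Reuses the same hypothetical "Company"/"Widget" search result from above and
    # shows non-blocking reads plus a connectivity check before touching the device.
    if devices:
        dev = devices[0]
        dev.open()
        dev.set_nonblocking(True)       # hid_read() now returns immediately when idle
        if dev.is_connected():
            report = dev.read(size=64)  # None right away if the device has nothing to send
            print(report)
        dev.close()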
| 31.690476
| 105
| 0.622339
| 7,910
| 0.660322
| 0
| 0
| 0
| 0
| 0
| 0
| 4,714
| 0.393522
|
3f6013f8688ca16e27ef6533df61114f0ade964b
| 22,620
|
py
|
Python
|
rules/check_imported_dlls.py
|
deptofdefense/SalSA
|
7ef771398e3d59597bade95d0a23540de0842e2a
|
[
"MIT"
] | 84
|
2018-01-07T19:43:45.000Z
|
2021-12-23T14:17:44.000Z
|
rules/check_imported_dlls.py
|
deptofdefense/SalSA
|
7ef771398e3d59597bade95d0a23540de0842e2a
|
[
"MIT"
] | 7
|
2018-04-02T20:24:28.000Z
|
2019-06-07T21:48:04.000Z
|
rules/check_imported_dlls.py
|
deptofdefense/SalSA
|
7ef771398e3d59597bade95d0a23540de0842e2a
|
[
"MIT"
] | 18
|
2017-12-26T19:44:46.000Z
|
2021-09-13T12:21:02.000Z
|
"""
Align imported dlls/functions to executable functionality.
"""
import sys
# import all supported ordinal decodings
from rules.ordinal_mappings import advapi32
from rules.ordinal_mappings import cabinet
from rules.ordinal_mappings import comctl32
from rules.ordinal_mappings import mfc42
from rules.ordinal_mappings import msvbvm60
from rules.ordinal_mappings import ntdll
from rules.ordinal_mappings import odbc32
from rules.ordinal_mappings import oleaut32
from rules.ordinal_mappings import oledlg
from rules.ordinal_mappings import propsys
from rules.ordinal_mappings import shell32
from rules.ordinal_mappings import shlwapi
from rules.ordinal_mappings import ws2_32
from rules.ordinal_mappings import wsock32
# create ordinal mappings dictionary
ords2names = {}
ords2names['advapi32.dll'] = advapi32.mapping
ords2names['cabinet.dll'] = cabinet.mapping
ords2names['comctl32.dll'] = comctl32.mapping
ords2names['mfc42.dll'] = mfc42.mapping
ords2names['msvbvm60.dll'] = msvbvm60.mapping
ords2names['ntdll.dll'] = ntdll.mapping
ords2names['odbc32.dll'] = odbc32.mapping
ords2names['oleaut32.dll'] = oleaut32.mapping
ords2names['oledlg.dll'] = oledlg.mapping
ords2names['propsys.dll'] = propsys.mapping
ords2names['shell32.dll'] = shell32.mapping
ords2names['shlwapi.dll'] = shlwapi.mapping
ords2names['ws2_32.dll'] = ws2_32.mapping
ords2names['wsock32.dll'] = wsock32.mapping
# list of targeted functions and their descriptions
targets = {
    'accept': 'This function is used to listen for incoming connections on a socket. It is mostly used by malware to communicate with a command-and-control server.',
'AdjustTokenPrivileges': 'This function is used to enable or disable specific access privileges. In a process injection attack, this function is used by malware to gain additional permissions.',
'AttachThreadInput': 'This function attaches the input processing from one thread to another so that the second thread receives input events such as keyboard and mouse events. Keyloggers and other spyware use this function.',
'bind': 'This function is used to associate a local address to a socket in order to listen for incoming connections.',
'BitBlt': 'This function is used to copy graphic data from one device to another. Spyware sometimes uses this function to capture screenshots.',
'CertOpenSystemStore': 'This function is used to access the certificates stored on the local system.',
'CheckRemoteDebuggerPresent': 'Determines whether the specified process is being debugged. Used by malware to detect and evade reversing.',
    'connect': 'This function is used to connect to a remote socket. Malware often uses this low-level functionality to connect to a command-and-control server.',
'ConnectNamedPipe': 'This function is used to create a server pipe for interprocess communication that will wait for a client pipe to connect. Backdoors and reverse shells sometimes use ConnectNamedPipe to simplify connectivity to a command-and-control server.',
'ControlService': 'This function is used to start, stop, modify, or send a signal to a running service. If malware is using its own malicious service, code needs to be analyzed that implements the service in order to determine the purpose of the call.',
'CreateFile': 'Creates a new file or opens an existing file.',
'CreateFileMapping': 'This function is used to create a handle to a file mapping that loads a file into memory and makes it accessible via memory addresses. Launchers, loaders, and injectors use this function to read and modify PE files.',
'CreateMutex': 'This function creates a mutual exclusion object that can be used by malware to ensure that only a single instance of the malware is running on a system at any given time. Malware often uses fixed names for mutexes, which can be good host-based indicators to detect additional installations of the malware.',
'CreateProcess': 'This function creates and launches a new process. If malware creates a new process, new process needs to be analyzed as well.',
'CreateRemoteThread': 'This function is used to start a thread in a remote process. Launchers and stealth malware use CreateRemoteThread to inject code into a different process.',
'CreateService': 'This function is used to create a service that can be started at boot time. Malware uses CreateService for persistence, stealth, or to load kernel drivers.',
'CreateToolhelp32Snapshot': 'This function is used to create a snapshot of processes, heaps, threads, and modules. Malware often uses this function as part of code that iterates through processes or threads.',
'CryptAcquireContext': 'This function is often the first function used by malware to initialize the use of Windows encryption.',
'DeviceIoControl': 'This function sends a control message from user space to a device driver. Kernel malware that needs to pass information between user space and kernel space often use this function.',
    'DllFunctionCall': 'This function is used to import a DLL within a visual basic executable. This indicates malware with visual basic functionality.',
'EnableExecuteProtectionSupport': 'This function is used to modify the Data Execution Protection (DEP) settings of the host, making it more susceptible to attack.',
'EnumProcesses': 'This function is used to enumerate through running processes on the system. Malware often enumerates through processes to find a process into which to inject.',
'EnumProcessModules': 'This function is used to enumerate the loaded modules (executables and DLLs) for a given process. Malware enumerates through modules when doing an injection.',
'FindFirstFile': 'This function is used to search through a directory and enumerate the file system.',
'FindNextFile': 'This function is used to search through a directory and enumerate the file system.',
'FindResource': 'This function is used to find a resource in an executable or loaded DLL. Malware sometimes uses resources to store strings, configuration information, or other malicious files. If this function is used, then check for an .rsrc section in the malware`s PE header.',
'FindWindow': 'This function is used to search for an open window on the desktop. Sometimes this function is used as an anti-debugging technique to search for OllyDbg windows.',
'FtpOpenFile': 'This function is used to open a file on a remote FTP server.',
'FtpPutFile': 'This function is used to upload a file to remote FTP server.',
'GetAdaptersInfo': 'This function is used to obtain information about the network adapters on the system. Backdoors sometimes call GetAdaptersInfo in the information-gathering phase to gather information about infected machines. In some cases, it`s used to gather MAC addresses to check for VMware as part of anti-virtual machine techniques.',
'GetAsyncKeyState': 'This function is used to determine whether a particular key is being pressed. Malware sometimes uses this function to implement a keylogger.',
'GetClipboardData': 'This function is used to read user clipboard data and is sometimes used in keyloggers.',
'GetDC': 'This function returns a handle to a device context for a window or the whole screen. Spyware that takes screen captures often uses this function.',
'GetForegroundWindow': 'This function returns a handle to the window currently in the foreground of the desktop. Keyloggers commonly use this function to determine in which window the user is entering his keystrokes.',
'gethostbyname': 'This function is used to perform a DNS lookup on a particular hostname prior to making an IP connection to a remote host. Hostnames that serve as command-and-control servers often make good network-based signatures.',
'gethostname': 'This function is used to retrieve the hostname of the computer. Backdoors sometimes use gethostname in information gathering phase of the victim machine.',
'GetKeyState': 'This function is used by keyloggers to obtain the status of a particular key on the keyboard.',
'GetModuleFilename': 'This function returns the filename of a module that is loaded in the current process. Malware can use this function to modify or copy files in the currently running process.',
'GetModuleHandle': 'This function is used to obtain a handle to an already loaded module. Malware may use GetModuleHandle to locate and modify code in a loaded module or to search for a good location to inject code.',
'GetProcAddress': 'This function is used to retrieve the address of a function in a DLL loaded into memory. This is used to import functions from other DLLs in addition to the functions imported in the PE file header.',
'GetStartupInfo': 'This function is used to retrieve a structure containing details about how the current process was configured to run, such as where the standard handles are directed.',
    'GetSystemDefaultLangId': 'This function returns the default language settings for the system. These are used by malware specifically designed for region-based attacks.',
    'GetTempPath': 'This function returns the temporary file path. If malware calls this function, check whether it reads or writes any files in the temporary file path.',
'GetThreadContext': 'This function returns the context structure of a given thread. The context for a thread stores all the thread information, such as the register values and current state.',
'GetVersionEx': 'This function returns information about which version of Windows is currently running. This can be used as part of a victim survey, or to select between different offsets for undocumented structures that have changed between different versions of Windows.',
'GetWindowDC': 'This function retrieves the device context (DC) for the entire window, including title bar, menus, and scroll bars. Used to take a screenshot of a particular GUI window (like a browser).',
'GetWindowsDirectory': 'This function returns the file path to the Windows directory (usually C:\\Windows). Malware sometimes uses this call to determine into which directory to install additional malicious programs.',
'GetWindowText': 'This function gets the title of all program windows for the current user. Used to enumerate processes that have a GUI interface.',
'HttpOpenRequest': 'This function sets up the OS resources for an HTTP request.',
'HttpSendRequest': 'This function actually makes an outgoing HTTP connection.',
'inet_addr': 'This function converts an IP address string like 127.0.0.1 so that it can be used by functions such as connect. The string specified can sometimes be used as a network-based signature.',
'InternetOpen': 'This function initializes the high-level Internet access functions from WinINet, such as InternetOpenUrl and InternetReadFile. Searching for InternetOpen is a good way to find the start of Internet access functionality. One of the parameters to InternetOpen is the User-Agent, which can sometimes make a good network-based signature.',
'InternetOpenUrl': 'This function opens a specific URL for a connection using FTP, HTTP, or HTTPS.URLs, if fixed, can often be good network-based signatures.',
'InternetReadFile': 'This function reads data from a previously opened URL.',
'InternetWriteFile': 'This function writes data to a previously opened URL.',
'IsDebuggerPresent': 'Determines whether the calling process is being debugged by a user-mode debugger. Used by malware to detect and evade reversing.',
'IsNTAdmin': 'This function checks if the user has administrator privileges.',
'IsUserAnAdmin': 'This function checks if the user has administrator privileges.',
'IsWoW64Process': 'This function is used by a 32-bit process to determine if it is running on a 64-bit operating system.',
'LdrLoadDll': 'This is a low-level function to load a DLL into a process, just like LoadLibrary. Normal programs use LoadLibrary, and the presence of this import may indicate a program that is attempting to be stealthy.',
    'LoadLibrary': 'This is the standard function to load a DLL into a process at runtime.',
'LoadResource': 'This function loads a resource from a PE file into memory. Malware sometimes uses resources to store strings, configuration information, or other malicious files.',
'LsaEnumerateLogonSessions': 'This function is used to enumerate through logon sessions on the current system, which can be used as part of a credential stealer.',
'MapViewOfFile': 'This function is used to map a file into memory and makes the contents of the file accessible via memory addresses. Launchers, loaders, and injectors use this function to read and modify PE files. By using MapViewOfFile, the malware can avoid using WriteFile to modify the contents of a file.',
'MapVirtualKey': 'This function is used to translate a virtual-key code into a character value. It is often used by keylogging malware.',
    'Module32First/Module32Next': 'These functions are used to enumerate through modules loaded into a process. Injectors use them to determine where to inject code.',
'NetScheduleJobAdd': 'This function submits a request for a program to be run at a specified date and time. Malware can use NetScheduleJobAdd to run a different program. This is an important indicator to see the program that is scheduled to run at future time.',
'NetShareEnum': 'This function is used to enumerate network shares.',
'NtQueryDirectoryFile': 'This function returns information about files in a directory. Rootkits commonly hook this function in order to hide files.',
'NtQueryInformationProcess': 'This function is used to return various information about a specified process. This function is sometimes used as an anti-debugging technique because it can return the same information as CheckRemoteDebuggerPresent.',
'NtSetInformationProcess': 'This function is used to change the privilege level of a program or to bypass Data Execution Prevention (DEP).',
'OpenMutex': 'This function opens a handle to a mutual exclusion object that can be used by malware to ensure that only a single instance of malware is running on a system at any given time. Malware often uses fixed names for mutexes, which can be good host-based indicators.',
'OpenProcess': 'This function is used to open a handle to another process running on the system. This handle can be used to read and write to the other process memory or to inject code into the other process.',
'OutputDebugString': 'This function is used to output a string to a debugger if one is attached. This can be used as an anti-debugging technique.',
'PeekNamedPipe': 'This function is used to copy data from a named pipe without removing data from the pipe. This function is popular with reverse shells.',
'Process32First': 'This function is used to begin enumerating processes from a previous call to CreateToolhelp32Snapshot. Malware often enumerates through processes to find a process into which to inject.',
'Process32Next': 'This function is used to begin enumerating processes from a previous call to CreateToolhelp32Snapshot. Malware often enumerates through processes to find a process into which to inject.',
'QueueUserAPC': 'This function is used to execute code for a different thread. Malware sometimes uses QueueUserAPC to inject code into another process.',
'ReadProcessMemory': 'This function is used to read the memory of a remote process.',
'recv': 'This function is used to receive data from a remote machine. Malware often uses this function to receive data from a remote command-and-control server.',
'RegCreateKey': 'This function is used to create a handle to a new registry key for reading and editing. Registry keys are sometimes written as a way for software to achieve persistence on a host. The registry also contains a whole host of operating system and application setting information.',
'RegisterHotKey': 'This function is used to register a handler to be notified anytime a user enters a particular key combination (like CTRL-ALT-J), regardless of which window is active when the user presses the key combination. This function is sometimes used by spyware that remains hidden from the user until the key combination is pressed.',
'RegOpenKey': 'This function is used to open a handle to a registry key for reading and editing. Registry keys are sometimes written as a way for software to achieve persistence on a host. The registry also contains a whole host of operating system and application setting information.',
'ResumeThread': 'This function is used to resume a previously suspended thread. ResumeThread is used as part of several injection techniques.',
'RtlCreateRegistryKey': 'This function is used to create a registry from kernel-mode code.',
'RtlWriteRegistryValue': 'This function is used to write a value to the registry from kernel-mode code.',
'SamIConnect': 'This function is used to connect to the Security Account Manager (SAM) in order to make future calls that access credential information. Hash-dumping programs access the SAM database in order to retrieve the hash of users` login passwords.',
'SamIGetPrivateData': 'This function is used to query the private information about a specific user from the Security Account Manager (SAM) database. Hash-dumping programs access the SAM database in order to retrieve the hash of users` login passwords.',
'SamQueryInformationUse': 'This function is used to query information about a specific user in the Security Account Manager (SAM) database. Hash-dumping programs access the SAM database in order to retrieve the hash of users` login passwords.',
    'send': 'This function is used to send data to a remote machine. It is often used by malware to send data to a remote command-and-control server.',
'SetFileTime': 'This function is used to modify the creation, access, or last modified time of a file. Malware often uses this function to conceal malicious activity.',
'SetThreadContext': 'This function is used to modify the context of a given thread. Some injection techniques use SetThreadContext.',
'SetWindowsHookEx': 'This function is used to set a hook function to be called whenever a certain event is called. Commonly used with keyloggers and spyware, this function also provides an easy way to load a DLL into all GUI processes on the system. This function is sometimes added by the compiler.',
'SfcTerminateWatcherThread': 'This function is used to disable Windows file protection and modify files that otherwise would be protected.',
'ShellExecute': 'This function is used to execute another program.',
'StartServiceCtrlDispatcher': 'This function is used by a service to connect the main thread of the process to the service control manager. Any process that runs as a service must call this function within 30 seconds of startup. Locating this function in malware will tell that the function should be run as a service.',
'SQLConnect': 'This function establishes a connection with a driver and data source to allow for data to be shared with the driver/data source.',
'SuspendThread': 'This function is used to suspend a thread so that it stops running. Malware will sometimes suspend a thread in order to modify it by performing code injection.',
'System': 'This function is used to run another program provided by some C runtime libraries. On Windows, this function serves as a wrapper function to CreateProcess.',
    'Thread32First/Thread32Next': 'These functions are used to iterate through the threads of a process. Injectors use them to find an appropriate thread into which to inject.',
    'ThunRTMain': 'This function is used as the entry point to a visual basic executable. This indicates malware with visual basic functionality.',
'Toolhelp32ReadProcessMemory': 'This function is used to read the memory of a remote process.',
'URLDownloadToFile': 'This function is used to download a file from a web server and save it to disk. This function is popular with downloaders because it implements all the functionality of a downloader in one function call.',
'VirtualAllocEx': 'This function is a memory-allocation routine that can allocate memory in a remote process. Malware sometimes uses VirtualAllocEx as part of process injection.',
'VirtualProtectEx': 'This function is used to change the protection on a region of memory. Malware may use this function to change a read-only section of memory to an executable.',
'WideCharToMultiByte': 'This function is used to convert a Unicode string into an ASCII string.',
'WinExec': 'This function is used to execute another program.',
'WriteProcessMemory': 'This function is used to write data to a remote process. Malware uses WriteProcessMemory as part of process injection.',
'WSAStartup': 'This function is used to initialize low-level network functionality. Finding calls to WSAStartup can often be an easy way to locate the start of network related functionality.',
'Zombie_AddRef': 'This function is used to make a call to a visual basic subroutine. This indicates malware with visual basic functionality.'
}
# constant for an unknown import by ordinal
ORDINAL_DESC = 'Ordinal is decoded at runtime. To see the ordinal mapping, download the DLL and use the parse_exports() method of the PE class.'
def run(peobject):
found = []
alerts = []
# search for functionality in imports list
for dll in peobject.parse_imports():
# loop through each function in the DLL
for f in dll['functions']:
name = f['name']
# check for dll import by ordinal and try to resolve it
if f['ordinal']:
if (dll['dll'].lower() in ords2names) and (f['ordinal'] in ords2names[dll['dll'].lower()]):
name = ords2names[dll['dll'].lower()][f['ordinal']]
else:
# unknown dll with ordinal import
target = ''.join(['[', dll['dll'], '] ordinal(', hex(f['ordinal']).rstrip('L'), ') : ', ORDINAL_DESC])
if target not in found:
found.append(target)
# check for function name in targets
match = [k for k in targets if name in k]
if match:
target = ''.join(['[', dll['dll'], '] ', match[0], ' : ', targets[match[0]]])
if target not in found:
found.append(target)
# this rule generates only one alert
if found:
alerts.append({
'title': 'Suspicious Imports',
'description': 'These are imported functions by the executable that indicate functionality.',
'data': found,
'code': '',
})
return alerts
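# --- Hedged usage sketch (not part of the original rule) ---
# run() only needs an object whose parse_imports() returns the structure walked above:
# a list of DLL dicts, each holding a 'functions' list of {'name', 'ordinal'} entries.
# The fake PE object below is purely illustrative; in SalSA the peobject comes from the
# project's PE parser. Ordinal-only imports would additionally be resolved through
# ords2names when a mapping for the DLL is known.
class _FakePE(object):
    def parse_imports(self):
        return [
            {'dll': 'KERNEL32.dll', 'functions': [
                {'name': 'CreateRemoteThread', 'ordinal': None},
                {'name': 'WriteProcessMemory', 'ordinal': None},
            ]},
        ]

def _demo_run():
    for alert in run(_FakePE()):
        print(alert['title'])
        for entry in alert['data']:
            print('  ' + entry)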
| 119.052632
| 354
| 0.780858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20,113
| 0.889169
|
3f6145c13e10fe4a1dbf8c0b4288b82e127765e5
| 4,046
|
py
|
Python
|
mqttVec.py
|
Hamlet3000/mqttVec
|
65b02446d23ce7c4583b4bf5c7cbe7a84cab0c67
|
[
"CC0-1.0"
] | null | null | null |
mqttVec.py
|
Hamlet3000/mqttVec
|
65b02446d23ce7c4583b4bf5c7cbe7a84cab0c67
|
[
"CC0-1.0"
] | null | null | null |
mqttVec.py
|
Hamlet3000/mqttVec
|
65b02446d23ce7c4583b4bf5c7cbe7a84cab0c67
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
import anki_vector
import paho.mqtt.client as mqtt
import time
###############################################################################
def main():
voltage = 0
batlevel = 0
charging = 0
docked = 0
status = "error"
ltime = time.strftime("%d.%m.%Y %H:%M:%S")
try:
# Connect to Vector and get battery info
with anki_vector.Robot(behavior_control_level=None,
cache_animation_lists=False) as robot:
battery_state = robot.get_battery_state()
voltage = battery_state.battery_volts
batlevel = battery_state.battery_level
charging = battery_state.is_charging
docked = battery_state.is_on_charger_platform
status = get_status(robot)
except:
print("couldn't connect to Vector")
# In the openHAB channel, use a jsonpath transform to get specific values like this: JSONPATH:$..voltage
data = {}
data['robots'] = []
data['robots'].append({
'name': 'Vector Green',
'voltage': voltage,
'batlevel': batlevel,
'charging': charging,
'docked': docked,
'time': ltime,
'status': status
})
# Configure and publish data to mqtt
do_mqtt(data)
###############################################################################
def get_status(robot):
status = "error"
if robot.status.are_motors_moving:
status = "Vector is moving"
if robot.status.are_wheels_moving:
status = "Vector's wheels are moving"
if robot.status.is_animating:
status = "Vector is animating"
if robot.status.is_being_held:
status = "Vector is being held"
if robot.status.is_button_pressed:
status = "Vector's button was button pressed"
if robot.status.is_carrying_block:
status = "Vector is carrying his block"
if robot.status.is_charging:
status = "Vector is currently charging"
if robot.status.is_cliff_detected:
status = "Vector has detected a cliff"
if robot.status.is_docking_to_marker:
status = "Vector has found a marker and is docking to it"
if robot.status.is_falling:
status = "Vector is falling"
if robot.status.is_head_in_pos:
status = "Vector's head is in position"
if robot.status.is_in_calm_power_mode:
status = "Vector is in calm power mode"
if robot.status.is_lift_in_pos:
status = "Vector's arm is in position"
if robot.status.is_on_charger:
status = "Vector is on the charger"
if robot.status.is_pathing:
status = "Vector is traversing a path"
if robot.status.is_picked_up:
status = "Vector is picked up"
if robot.status.is_robot_moving:
status = "Vector is in motion"
return status
###############################################################################
def on_publish(client, userdata, mid):
print("Message published to broker")
###############################################################################
def do_mqtt(data):
# define variables for MQTT
MQTT_HOST = "192.168.0.7"
MQTT_TOPIC = "Vector"
MQTT_PORT = 1883
MQTT_KEEPALIVE_INTERVAL = 20
MQTT_USER = "YOUR_MQTT_USER"
MQTT_PW = "YOUR_MQTT_PW"
# Convert it to text? Not sure why I did this but it works. Yay, 1am programming.
MQTT_MSG = str(data)
# Initiate MQTT Client
mqttc = mqtt.Client()
# Set username and password for the Broker
mqttc.username_pw_set(MQTT_USER, MQTT_PW)
# Register publish callback function
#mqttc.on_publish = on_publish
# Connect with MQTT Broker
mqttc.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)
# Publish message to MQTT Broker
mqttc.publish(MQTT_TOPIC,MQTT_MSG)
# Disconnect from MQTT_Broker
mqttc.disconnect()
###############################################################################
if __name__ == "__main__":
main()
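# --- Hedged companion sketch (not part of the original script) ---
# Consumer side of the payload published by do_mqtt(): subscribe to the same "Vector"
# topic and pull out one field, mirroring what an openHAB JSONPATH transform such as
# $..voltage extracts. Broker address and credentials are the same placeholders used
# above. Note that do_mqtt() publishes str(dict) rather than JSON, hence literal_eval.
def subscribe_example():
    import ast

    def on_message(client, userdata, msg):
        payload = ast.literal_eval(msg.payload.decode())
        print(payload['robots'][0]['voltage'])

    client = mqtt.Client()
    client.username_pw_set("YOUR_MQTT_USER", "YOUR_MQTT_PW")
    client.on_message = on_message
    client.connect("192.168.0.7", 1883, 20)
    client.subscribe("Vector")
    client.loop_forever()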
| 31.364341
| 108
| 0.579832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,627
| 0.402126
|
3f6337b447bfd743f2d66c927077a80e24dcc381
| 428
|
py
|
Python
|
examples/scanner_ibeacon_example.py
|
hbcho/beacontools1
|
82606d871c72bccb8962f50cb374595bcc2ab77d
|
[
"MIT"
] | null | null | null |
examples/scanner_ibeacon_example.py
|
hbcho/beacontools1
|
82606d871c72bccb8962f50cb374595bcc2ab77d
|
[
"MIT"
] | null | null | null |
examples/scanner_ibeacon_example.py
|
hbcho/beacontools1
|
82606d871c72bccb8962f50cb374595bcc2ab77d
|
[
"MIT"
] | null | null | null |
import time
from beacontools import BeaconScanner, IBeaconFilter
def callback(bt_addr, rssi, packet, additional_info):
print("<%s, %d> %s %s" % (bt_addr, rssi, packet, additional_info))
# scan for all iBeacon advertisements from beacons with the specified uuid
scanner = BeaconScanner(callback,
device_filter=IBeaconFilter(uuid="e2c56db5-dffb-48d2-b060-d0f5a71096e0")
)
scanner.start()
time.sleep(10)
scanner.stop()
| 30.571429
| 76
| 0.764019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 129
| 0.301402
|
3f69604976a8b164013d06794b381ad086d3bb5d
| 499
|
py
|
Python
|
desafio61.py
|
rafarbop/Python
|
e304993a6c73eacd8fffd7c67286206516e5faea
|
[
"MIT"
] | null | null | null |
desafio61.py
|
rafarbop/Python
|
e304993a6c73eacd8fffd7c67286206516e5faea
|
[
"MIT"
] | null | null | null |
desafio61.py
|
rafarbop/Python
|
e304993a6c73eacd8fffd7c67286206516e5faea
|
[
"MIT"
] | null | null | null |
# Desafio 61 Curso em Video Python
# By Rafabr
from estrutura_modelo import cabecalho, rodape
cabecalho(61, "Terms of an Arithmetic Progression - II")
while True:
try:
        p0 = float(input('Enter the first term of the AP: '))
        r = float(input('Enter the common difference of the AP: '))
    except ValueError:
        print('You entered an invalid value!\n')
continue
break
n = 1
print()
while (n <= 10):
    print(f'Term {n}:'.ljust(10) + f'{p0 + (n-1)*r}')
n += 1
rodape()
| 18.481481
| 59
| 0.607214
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 212
| 0.421471
|
3f69df2d4bab9abaeedc1a340100793f3dcde991
| 485
|
py
|
Python
|
setup.py
|
leandron/steinlib
|
bbc0295fb1ebf8dce7d06c750db126f6240b4617
|
[
"MIT"
] | 4
|
2017-04-02T14:14:32.000Z
|
2021-10-12T23:22:17.000Z
|
setup.py
|
leandron/steinlib
|
bbc0295fb1ebf8dce7d06c750db126f6240b4617
|
[
"MIT"
] | null | null | null |
setup.py
|
leandron/steinlib
|
bbc0295fb1ebf8dce7d06c750db126f6240b4617
|
[
"MIT"
] | null | null | null |
from setuptools import setup
tests_require = [
'cov-core',
'mock',
'nose2',
]
setup(name='steinlib',
version='0.1',
description='Python bindings for Steinlib format.',
url='http://github.com/leandron/steinlib',
author='Leandro Nunes',
author_email='leandron85@gmail.com',
license='MIT',
packages=['steinlib'],
tests_require=tests_require,
test_suite='nose2.collector.collector',
zip_safe=False)
| 24.25
| 57
| 0.618557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 192
| 0.395876
|
3f6a18b0d4c80fcdd062def647e4e3d88b2df3b9
| 55,602
|
py
|
Python
|
usdzconvert/usdStageWithFbx.py
|
summertriangle-dev/usdzconvert-docker
|
9953845f3a83f8cc3d5380a4ccae8bc39753d550
|
[
"MIT"
] | 3
|
2021-03-10T00:34:18.000Z
|
2021-10-14T02:52:41.000Z
|
usdzconvert/usdStageWithFbx.py
|
summertriangle-dev/usdzconvert-docker
|
9953845f3a83f8cc3d5380a4ccae8bc39753d550
|
[
"MIT"
] | null | null | null |
usdzconvert/usdStageWithFbx.py
|
summertriangle-dev/usdzconvert-docker
|
9953845f3a83f8cc3d5380a4ccae8bc39753d550
|
[
"MIT"
] | null | null | null |
from pxr import *
import os, os.path
import numpy
import re
import usdUtils
import math
import imp
usdStageWithFbxLoaded = True
try:
imp.find_module('fbx')
import fbx
except ImportError:
usdUtils.printError("Failed to import fbx module. Please install FBX Python bindings from http://www.autodesk.com/fbx and add path to FBX Python SDK to your PYTHONPATH")
usdStageWithFbxLoaded = False
class ConvertError(Exception):
pass
def printErrorAndExit(message):
usdUtils.printError(message)
raise ConvertError()
def GfMatrix4dWithFbxMatrix(m):
return Gf.Matrix4d(
m[0][0], m[0][1], m[0][2], m[0][3],
m[1][0], m[1][1], m[1][2], m[1][3],
m[2][0], m[2][1], m[2][2], m[2][3],
m[3][0], m[3][1], m[3][2], m[3][3])
def getFbxNodeTransforms(fbxNode):
return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateLocalTransform())
def getFbxNodeGeometricTransform(fbxNode):
    # The geometric transform is an additional transform applied to the node's geometry.
    # It is relative to the node transform and is not propagated to the node's children
    # in the scene graph.
translation = fbxNode.GetGeometricTranslation(fbx.FbxNode.eSourcePivot)
rotation = fbxNode.GetGeometricRotation(fbx.FbxNode.eSourcePivot)
scale = fbxNode.GetGeometricScaling(fbx.FbxNode.eSourcePivot)
return fbx.FbxAMatrix(translation, rotation, scale)
def convertUVTransformFromFBX(translation, scale, rotation):
# from FBX to Blender
scale[0] = 1.0 / scale[0]
scale[1] = 1.0 / scale[1]
rotation = -rotation
# Blender: Tuv = T * R * S
# USD: Tuv = S * R * T
scaleMatrix = Gf.Matrix4d(Gf.Vec4d(scale[0], scale[1], 1, 1))
inverseScaleMatrix = Gf.Matrix4d(Gf.Vec4d(1.0 / scale[0], 1.0 / scale[1], 1, 1))
rotationMatrix = Gf.Matrix4d(
math.cos(rotation), math.sin(rotation), 0, 0,
-math.sin(rotation), math.cos(rotation), 0, 0,
0, 0, 1, 0,
0, 0, 0, 1)
inverseRotationMatrix = rotationMatrix.GetTranspose()
translateMatrix = Gf.Matrix4d(1)
translateMatrix.SetTranslate(Gf.Vec3d(translation[0], translation[1], 0))
# translate matrix from Blender to USD
transform = scaleMatrix * rotationMatrix * translateMatrix * inverseRotationMatrix * inverseScaleMatrix
translation3d = transform.ExtractTranslation()
translation[0] = translation3d[0]
translation[1] = translation3d[1]
return translation, scale, math.degrees(rotation)
class FbxNodeManager(usdUtils.NodeManager):
def __init__(self, value=None):
usdUtils.NodeManager.__init__(self)
def overrideGetName(self, fbxNode):
return usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1])
def overrideGetChildren(self, fbxNode):
children = []
for childIdx in xrange(fbxNode.GetChildCount()):
children.append(fbxNode.GetChild(childIdx))
return children
def overrideGetLocalTransformGfMatrix4d(self, fbxNode):
return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateLocalTransform())
def overrideGetWorldTransformGfMatrix4d(self, fbxNode):
return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateGlobalTransform())
def overrideGetParent(self, fbxNode):
return fbxNode.GetParent()
class AnimProperty:
def __init__(self, fbxAnimLayer, fbxProperty, timeSpans):
self.fbxAnimLayer = fbxAnimLayer
self.fbxProperty = fbxProperty
self.timeSpans = timeSpans
class FbxConverter:
def __init__(self, fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose):
self.verbose = verbose
self.legacyModifier = legacyModifier
self.copyTextures = copyTextures
self.searchPaths = searchPaths
self.asset = usdUtils.Asset(usdPath)
self.usdStage = None
self.usdMaterials = {}
self.nodeId = 0
self.nodePaths = {}
self.fbxSkinToSkin = {}
self.startAnimationTime = 0
self.stopAnimationTime = 0
self.skeletonByNode = {} # collect skinned mesh to construct later
self.blendShapeByNode = {} # collect blend shapes to construct later
        self.copiedTextures = {} # avoid copying textures more than once
self.extent = [[], []]
self.fbxScene = None
filenameFull = fbxPath.split('/')[-1]
self.srcFolder = fbxPath[:len(fbxPath)-len(filenameFull)]
filenameFull = usdPath.split('/')[-1]
self.dstFolder = usdPath[:len(usdPath)-len(filenameFull)]
self.loadFbxScene(fbxPath)
self.fps = fbx.FbxTime.GetFrameRate(fbx.FbxTime.GetGlobalTimeMode())
self.asset.setFPS(self.fps)
self.nodeManager = FbxNodeManager()
self.skinning = usdUtils.Skinning(self.nodeManager)
self.shapeBlending = usdUtils.ShapeBlending()
def loadFbxScene(self, fbxPath):
fbxManager = fbx.FbxManager.Create()
if not fbxManager:
printErrorAndExit("failed to create FBX manager object")
self.fbxManager = fbxManager
fbxIOSettings = fbx.FbxIOSettings.Create(fbxManager, fbx.IOSROOT)
fbxManager.SetIOSettings(fbxIOSettings)
fbxImporter = fbx.FbxImporter.Create(fbxManager, "")
result = fbxImporter.Initialize(fbxPath, -1, fbxManager.GetIOSettings())
if not result:
printErrorAndExit("failed to initialize FbxImporter object")
if fbxImporter.IsFBX():
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_MATERIAL, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_TEXTURE, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_EMBEDDED, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_SHAPE, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_GOBO, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_ANIMATION, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_GLOBAL_SETTINGS, True)
self.fbxScene = fbx.FbxScene.Create(fbxManager, "")
result = fbxImporter.Import(self.fbxScene)
fbxImporter.Destroy()
if not result:
printErrorAndExit("failed to load FBX scene")
def getTextureProperties(self, materialProperty):
if materialProperty.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxFileTexture.ClassId)) > 0:
fbxFileTexture = materialProperty.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxFileTexture.ClassId), 0)
texCoordSet = 'st'
if fbxFileTexture.UVSet is not None:
texCoordSet = str(fbxFileTexture.UVSet.Get())
if texCoordSet == '' or texCoordSet == 'default':
texCoordSet = 'st'
else:
texCoordSet = usdUtils.makeValidIdentifier(texCoordSet)
wrapS = usdUtils.WrapMode.repeat
wrapT = usdUtils.WrapMode.repeat
if fbxFileTexture.GetWrapModeU() == fbx.FbxTexture.eClamp:
wrapS = usdUtils.WrapMode.clamp
if fbxFileTexture.GetWrapModeV() == fbx.FbxTexture.eClamp:
wrapT = usdUtils.WrapMode.clamp
# texture transform
mapTransform = None
translation = [fbxFileTexture.GetTranslationU(), fbxFileTexture.GetTranslationV()]
scale = [fbxFileTexture.GetScaleU(), fbxFileTexture.GetScaleV()]
rotation = fbxFileTexture.GetRotationW()
if (translation[0] != 0 or translation[1] != 0 or
scale[0] != 1 or scale[1] != 1 or
rotation != 0):
(translation, scale, rotation) = convertUVTransformFromFBX(translation, scale, rotation)
mapTransform = usdUtils.MapTransform(translation, scale, rotation)
return fbxFileTexture.GetFileName(), texCoordSet, wrapS, wrapT, mapTransform
elif materialProperty.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxLayeredTexture.ClassId)) > 0:
pass
return '', 'st', usdUtils.WrapMode.repeat, usdUtils.WrapMode.repeat, None
def processMaterialProperty(self, input, propertyName, property, factorProperty, channels, material, fbxMaterial):
value = None
factor = float(factorProperty.Get()) if factorProperty is not None else None
if property is not None:
if channels == 'rgb':
value = [property.Get()[0], property.Get()[1], property.Get()[2]]
else:
if input == usdUtils.InputName.opacity:
transparency = property.Get()[0]
if factor is not None:
transparency = transparency * factor
factor = None
value = 1.0 - transparency
else:
value = float(property.Get()[0])
srcTextureFilename = '' # source texture filename on drive
textureFilename = '' # valid for USD
materialProperty = fbxMaterial.FindProperty(propertyName)
if materialProperty.IsValid():
srcTextureFilename, texCoordSet, wrapS, wrapT, mapTransform = self.getTextureProperties(materialProperty)
srcTextureFilename = usdUtils.resolvePath(srcTextureFilename, self.srcFolder, self.searchPaths)
textureFilename = usdUtils.makeValidPath(srcTextureFilename)
if textureFilename != '' and (self.copyTextures or srcTextureFilename != textureFilename):
if srcTextureFilename in self.copiedTextures:
textureFilename = self.copiedTextures[srcTextureFilename]
else:
newTextureFilename = 'textures/' + os.path.basename(textureFilename)
# do not rewrite the texture with same basename
subfolderIdx = 0
while newTextureFilename in self.copiedTextures.values():
newTextureFilename = 'textures/' + str(subfolderIdx) + '/' + os.path.basename(textureFilename)
subfolderIdx += 1
usdUtils.copy(srcTextureFilename, self.dstFolder + newTextureFilename, self.verbose)
self.copiedTextures[srcTextureFilename] = newTextureFilename
textureFilename = newTextureFilename
if textureFilename != '':
scale = None
if factor is not None:
if channels == 'rgb':
scale = [factor, factor, factor]
else:
scale = factor
material.inputs[input] = usdUtils.Map(channels, textureFilename, value, texCoordSet, wrapS, wrapT, scale, mapTransform)
else:
if value is not None:
if factor is not None:
if channels == 'rgb':
material.inputs[input] = [value[0] * factor, value[1] * factor, value[2] * factor]
else:
material.inputs[input] = value * factor
else:
material.inputs[input] = value
def processMaterials(self):
for i in range(self.fbxScene.GetMaterialCount()):
fbxMaterial = self.fbxScene.GetMaterial(i)
material = usdUtils.Material(fbxMaterial.GetName().split(":")[-1])
normalMap = fbxMaterial.NormalMap if hasattr(fbxMaterial, 'NormalMap') else None
self.processMaterialProperty(usdUtils.InputName.normal, fbx.FbxSurfaceMaterial.sNormalMap, normalMap, None, 'rgb', material, fbxMaterial)
diffuse = fbxMaterial.Diffuse if hasattr(fbxMaterial, 'Diffuse') else None
diffuseFactor = fbxMaterial.DiffuseFactor if hasattr(fbxMaterial, 'DiffuseFactor') else None
self.processMaterialProperty(usdUtils.InputName.diffuseColor, fbx.FbxSurfaceMaterial.sDiffuse, diffuse, diffuseFactor, 'rgb', material, fbxMaterial)
transparentColor = fbxMaterial.TransparentColor if hasattr(fbxMaterial, 'TransparentColor') else None
transparencyFactor = fbxMaterial.TransparencyFactor if hasattr(fbxMaterial, 'TransparencyFactor') else None
self.processMaterialProperty(usdUtils.InputName.opacity, fbx.FbxSurfaceMaterial.sTransparentColor, transparentColor, transparencyFactor, 'a', material, fbxMaterial)
emissive = fbxMaterial.Emissive if hasattr(fbxMaterial, 'Emissive') else None
emissiveFactor = fbxMaterial.EmissiveFactor if hasattr(fbxMaterial, 'EmissiveFactor') else None
self.processMaterialProperty(usdUtils.InputName.emissiveColor, fbx.FbxSurfaceMaterial.sEmissive, emissive, emissiveFactor, 'rgb', material, fbxMaterial)
ambient = fbxMaterial.Ambient if hasattr(fbxMaterial, 'Ambient') else None
ambientFactor = fbxMaterial.AmbientFactor if hasattr(fbxMaterial, 'AmbientFactor') else None
self.processMaterialProperty(usdUtils.InputName.occlusion, fbx.FbxSurfaceMaterial.sAmbient, ambient, ambientFactor, 'r', material, fbxMaterial)
# 'metallic', 'roughness' ?
usdMaterial = material.makeUsdMaterial(self.asset)
if self.legacyModifier is not None:
self.legacyModifier.opacityAndDiffuseOneTexture(material)
self.usdMaterials[fbxMaterial.GetName()] = usdMaterial
def prepareAnimations(self):
animStacksCount = self.fbxScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId))
if animStacksCount < 1:
if self.verbose:
print 'No animation found'
return
fbxAnimStack = self.fbxScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), 0)
timeSpan = fbxAnimStack.GetLocalTimeSpan()
self.startAnimationTime = timeSpan.GetStart().GetSecondDouble()
self.stopAnimationTime = timeSpan.GetStop().GetSecondDouble()
self.asset.extentTime(self.startAnimationTime)
self.asset.extentTime(self.stopAnimationTime)
def processControlPoints(self, fbxMesh, usdMesh):
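# Write the mesh points and a per-mesh extent attribute; also accumulate the overall extent across all meshes.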
points = [Gf.Vec3f(p[0], p[1], p[2]) for p in fbxMesh.GetControlPoints()]
extent = Gf.Range3f()
for point in points:
extent.UnionWith(point)
usdMesh.CreatePointsAttr(points)
usdMesh.CreateExtentAttr([Gf.Vec3f(extent.GetMin()), Gf.Vec3f(extent.GetMax())])
if not any(self.extent):
self.extent[0] = extent.GetMin()
self.extent[1] = extent.GetMax()
else:
for i in range(3):
self.extent[0][i] = min(self.extent[0][i], extent.GetMin()[i])
self.extent[1][i] = max(self.extent[1][i], extent.GetMax()[i])
def getVec3fArrayWithLayerElements(self, elements, fbxLayerElements):
elementsArray = fbxLayerElements.GetDirectArray()
for i in xrange(elementsArray.GetCount()):
element = elementsArray.GetAt(i)
elements.append(Gf.Vec3f(element[0], element[1], element[2]))
def getIndicesWithLayerElements(self, fbxMesh, fbxLayerElements):
mappingMode = fbxLayerElements.GetMappingMode()
referenceMode = fbxLayerElements.GetReferenceMode()
indexToDirect = (
referenceMode == fbx.FbxLayerElement.eIndexToDirect or
referenceMode == fbx.FbxLayerElement.eIndex)
indices = []
if mappingMode == fbx.FbxLayerElement.eByControlPoint:
if indexToDirect:
for controlPointIdx in xrange(fbxMesh.GetControlPointsCount()):
indices.append(fbxLayerElements.GetIndexArray().GetAt(controlPointIdx))
elif mappingMode == fbx.FbxLayerElement.eByPolygonVertex:
pointIdx = 0
for polygonIdx in xrange(fbxMesh.GetPolygonCount()):
for vertexIdx in xrange(fbxMesh.GetPolygonSize(polygonIdx)):
if indexToDirect:
indices.append(fbxLayerElements.GetIndexArray().GetAt(pointIdx))
else:
indices.append(pointIdx)
pointIdx += 1
elif mappingMode == fbx.FbxLayerElement.eByPolygon:
for polygonIdx in xrange(fbxMesh.GetPolygonCount()):
if indexToDirect:
indices.append(fbxLayerElements.GetIndexArray().GetAt(polygonIdx))
else:
indices.append(polygonIdx)
return indices
def getInterpolationWithLayerElements(self, fbxLayerElements):
mappingMode = fbxLayerElements.GetMappingMode()
if mappingMode == fbx.FbxLayerElement.eByControlPoint:
return UsdGeom.Tokens.vertex
elif mappingMode == fbx.FbxLayerElement.eByPolygonVertex:
return UsdGeom.Tokens.faceVarying
elif mappingMode == fbx.FbxLayerElement.eByPolygon:
return UsdGeom.Tokens.uniform
elif mappingMode == fbx.FbxLayerElement.eAllSame:
return UsdGeom.Tokens.constant
elif mappingMode == fbx.FbxLayerElement.eByEdge:
usdUtils.printWarning("Mapping mode eByEdge for layer elements is not supported.")
return ''
def processNormals(self, fbxMesh, usdMesh, vertexIndices):
for layerIdx in xrange(fbxMesh.GetLayerCount()):
fbxLayerNormals = fbxMesh.GetLayer(layerIdx).GetNormals()
if fbxLayerNormals is None:
continue
normals = []
self.getVec3fArrayWithLayerElements(normals, fbxLayerNormals)
if not any(normals):
continue
indices = self.getIndicesWithLayerElements(fbxMesh, fbxLayerNormals)
interpolation = self.getInterpolationWithLayerElements(fbxLayerNormals)
normalPrimvar = usdMesh.CreatePrimvar('normals', Sdf.ValueTypeNames.Normal3fArray, interpolation)
normalPrimvar.Set(normals)
if len(indices) != 0:
normalPrimvar.SetIndices(Vt.IntArray(indices))
break # normals can be in one layer only
def processUVs(self, fbxMesh, usdMesh, vertexIndices):
for layerIdx in xrange(fbxMesh.GetLayerCount()):
fbxLayerUVs = fbxMesh.GetLayer(layerIdx).GetUVs() # get diffuse texture uv-s
if fbxLayerUVs is None:
continue
uvs = []
uvArray = fbxLayerUVs.GetDirectArray()
for i in xrange(uvArray.GetCount()):
uv = uvArray.GetAt(i)
uvs.append(Gf.Vec2f(uv[0], uv[1]))
if not any(uvs):
continue
indices = self.getIndicesWithLayerElements(fbxMesh, fbxLayerUVs)
interpolation = self.getInterpolationWithLayerElements(fbxLayerUVs)
texCoordSet = 'st'
uvSets = fbxMesh.GetLayer(layerIdx).GetUVSets()
if len(uvSets) > 0:
fbxLayerElementUV = fbxMesh.GetLayer(layerIdx).GetUVSets()[0]
texCoordSet = str(fbxLayerElementUV.GetName())
if layerIdx == 0 or texCoordSet == '' or texCoordSet == 'default':
texCoordSet = 'st'
else:
texCoordSet = usdUtils.makeValidIdentifier(texCoordSet)
uvPrimvar = usdMesh.CreatePrimvar(texCoordSet, Sdf.ValueTypeNames.Float2Array, interpolation)
uvPrimvar.Set(uvs)
if len(indices) != 0:
uvPrimvar.SetIndices(Vt.IntArray(indices))
def processVertexColors(self, fbxMesh, usdMesh, vertexIndices):
for layerIdx in xrange(fbxMesh.GetLayerCount()):
fbxLayerColors = fbxMesh.GetLayer(layerIdx).GetVertexColors()
if fbxLayerColors is None:
continue
colors = []
colorArray = fbxLayerColors.GetDirectArray()
for i in xrange(colorArray.GetCount()):
fbxColor = colorArray.GetAt(i)
colors.append(Gf.Vec3f(fbxColor.mRed, fbxColor.mGreen, fbxColor.mBlue))
if not any(colors):
continue
indices = self.getIndicesWithLayerElements(fbxMesh, fbxLayerColors)
interpolation = self.getInterpolationWithLayerElements(fbxLayerColors)
displayColorPrimvar = usdMesh.CreateDisplayColorPrimvar(interpolation)
displayColorPrimvar.Set(colors)
if len(indices) != 0:
displayColorPrimvar.SetIndices(Vt.IntArray(indices))
break # vertex colors can be in one layer only
def applySkinning(self, fbxNode, fbxSkin, usdMesh, indices):
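# Build per-vertex joint indices and weights from the FBX skin clusters, normalize the weights,
# and bind the mesh to its USD skeleton with a geom bind transform.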
skin = self.fbxSkinToSkin[fbxSkin]
skeleton = skin.skeleton
maxPointIndex = 0
for clusterIdx in range(fbxSkin.GetClusterCount()):
fbxCluster = fbxSkin.GetCluster(clusterIdx)
for i in range(fbxCluster.GetControlPointIndicesCount()):
pointIndex = fbxCluster.GetControlPointIndices()[i]
if maxPointIndex < pointIndex:
maxPointIndex = pointIndex
vertexCount = maxPointIndex + 1 # should be equal to number of vertices: max(indices) + 1
jointIndicesPacked = [[] for i in range(vertexCount)]
weightsPacked = [[] for i in range(vertexCount)]
for clusterIdx in range(fbxSkin.GetClusterCount()):
fbxCluster = fbxSkin.GetCluster(clusterIdx)
for i in range(fbxCluster.GetControlPointIndicesCount()):
pointIndex = fbxCluster.GetControlPointIndices()[i]
jointIndicesPacked[pointIndex].append(skin.remapIndex(clusterIdx))
weightsPacked[pointIndex].append(float(fbxCluster.GetControlPointWeights()[i]))
components = 0
for indicesPerVertex in jointIndicesPacked:
if components < len(indicesPerVertex):
components = len(indicesPerVertex)
jointIndices = [0] * vertexCount * components
weights = [float(0)] * vertexCount * components
for i in range(vertexCount):
indicesPerVertex = jointIndicesPacked[i]
for j in range(len(indicesPerVertex)):
jointIndices[i * components + j] = indicesPerVertex[j]
weights[i * components + j] = weightsPacked[i][j]
weights = Vt.FloatArray(weights)
UsdSkel.NormalizeWeights(weights, components)
usdSkelBinding = UsdSkel.BindingAPI(usdMesh)
usdSkelBinding.CreateJointIndicesPrimvar(False, components).Set(jointIndices)
usdSkelBinding.CreateJointWeightsPrimvar(False, components).Set(weights)
bindTransformWasNotFound = True
bindTransform = Gf.Matrix4d(1)
for i in range(self.fbxScene.GetPoseCount()):
fbxPose = self.fbxScene.GetPose(i)
if fbxPose is None:
continue
nodeIndex = fbxPose.Find(fbxNode)
if nodeIndex > -1 and (fbxPose.IsBindPose() or not fbxPose.IsLocalMatrix(nodeIndex)):
bindTransform = GfMatrix4dWithFbxMatrix(fbxPose.GetMatrix(nodeIndex))
bindTransformWasNotFound = False
break
if bindTransformWasNotFound and fbxSkin.GetClusterCount() > 0:
if self.verbose:
usdUtils.printWarning("can't find a bind pose for mesh " + fbxNode.GetName() + ". Trying to calculate.")
# FBX stores bind transform matrix for the skin in each cluster
# get it from the first one
fbxCluster = fbxSkin.GetCluster(0)
fbxBindTransform = fbx.FbxAMatrix()
fbxBindTransform = fbxCluster.GetTransformMatrix(fbxBindTransform)
bindTransform = GfMatrix4dWithFbxMatrix(fbxBindTransform)
bindTransform = GfMatrix4dWithFbxMatrix(getFbxNodeGeometricTransform(fbxNode)) * bindTransform
usdSkelBinding.CreateGeomBindTransformAttr(bindTransform)
usdSkelBinding.CreateSkeletonRel().AddTarget(skeleton.usdSkeleton.GetPath())
if self.legacyModifier is not None:
self.legacyModifier.addSkelAnimToMesh(usdMesh, skeleton)
def bindRigidDeformation(self, fbxNode, usdMesh, skeleton):
bindTransform = GfMatrix4dWithFbxMatrix(fbxNode.EvaluateGlobalTransform())
bindTransform = GfMatrix4dWithFbxMatrix(getFbxNodeGeometricTransform(fbxNode)) * bindTransform
skeleton.bindRigidDeformation(fbxNode, usdMesh, GfMatrix4dWithFbxMatrix(bindTransform))
if self.legacyModifier is not None:
self.legacyModifier.addSkelAnimToMesh(usdMesh, skeleton)
def bindMaterials(self, fbxMesh, usdMesh):
for layerIdx in xrange(fbxMesh.GetLayerCount()):
fbxLayerMaterials = fbxMesh.GetLayer(layerIdx).GetMaterials()
if not fbxLayerMaterials:
continue
# looks like there is a bug in FBX SDK:
# GetDirectArray() does not work if .GetCount() has not been called
materialsCount = fbxLayerMaterials.GetDirectArray().GetCount()
if fbxLayerMaterials.GetIndexArray().GetCount() > 1 and fbxLayerMaterials.GetMappingMode() == fbx.FbxLayerElement.eByPolygon:
# subsets
subsets = [[] for i in range(materialsCount)]
for polygonIdx in range(fbxLayerMaterials.GetIndexArray().GetCount()):
materialIndex = fbxLayerMaterials.GetIndexArray().GetAt(polygonIdx)
subsets[materialIndex].append(polygonIdx)
bindingAPI = UsdShade.MaterialBindingAPI(usdMesh)
for materialIndex in range(materialsCount):
facesCount = len(subsets[materialIndex])
if facesCount > 0:
fbxMaterial = fbxLayerMaterials.GetDirectArray().GetAt(materialIndex)
materialName = usdUtils.makeValidIdentifier(fbxMaterial.GetName())
subsetName = materialName + '_subset'
if self.verbose:
print ' subset:', subsetName, 'faces:', facesCount
usdSubset = UsdShade.MaterialBindingAPI.CreateMaterialBindSubset(bindingAPI, subsetName, Vt.IntArray(subsets[materialIndex]))
usdMaterial = self.usdMaterials[fbxMaterial.GetName()]
UsdShade.MaterialBindingAPI(usdSubset).Bind(usdMaterial)
elif fbxLayerMaterials.GetIndexArray().GetCount() > 0:
# one material for whole mesh
fbxMaterial = fbxLayerMaterials.GetDirectArray().GetAt(0)
if fbxMaterial is not None and fbxMaterial.GetName() in self.usdMaterials:
usdMaterial = self.usdMaterials[fbxMaterial.GetName()]
UsdShade.Material.Bind(usdMaterial, usdMesh.GetPrim())
def getFbxMesh(self, fbxNode):
fbxNodeAttribute = fbxNode.GetNodeAttribute()
if fbxNodeAttribute:
fbxAttributeType = fbxNodeAttribute.GetAttributeType()
if (fbx.FbxNodeAttribute.eMesh == fbxAttributeType or
fbx.FbxNodeAttribute.eSubDiv == fbxAttributeType):
return fbxNodeAttribute
return None
def getFbxSkin(self, fbxNode):
fbxMesh = self.getFbxMesh(fbxNode)
if fbxMesh is not None and fbxMesh.GetDeformerCount(fbx.FbxDeformer.eSkin) > 0:
return fbxMesh.GetDeformer(0, fbx.FbxDeformer.eSkin)
return None
def getFbxBlenShape(self, fbxNode):
fbxMesh = self.getFbxMesh(fbxNode)
if fbxMesh is not None and fbxMesh.GetDeformerCount(fbx.FbxDeformer.eBlendShape) > 0:
return fbxMesh.GetDeformer(0, fbx.FbxDeformer.eBlendShape)
return None
def processMesh(self, fbxNode, newPath, underSkeleton, indent):
usdMesh = UsdGeom.Mesh.Define(self.usdStage, newPath)
fbxMesh = fbxNode.GetNodeAttribute()
if fbx.FbxNodeAttribute.eSubDiv == fbxMesh.GetAttributeType():
fbxMesh = fbxMesh.GetBaseMesh()
else:
usdMesh.CreateSubdivisionSchemeAttr(UsdGeom.Tokens.none)
indices = []
faceVertexCounts = []
for polygonIdx in xrange(fbxMesh.GetPolygonCount()):
polygonSize = fbxMesh.GetPolygonSize(polygonIdx)
faceVertexCounts.append(polygonSize)
for polygonVertexIdx in xrange(polygonSize):
index = fbxMesh.GetPolygonVertex(polygonIdx, polygonVertexIdx)
indices.append(index)
usdMesh.CreateFaceVertexCountsAttr(faceVertexCounts)
usdMesh.CreateFaceVertexIndicesAttr(indices)
# positions, normals, texture coordinates
self.processControlPoints(fbxMesh, usdMesh)
self.processNormals(fbxMesh, usdMesh, indices)
self.processUVs(fbxMesh, usdMesh, indices)
self.processVertexColors(fbxMesh, usdMesh, indices)
fbxSkin = self.getFbxSkin(fbxNode)
if fbxSkin is not None:
self.applySkinning(fbxNode, fbxSkin, usdMesh, indices)
elif underSkeleton is not None:
self.bindRigidDeformation(fbxNode, usdMesh, underSkeleton)
if self.verbose:
type = 'Mesh'
if fbxSkin is not None:
type = 'Skinned mesh'
elif underSkeleton is not None:
type = 'Rigid skinned mesh'
print indent + type + ': ' + fbxNode.GetName()
self.bindMaterials(fbxMesh, usdMesh)
return usdMesh
def addTranslateOpIfNotEmpty(self, prim, op, name = ''):
if op != fbx.FbxVector4(0, 0, 0, 1):
prim.AddTranslateOp(UsdGeom.XformOp.PrecisionFloat, name).Set((op[0], op[1], op[2]))
def addInvertTranslateOpIfNotEmpty(self, prim, op, name = ''):
if op != fbx.FbxVector4(0, 0, 0, -1):
prim.AddTranslateOp(UsdGeom.XformOp.PrecisionFloat, name, True)
def addRotationOpIfNotEmpty(self, prim, op, name = '', idRotation = None):
if idRotation is None:
idRotation = fbx.FbxVector4(0, 0, 0, 1)
if op != idRotation:
prim.AddRotateXYZOp(UsdGeom.XformOp.PrecisionFloat, name).Set((op[0], op[1], op[2]))
def addScalingOpIfNotEmpty(self, prim, op, name = '', idScaling = None):
if idScaling is None:
idScaling = fbx.FbxVector4(1, 1, 1, 1)
if op != idScaling:
prim.AddScaleOp(UsdGeom.XformOp.PrecisionFloat, name).Set((op[0], op[1], op[2]))
def getXformOp(self, usdGeom, type):
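# Return an existing xform op of the given type (one without a name suffix) or create it,
# then re-sort xformOpOrder to match the FBX transform composition (offsets, pivots, pre/post rotation).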
ops = usdGeom.GetOrderedXformOps()
for op in ops:
# find operation without suffix
if op.GetOpType() == type and len(op.GetName().split(':')) == 2:
return op
op = None
if type == UsdGeom.XformOp.TypeTranslate:
op = usdGeom.AddTranslateOp()
elif type == UsdGeom.XformOp.TypeRotateXYZ:
op = usdGeom.AddRotateXYZOp()
elif type == UsdGeom.XformOp.TypeOrient:
op = usdGeom.AddOrientOp()
elif type == UsdGeom.XformOp.TypeScale:
op = usdGeom.AddScaleOp()
if op is not None:
opNames = [
"xformOp:translate",
"xformOp:translate:rotationOffset",
"xformOp:translate:rotationPivot",
"xformOp:rotateXYZ:preRotation",
"xformOp:rotateXYZ",
"xformOp:rotateXYZ:postRotation",
"!invert!xformOp:translate:rotationPivot",
"xformOp:translate:scalingOffset",
"xformOp:translate:scalingPivot",
"xformOp:scale",
"!invert!xformOp:translate:scalingPivot",
]
ops = usdGeom.GetOrderedXformOps()
newOps = []
for opName in opNames:
checkInverse = False
if opName[0:8] == '!invert!':
opName = opName[8:]
checkInverse = True
for operation in ops:
if operation.GetName() == opName and operation.IsInverseOp() == checkInverse:
newOps.append(operation)
break
usdGeom.SetXformOpOrder(newOps)
return op
def setNodeTransforms(self, node, prim):
t = fbx.FbxVector4(node.LclTranslation.Get())
ro = node.GetRotationOffset(fbx.FbxNode.eSourcePivot)
rp = node.GetRotationPivot(fbx.FbxNode.eSourcePivot)
preRotation = node.GetPreRotation(fbx.FbxNode.eSourcePivot)
r = fbx.FbxVector4(node.LclRotation.Get())
postRotation = node.GetPostRotation(fbx.FbxNode.eSourcePivot)
so = node.GetScalingOffset(fbx.FbxNode.eSourcePivot)
sp = node.GetScalingPivot(fbx.FbxNode.eSourcePivot)
s = fbx.FbxVector4(node.LclScaling.Get())
# set translation
self.addTranslateOpIfNotEmpty(prim, t)
# set rotation offset, pivot and pre-post rotation ops
self.addTranslateOpIfNotEmpty(prim, ro, "rotationOffset")
self.addTranslateOpIfNotEmpty(prim, rp, "rotationPivot")
self.addRotationOpIfNotEmpty(prim, preRotation, "preRotation")
self.addRotationOpIfNotEmpty(prim, r)
self.addRotationOpIfNotEmpty(prim, postRotation, "postRotation")
self.addInvertTranslateOpIfNotEmpty(prim, -rp, "rotationPivot")
# set scaling offset & pivot
self.addTranslateOpIfNotEmpty(prim, so, "scalingOffset")
self.addTranslateOpIfNotEmpty(prim, sp, "scalingPivot")
self.addScalingOpIfNotEmpty(prim, s)
self.addInvertTranslateOpIfNotEmpty(prim, -rp, "scalingPivot")
def hasGeometricTransform(self, fbxNode):
if (fbx.FbxVector4(0, 0, 0, 1) != fbxNode.GetGeometricTranslation(fbx.FbxNode.eSourcePivot) or
fbx.FbxVector4(0, 0, 0, 1) != fbxNode.GetGeometricRotation(fbx.FbxNode.eSourcePivot) or
fbx.FbxVector4(1, 1, 1, 1) != fbxNode.GetGeometricScaling(fbx.FbxNode.eSourcePivot)):
return True
return False
def setGeometricTransform(self, fbxNode, prim):
gt = fbxNode.GetGeometricTranslation(fbx.FbxNode.eSourcePivot)
gr = fbxNode.GetGeometricRotation(fbx.FbxNode.eSourcePivot)
gs = fbxNode.GetGeometricScaling(fbx.FbxNode.eSourcePivot)
self.addTranslateOpIfNotEmpty(prim, gt, "geometricTranslation")
self.addRotationOpIfNotEmpty(prim, gr, "geometricRotation")
self.addScalingOpIfNotEmpty(prim, gs, "geometricScaling")
def processSkeletalAnimation(self, skeletonIdx):
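# Sample each joint's local transform per frame with the FBX animation evaluator and write
# translations, rotations and scales to a UsdSkel.Animation prim.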
skeleton = self.skinning.skeletons[skeletonIdx]
framesCount = int((self.stopAnimationTime - self.startAnimationTime) * self.fps + 0.5) + 1
startFrame = int(self.startAnimationTime * self.fps + 0.5)
if framesCount == 1:
if self.verbose:
print ' no skeletal animation'
return
animationName = self.asset.getAnimationsPath() + '/' + 'SkelAnimation'
if skeletonIdx > 0:
animationName += '_' + str(skeletonIdx)
if self.verbose:
print 'Animation:', animationName
usdSkelAnim = UsdSkel.Animation.Define(self.usdStage, animationName)
translateAttr = usdSkelAnim.CreateTranslationsAttr()
rotateAttr = usdSkelAnim.CreateRotationsAttr()
scaleAttr = usdSkelAnim.CreateScalesAttr()
jointPaths = []
for fbxNode in skeleton.joints:
jointPaths.append(skeleton.jointPaths[fbxNode])
fbxAnimEvaluator = self.fbxScene.GetAnimationEvaluator()
for frame in range(framesCount):
time = frame / self.fps + self.startAnimationTime
translations = []
rotations = []
scales = []
for fbxNode in skeleton.joints:
fbxTime = fbx.FbxTime()
fbxTime.SetSecondDouble(time)
fbxMatrix = fbxAnimEvaluator.GetNodeLocalTransform(fbxNode, fbxTime)
translation = fbxMatrix.GetT()
q = fbxMatrix.GetQ()
rotation = Gf.Quatf(float(q[3]), Gf.Vec3f(float(q[0]), float(q[1]), float(q[2])))
scale = fbxMatrix.GetS()
translations.append([translation[0], translation[1], translation[2]])
rotations.append(rotation)
scales.append([scale[0], scale[1], scale[2]])
translateAttr.Set(translations, Usd.TimeCode(frame + startFrame))
rotateAttr.Set(rotations, Usd.TimeCode(frame + startFrame))
scaleAttr.Set(scales, Usd.TimeCode(frame + startFrame))
usdSkelAnim.CreateJointsAttr(jointPaths)
skeleton.setSkeletalAnimation(usdSkelAnim)
def processNodeTransformAnimation(self, fbxNode, fbxProperty, fbxAnimCurveNode, usdGeom):
fbxTimeSpan = fbx.FbxTimeSpan()
fbxAnimCurveNode.GetAnimationInterval(fbxTimeSpan)
startTime = fbxTimeSpan.GetStart().GetSecondDouble()
stopTime = fbxTimeSpan.GetStop().GetSecondDouble()
framesCount = int((stopTime - startTime) * self.fps + 0.5) + 1
if framesCount < 1:
return
startFrame = int(startTime * self.fps + 0.5)
isTranslation = False
isRotation = False
isScale = False
channelName = str(fbxProperty.GetName()).strip()
if channelName == 'Lcl Translation':
isTranslation = True
elif channelName == 'Lcl Rotation':
isRotation = True
elif channelName == 'Lcl Scaling':
isScale = True
else:
if self.verbose:
print 'Warning: animation channel "' + channelName + '" is not supported.'
fbxAnimEvaluator = self.fbxScene.GetAnimationEvaluator()
# TODO: for linear curves use key frames only
for frame in range(startFrame, startFrame + framesCount):
time = frame / self.fps + startTime
timeCode = self.asset.toTimeCode(time, True)
fbxTime = fbx.FbxTime()
fbxTime.SetSecondDouble(time)
if isTranslation:
op = self.getXformOp(usdGeom, UsdGeom.XformOp.TypeTranslate)
v = fbxNode.EvaluateLocalTranslation(fbxTime)
op.Set(time = timeCode, value = Gf.Vec3f(float(v[0]), float(v[1]), float(v[2])))
elif isRotation:
op = self.getXformOp(usdGeom, UsdGeom.XformOp.TypeRotateXYZ)
v = fbxNode.EvaluateLocalRotation(fbxTime)
op.Set(time = timeCode, value = Gf.Vec3f(float(v[0]), float(v[1]), float(v[2])))
elif isScale:
op = self.getXformOp(usdGeom, UsdGeom.XformOp.TypeScale)
v = fbxNode.EvaluateLocalScaling(fbxTime)
op.Set(time = timeCode, value = Gf.Vec3f(float(v[0]), float(v[1]), float(v[2])))
def findAnimationProperties(self, fbxObject):
animStacksCount = self.fbxScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId))
if animStacksCount < 1:
return []
animProperties = []
for animStackIdx in range(animStacksCount):
fbxAnimStack = self.fbxScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), animStackIdx)
for layerIdx in range(fbxAnimStack.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId))):
fbxAnimLayer = fbxAnimStack.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId), layerIdx)
for curveNodeIdx in range(fbxAnimLayer.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId))):
fbxAnimCurveNode = fbxAnimLayer.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId), curveNodeIdx)
fbxTimeSpan = fbx.FbxTimeSpan()
fbxAnimCurveNode.GetAnimationInterval(fbxTimeSpan)
for propertyIdx in range(fbxAnimCurveNode.GetDstPropertyCount()):
fbxProperty = fbxAnimCurveNode.GetDstProperty(propertyIdx)
if fbxProperty.GetFbxObject() == fbxObject:
animProperty = AnimProperty(fbxAnimLayer, fbxProperty, fbxTimeSpan)
animProperties.append(animProperty)
return animProperties
def processNodeAnimations(self, fbxNode, usdGeom):
animStacksCount = self.fbxScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId))
if animStacksCount < 1:
return
for animStackIdx in range(animStacksCount):
fbxAnimStack = self.fbxScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), animStackIdx)
for layerIdx in range(fbxAnimStack.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId))):
fbxAnimLayer = fbxAnimStack.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId), layerIdx)
for curveNodeIdx in range(fbxAnimLayer.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId))):
fbxAnimCurveNode = fbxAnimLayer.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId), curveNodeIdx)
for propertyIdx in range(fbxAnimCurveNode.GetDstPropertyCount()):
fbxProperty = fbxAnimCurveNode.GetDstProperty(propertyIdx)
fbxObject = fbxProperty.GetFbxObject()
if fbxObject == fbxNode:
self.processNodeTransformAnimation(fbxNode, fbxProperty, fbxAnimCurveNode, usdGeom)
def processNode(self, fbxNode, path, underSkeleton, indent):
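# Recursively convert the FBX node hierarchy: skeleton roots become SkelRoots, skinned and
# blend-shape meshes are deferred for later processing, and other nodes become meshes or Xforms
# with their transforms and animations.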
nodeName = usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1])
newPath = path + '/' + nodeName
if newPath in self.nodePaths:
newPath = newPath + str(self.nodeId)
self.nodeId = self.nodeId + 1
fbxAttributeType = fbx.FbxNodeAttribute.eNone
fbxNodeAttribute = fbxNode.GetNodeAttribute()
if fbxNodeAttribute:
fbxAttributeType = fbxNodeAttribute.GetAttributeType()
if fbx.FbxNodeAttribute.eSkeleton == fbxAttributeType:
if fbxNodeAttribute.IsSkeletonRoot():
skeleton = self.skinning.findSkeletonByRoot(fbxNode)
if skeleton is None:
skeleton = self.skinning.findSkeletonByJoint(fbxNode)
if skeleton is not None:
skeleton.makeUsdSkeleton(self.usdStage, newPath, self.nodeManager)
if self.verbose:
print indent + "SkelRoot:", nodeName
underSkeleton = skeleton
if underSkeleton and self.getFbxMesh(fbxNode) is not None:
self.skeletonByNode[fbxNode] = underSkeleton
elif self.getFbxSkin(fbxNode) is not None:
self.skeletonByNode[fbxNode] = None
elif self.getFbxBlenShape(fbxNode) is not None:
usdNode = self.prepareBlendShape(fbxNode, newPath)
self.setNodeTransforms(fbxNode, usdNode)
self.processNodeAnimations(fbxNode, usdNode)
else:
# if we have a geometric transformation we shouldn't propagate it to node's children
usdNode = None
hasGeometricTransform = self.hasGeometricTransform(fbxNode)
if underSkeleton is None and hasGeometricTransform:
usdNode = UsdGeom.Xform.Define(self.usdStage, newPath)
geometryPath = newPath + '/' + nodeName + '_geometry'
else:
geometryPath = newPath
usdGeometry = None
if (fbx.FbxNodeAttribute.eMesh == fbxAttributeType or
fbx.FbxNodeAttribute.eSubDiv == fbxAttributeType):
usdGeometry = self.processMesh(fbxNode, geometryPath, underSkeleton, indent)
if underSkeleton is None:
if usdGeometry is None:
usdGeometry = UsdGeom.Xform.Define(self.usdStage, geometryPath)
self.nodePaths[newPath] = newPath
if hasGeometricTransform:
self.setNodeTransforms(fbxNode, usdNode)
self.setGeometricTransform(fbxNode, usdGeometry)
self.processNodeAnimations(fbxNode, usdNode)
else:
self.setNodeTransforms(fbxNode, usdGeometry)
self.processNodeAnimations(fbxNode, usdGeometry)
# process child nodes recursively
if underSkeleton is not None:
newPath = path # keep meshes directly under SkelRoot scope
for childIdx in xrange(fbxNode.GetChildCount()):
self.processNode(fbxNode.GetChild(childIdx), newPath, underSkeleton, indent + ' ')
def populateSkeletons(self, fbxNode):
fbxNodeAttribute = fbxNode.GetNodeAttribute()
if fbxNodeAttribute:
fbxAttributeType = fbxNodeAttribute.GetAttributeType()
if fbx.FbxNodeAttribute.eSkeleton == fbxAttributeType:
if fbxNodeAttribute.IsSkeletonRoot():
self.skinning.createSkeleton(fbxNode)
for childIdx in xrange(fbxNode.GetChildCount()):
self.populateSkeletons(fbxNode.GetChild(childIdx))
def findSkelRoot(self, fbxNode):
fbxNodeAttribute = fbxNode.GetNodeAttribute()
if fbxNodeAttribute:
fbxAttributeType = fbxNodeAttribute.GetAttributeType()
if fbx.FbxNodeAttribute.eSkeleton == fbxAttributeType:
if fbxNodeAttribute.IsSkeletonRoot():
return fbxNode
fbxParentNode = fbxNode.GetParent()
if fbxParentNode is not None:
return self.findSkelRoot(fbxParentNode)
return None
def populateSkins(self, fbxNode):
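# Collect every FBX skin deformer: record its cluster joints and their bind (link) matrices
# in a usdUtils.Skin so skeletons can be built from them later.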
fbxNodeAttribute = fbxNode.GetNodeAttribute()
if fbxNodeAttribute:
fbxAttributeType = fbxNodeAttribute.GetAttributeType()
if (fbx.FbxNodeAttribute.eMesh == fbxAttributeType or
fbx.FbxNodeAttribute.eSubDiv == fbxAttributeType):
fbxMesh = fbxNode.GetNodeAttribute()
for i in range(fbxMesh.GetDeformerCount(fbx.FbxDeformer.eSkin)):
fbxSkin = fbxMesh.GetDeformer(i, fbx.FbxDeformer.eSkin)
# try to find skeleton root (.eSkeleton) in parent nodes
root = self.findSkelRoot(fbxSkin.GetCluster(0).GetLink()) if fbxSkin.GetClusterCount() > 0 else None
skin = usdUtils.Skin(root)
for clusterIdx in range(fbxSkin.GetClusterCount()):
fbxCluster = fbxSkin.GetCluster(clusterIdx)
fbxJointNode = fbxCluster.GetLink()
skin.joints.append(fbxJointNode)
linkWorldTransform = fbx.FbxAMatrix()
linkWorldTransform = fbxCluster.GetTransformLinkMatrix(linkWorldTransform)
skin.bindMatrices[fbxJointNode] = GfMatrix4dWithFbxMatrix(linkWorldTransform)
self.skinning.skins.append(skin)
self.fbxSkinToSkin[fbxSkin] = skin
for childIdx in xrange(fbxNode.GetChildCount()):
self.populateSkins(fbxNode.GetChild(childIdx))
def processSkinning(self):
self.populateSkeletons(self.fbxScene.GetRootNode())
self.populateSkins(self.fbxScene.GetRootNode())
self.skinning.createSkeletonsFromSkins()
if self.verbose:
if len(self.skinning.skeletons) > 0:
print " Found skeletons:", len(self.skinning.skeletons), "with", len(self.skinning.skins), "skin(s)"
def processSkinnedMeshes(self):
for fbxNode, skeleton in self.skeletonByNode.iteritems():
fbxSkin = self.getFbxSkin(fbxNode)
if skeleton is None:
if fbxSkin is None:
continue
skin = self.fbxSkinToSkin[fbxSkin]
skeleton = skin.skeleton
nodeName = usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1])
newPath = skeleton.sdfPath + '/' + nodeName
if newPath in self.nodePaths:
newPath = newPath + str(self.nodeId)
self.nodeId = self.nodeId + 1
self.nodePaths[newPath] = newPath
self.processMesh(fbxNode, newPath, skeleton, '')
def processSkeletalAnimations(self):
for skeletonIdx in range(len(self.skinning.skeletons)):
self.processSkeletalAnimation(skeletonIdx)
def prepareBlendShape(self, fbxNode, path):
fbxBlendShape = self.getFbxBlenShape(fbxNode)
blendShape = self.shapeBlending.createBlendShape(0)
self.blendShapeByNode[fbxNode] = blendShape
return blendShape.makeUsdSkeleton(self.usdStage, path)
def processBlendShapes(self):
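# For each node with a blend shape deformer, export the mesh, create UsdSkel.BlendShape targets
# with point offsets and indices, and bind them to the mesh.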
for fbxNode, blendShape in self.blendShapeByNode.iteritems():
nodeName = usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1])
newPath = blendShape.sdfPath + '/' + nodeName
if newPath in self.nodePaths:
newPath = newPath + str(self.nodeId)
self.nodeId = self.nodeId + 1
self.nodePaths[newPath] = newPath
usdMesh = self.processMesh(fbxNode, newPath, None, '')
fbxMesh = fbxNode.GetNodeAttribute()
if fbx.FbxNodeAttribute.eSubDiv == fbxMesh.GetAttributeType():
fbxMesh = fbxMesh.GetBaseMesh()
points = [Gf.Vec3f(p[0], p[1], p[2]) for p in fbxMesh.GetControlPoints()]
blendShapes = []
blendShapeTargets = []
index = 0
fbxBlendShape = self.getFbxBlenShape(fbxNode)
for i in range(fbxBlendShape.GetBlendShapeChannelCount()):
fbxBlendShapeChannel = fbxBlendShape.GetBlendShapeChannel(i)
for j in range(fbxBlendShapeChannel.GetTargetShapeCount()):
fbxShape = fbxBlendShapeChannel.GetTargetShape(j)
blendShapeName = "blendShape" + str(index)
index += 1
blendShapeTarget = newPath + "/" + blendShapeName
blendShapeName = self.asset.makeUniqueBlendShapeName(blendShapeName, newPath)
blendShapes.append(blendShapeName)
blendShapeTargets.append(blendShapeTarget)
usdBlendShape = UsdSkel.BlendShape.Define(self.usdStage, blendShapeTarget)
if fbxShape.GetControlPointsCount():
offsets = []
pointIndices = []
for k in range(fbxShape.GetControlPointsCount()):
point = fbxShape.GetControlPointAt(k)
if points[k][0] - point[0] != 0 or points[k][1] - point[1] != 0 or points[k][2] - point[2] != 0:
offsets.append(Gf.Vec3f(point[0] - points[k][0], point[1] - points[k][1], point[2] - points[k][2]))
pointIndices.append(k)
usdBlendShape.CreateOffsetsAttr(offsets)
usdBlendShape.CreatePointIndicesAttr(pointIndices)
usdSkelBlendShapeBinding = UsdSkel.BindingAPI(usdMesh)
usdSkelBlendShapeBinding.CreateBlendShapesAttr(blendShapes)
usdSkelBlendShapeBinding.CreateBlendShapeTargetsRel().SetTargets(blendShapeTargets)
UsdSkel.BindingAPI.Apply(usdMesh.GetPrim())
blendShape.addBlendShapeList(blendShapes)
def processBlendShapeAnimations(self):
framesCount = int((self.stopAnimationTime - self.startAnimationTime) * self.fps + 0.5) + 1
startFrame = int(self.startAnimationTime * self.fps + 0.5)
if framesCount == 1:
return
blendShapeIdx = 0
for fbxNode, blendShape in self.blendShapeByNode.iteritems():
fbxBlendShape = self.getFbxBlenShape(fbxNode)
animationName = self.asset.getAnimationsPath() + '/' + 'BlendShapeAnim'
if blendShapeIdx > 0:
animationName += '_' + str(blendShapeIdx)
if self.verbose:
print 'Animation:', animationName
blendShapeIdx += 1
usdSkelAnim = UsdSkel.Animation.Define(self.usdStage, animationName)
attr = usdSkelAnim.CreateBlendShapeWeightsAttr()
for frame in range(framesCount):
time = frame / self.fps + self.startAnimationTime
values = []
for i in range(fbxBlendShape.GetBlendShapeChannelCount()):
fbxBlendShapeChannel = fbxBlendShape.GetBlendShapeChannel(i)
animProperties = self.findAnimationProperties(fbxBlendShapeChannel)
for animProperty in animProperties:
#channelName = str(fbxProperty.GetName()).strip()
fbxMesh = fbxNode.GetNodeAttribute()
if fbx.FbxNodeAttribute.eSubDiv == fbxMesh.GetAttributeType():
fbxMesh = fbxMesh.GetBaseMesh()
fbxTime = fbx.FbxTime()
fbxTime.SetSecondDouble(time)
fbxAnimCurve = animProperty.fbxProperty.GetCurve(animProperty.fbxAnimLayer)
values.append(fbxAnimCurve.Evaluate(fbxTime)[0] / 100.0) # in percent
attr.Set(time = Usd.TimeCode(frame + startFrame), value = values)
blendShape.setSkeletalAnimation(usdSkelAnim)
self.shapeBlending.flush()
def makeUsdStage(self):
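# Main conversion entry point: normalize the axis system and units, then export materials,
# skinning, nodes, skeletal animations and blend shapes into the USD stage.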
self.usdStage = self.asset.makeUsdStage()
# axis system for USD should be Y-up, odd-forward, and right-handed
sceneAxisSystem = self.fbxScene.GetGlobalSettings().GetAxisSystem()
axisSystem = fbx.FbxAxisSystem(fbx.FbxAxisSystem.EUpVector(fbx.FbxAxisSystem.eYAxis),
fbx.FbxAxisSystem.EFrontVector(fbx.FbxAxisSystem.eParityOdd),
fbx.FbxAxisSystem.ECoordSystem(fbx.FbxAxisSystem.eRightHanded))
if sceneAxisSystem != axisSystem:
if self.verbose:
print(" converting to Y-up, odd-forward, and right-handed axis system")
axisSystem.ConvertScene(self.fbxScene)
systemUnit = self.fbxScene.GetGlobalSettings().GetSystemUnit()
if systemUnit != fbx.FbxSystemUnit.cm: # cm is default for USD and FBX
fbxMetersPerUnit = 0.01
metersPerUnit = systemUnit.GetScaleFactor() * fbxMetersPerUnit
if self.legacyModifier is not None and self.legacyModifier.getMetersPerUnit() == 0:
self.legacyModifier.setMetersPerUnit(metersPerUnit)
else:
self.usdStage.SetMetadata("metersPerUnit", metersPerUnit)
self.processMaterials()
self.processSkinning()
self.prepareAnimations()
self.processNode(self.fbxScene.GetRootNode(), self.asset.getGeomPath(), None, '')
self.processSkeletalAnimations()
self.processSkinnedMeshes()
self.processBlendShapes()
self.processBlendShapeAnimations()
self.asset.finalize()
return self.usdStage
def usdStageWithFbx(fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose):
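# Convert an FBX file to a USD stage; returns None if the FBX bindings were not loaded or the conversion fails.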
if not usdStageWithFbxLoaded:
return None
try:
fbxConverter = FbxConverter(fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose)
return fbxConverter.makeUsdStage()
except ConvertError:
return None
| 45.168156
| 176
| 0.635229
| 52,763
| 0.948941
| 0
| 0
| 0
| 0
| 0
| 0
| 3,150
| 0.056653
|
3f6b95561ed162423b6adee3e5e40b725abe8dde
| 3,291
|
py
|
Python
|
modules/ghautoknit/EmbeddedConstraint.py
|
fstwn/ghautokn
|
5ca6d07df601d34be5a67fe6c76a942daef50a85
|
[
"MIT"
] | 2
|
2021-02-19T19:55:21.000Z
|
2021-10-13T23:55:56.000Z
|
modules/ghautoknit/EmbeddedConstraint.py
|
fstwn/ghautoknit
|
5ca6d07df601d34be5a67fe6c76a942daef50a85
|
[
"MIT"
] | null | null | null |
modules/ghautoknit/EmbeddedConstraint.py
|
fstwn/ghautoknit
|
5ca6d07df601d34be5a67fe6c76a942daef50a85
|
[
"MIT"
] | null | null | null |
# PYTHON STANDARD LIBRARY IMPORTS ----------------------------------------------
from __future__ import absolute_import
from __future__ import division
# LOCAL MODULE IMPORTS ---------------------------------------------------------
from ghautoknit.StoredConstraint import StoredConstraint
# ALL LIST ---------------------------------------------------------------------
__all__ = [
"EmbeddedConstraint"
]
# ACTUAL CLASS -----------------------------------------------------------------
class EmbeddedConstraint(object):
"""
Class for representing an autoknit constraint in relation to the model.
The chain is only stored as vertex indices.
"""
def __init__(self, chain, value, radius):
"""Create a new autoknit Constraint."""
self._set_chain(chain)
self._set_value(value)
self._set_radius(radius)
def ToString(self):
name = "Autoknit EmbeddedConstraint"
data = "({}, {}, {})".format(self.Chain, self.Value, self.Radius)
return name + data
# BASE PROPERTIES ----------------------------------------------------------
# CHAIN PROPERTY -----------------------------------------------------------
def _get_chain(self):
return self._chain
def _set_chain(self, chain):
if type(chain) != list:
raise RuntimeError("Expected list of vertex indices as chain!")
try:
for i, item in enumerate(chain):
chain[i] = int(item)
except:
raise RuntimeError("Some of the indices in the given chain " + \
"failed to convert to integers!")
self._chain = chain
Chain = property(_get_chain, _set_chain, None,
"The chain of points of the constraint.")
# TIME VALUE PROPERTY ------------------------------------------------------
def _get_value(self):
return self._value
def _set_value(self, value):
try:
value = float(value)
except Exception, e:
raise RuntimeError("Failed to set time value for constraint " + \
"{} // {}".format(str(self), e))
self._value = value
Value = property(_get_value, _set_value, None,
"The time value of the constraint.")
# RADIUS PROPERTY ----------------------------------------------------------
def _get_radius(self):
return self._radius
def _set_radius(self, radius):
try:
radius = float(radius)
except Exception, e:
raise RuntimeError("Failed to set radius for constraint " + \
"{} // {}".format(str(self), e))
self._radius = radius
Radius = property(_get_radius, _set_radius, None,
"The radius of the constraint.")
# CONVERT CONSTRAINT FOR STORAGE -------------------------------------------
def _get_storable(self):
count = len(self.Chain)
storable = (count, self.Value, self.Radius)
return storable
Storable = property(_get_storable, None, None,
"A storable version of this constraint.")
# MAIN -------------------------------------------------------------------------
if __name__ == '__main__':
pass
| 35.387097
| 80
| 0.485567
| 2,678
| 0.813734
| 0
| 0
| 0
| 0
| 0
| 0
| 1,389
| 0.42206
|
3f6d7159e38e2302b0b79887ec33606e37733f75
| 1,516
|
py
|
Python
|
vr/server/tests/test_build.py
|
isabella232/vr.server
|
705511f8176bda0627be1ae86a458178589ee3db
|
[
"MIT"
] | null | null | null |
vr/server/tests/test_build.py
|
isabella232/vr.server
|
705511f8176bda0627be1ae86a458178589ee3db
|
[
"MIT"
] | 3
|
2016-12-15T21:55:02.000Z
|
2019-02-13T11:43:29.000Z
|
vr/server/tests/test_build.py
|
isabella232/vr.server
|
705511f8176bda0627be1ae86a458178589ee3db
|
[
"MIT"
] | 2
|
2017-01-16T09:31:03.000Z
|
2022-03-26T09:21:36.000Z
|
import tempfile
import pytest
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from django.core.files import File
from vr.server import models
from vr.server.tests import randurl
from vr.common.utils import randchars
pytestmark = [pytest.mark.usefixtures('postgresql'), pytest.mark.usefixtures('gridfs')]
def test_build_usable(gridfs):
app_url = randurl()
a = models.App(name=randchars(), repo_url=app_url, repo_type='hg')
a.save()
with somefile() as f:
b = models.Build(
app=a,
tag='blah',
start_time=timezone.now() - relativedelta(minutes=2),
end_time=timezone.now() - relativedelta(minutes=1),
file=File(f),
status='success',
)
b.save()
assert b.is_usable() is True
def test_build_unusable_status(gridfs):
app_url = randurl()
a = models.App(name=randchars(), repo_url=app_url, repo_type='hg')
a.save()
with somefile() as f:
b = models.Build(
app=a,
tag='blah',
start_time=timezone.now() - relativedelta(minutes=2),
end_time=timezone.now() - relativedelta(minutes=1),
file=File(f),
status='',
)
b.save()
assert b.is_usable() is False
class somefile():
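# Minimal context manager that yields a NamedTemporaryFile to use as the Build file.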
def __enter__(self):
self.file = tempfile.NamedTemporaryFile()
return self.file
def __exit__(self, type, value, traceback):
self.file.close()
| 25.694915
| 70
| 0.622691
| 192
| 0.126649
| 0
| 0
| 0
| 0
| 0
| 0
| 51
| 0.033641
|
3f6f2cf82e789df4f6fa6a684b85e6cb348c35e2
| 629
|
py
|
Python
|
apps/api/v1/pagination.py
|
asmuratbek/oobamarket
|
1053976a13ea84b9aabfcbbcbcffd79549ce9538
|
[
"MIT"
] | null | null | null |
apps/api/v1/pagination.py
|
asmuratbek/oobamarket
|
1053976a13ea84b9aabfcbbcbcffd79549ce9538
|
[
"MIT"
] | 7
|
2020-06-05T23:36:01.000Z
|
2022-01-13T01:42:07.000Z
|
apps/api/v1/pagination.py
|
asmuratbek/oobamarket
|
1053976a13ea84b9aabfcbbcbcffd79549ce9538
|
[
"MIT"
] | null | null | null |
from rest_framework.pagination import LimitOffsetPagination, PageNumberPagination
class CategoryLimitPagination(PageNumberPagination):
page_size = 20
page_size_query_param = 'page_size'
max_page_size = 40
class ProductLimitPagination(PageNumberPagination):
page_size = 20
page_size_query_param = 'page_size'
max_page_size = 40
class ShopLimitPagination(PageNumberPagination):
page_size = 21
page_size_query_param = 'page_size'
max_page_size = 42
class ShopProductsLimitPagination(PageNumberPagination):
page_size = 24
page_size_query_param = 'page_size'
max_page_size = 42
| 24.192308
| 81
| 0.779014
| 535
| 0.850556
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 0.069952
|
3f715eb5609a277ea4d93cce4d190d4b920a7477
| 7,796
|
py
|
Python
|
GraphOfDocs_Representation/graph_algos.py
|
imis-lab/book-chapter
|
8260a60ec91dd29616eeed80f34bdea00fb73cd7
|
[
"MIT"
] | null | null | null |
GraphOfDocs_Representation/graph_algos.py
|
imis-lab/book-chapter
|
8260a60ec91dd29616eeed80f34bdea00fb73cd7
|
[
"MIT"
] | null | null | null |
GraphOfDocs_Representation/graph_algos.py
|
imis-lab/book-chapter
|
8260a60ec91dd29616eeed80f34bdea00fb73cd7
|
[
"MIT"
] | null | null | null |
import time
import json
import traceback
import numpy as np
from statistics import mean
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
class GraphAlgos:
"""
Wrapper class which handles the graph algorithms
more efficiently by abstracting repeated code.
"""
database = None # Static variable shared across objects.
def __init__(self, database, start, relationship, end = None, orientation = 'NATURAL', rel_weight = None):
# Initialize the static variable and class member.
if GraphAlgos.database is None:
GraphAlgos.database = database
# Initialize the optional parameter.
end = end if end is not None else start
# Construct the projection of the anonymous graph.
self.graph_projection = (
f'{{nodeProjection: ["{start}", "{end}"], '
'relationshipProjection: {'
f'{relationship}: {{'
f'type: "{relationship}", '
f'orientation: "{orientation}"'
)
# If the relationship weight property exists, then set it.
if rel_weight is not None:
self.graph_projection += f', properties: "{rel_weight}"'
# Add two right brackets to complete the query.
self.graph_projection += '}}'
def pagerank(self, write_property, max_iterations = 20, damping_factor = 0.85):
setup = (f'{self.graph_projection}, '
f'writeProperty: "{write_property}", '
f'maxIterations: {max_iterations}, '
f'dampingFactor: {damping_factor}}}'
)
GraphAlgos.database.execute(f'CALL gds.pageRank.write({setup})', 'w')
def nodeSimilarity(self, write_property, write_relationship, cutoff = 0.5, top_k = 10):
setup = (f'{self.graph_projection}, '
f'writeProperty: "{write_property}", '
f'writeRelationshipType: "{write_relationship}", '
f'similarityCutoff: {cutoff}, '
f'topK: {top_k}}}'
)
GraphAlgos.database.execute(f'CALL gds.nodeSimilarity.write({setup})', 'w')
def louvain(self, write_property, max_levels = 10, max_iterations = 10):
setup = (f'{self.graph_projection}, '
f'writeProperty: "{write_property}", '
f'maxLevels: {max_levels}, '
f'maxIterations: {max_iterations}}}'
)
GraphAlgos.database.execute(f'CALL gds.louvain.write({setup})', 'w')
def node2vec(self, write_property, embedding_dim = 100, iterations = 1, walk_length = 80,
walks_per_node = 10, window_size = 10, walk_buffer_size = 1000):
setup = (f'{self.graph_projection}, '
f'writeProperty: "{write_property}", '
f'embeddingDimension: {embedding_dim}, '
f'iterations: {iterations}, '
f'walkLength: {walk_length}, '
f'walksPerNode: {walks_per_node}, '
f'windowSize: {window_size}, '
f'walkBufferSize: {walk_buffer_size}}}'
)
GraphAlgos.database.execute(f'CALL gds.alpha.node2vec.write({setup})', 'w')
def graphSage(self, write_property, rel_weight = None, embedding_dim = 64, epochs = 1,
max_iterations = 10, aggregator = 'mean', activation_function = 'sigmoid'):
# The community edition of the Neo4j Graph Data Science Library allows only one model to be stored in the database.
model_exists = GraphAlgos.database.execute('CALL gds.beta.model.exists("graphSage") YIELD exists', 'r')[0][0]
if model_exists: # then drop the model from the database.
GraphAlgos.database.execute('CALL gds.beta.model.drop("graphSage")', 'r')
train_setup = (f'{self.graph_projection}, '
f'embeddingDimension: {embedding_dim}, '
f'epochs: {epochs}, '
f'modelName: "graphSage", '
f'maxIterations: {max_iterations}, '
f'aggregator: "{aggregator}", '
f'activationFunction: "{activation_function}", '
'degreeAsProperty: True'
)
# If the relationship weight property exists, then set it.
if rel_weight is not None:
train_setup += f', relationshipWeightProperty: "{rel_weight}"'
# Add a right bracket to complete the query.
train_setup += '}'
write_setup = (f'{self.graph_projection}, '
f'writeProperty: "{write_property}", '
f'modelName: "graphSage"}}'
)
GraphAlgos.database.execute(f'CALL gds.beta.graphSage.train({train_setup})', 'w')
GraphAlgos.database.execute(f'CALL gds.beta.graphSage.write({write_setup})', 'w')
def fastRP(self, write_property, rel_weight = None, embedding_dim = 100, iterations = 10):
# Construct the iteration weights vector, its first element is 0.0 and the rest are 1.0.
# The length of the vector determines the amount of iterations by the algorithm.
iteration_weights = [0.0] + [1.0] * (iterations - 1)
setup = (f'{self.graph_projection}, '
f'writeProperty: "{write_property}", '
f'embeddingDimension: {embedding_dim}, '
f'iterationWeights: {iteration_weights}'
)
# If the relationship weight property exists, then set it.
if rel_weight is not None:
setup += f', relationshipWeightProperty: "{rel_weight}"'
# Add a right bracket to complete the query.
setup += '}'
GraphAlgos.database.execute(f'CALL gds.fastRP.write({setup})', 'w')
@staticmethod
def get_embeddings(write_property):
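# Return (embedding, assignee) pairs for issues that already have the given embedding property.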
query = (
'MATCH (p:Person)-[:is_assigned_to]->(i:Issue) '
f'WHERE EXISTS(i.{write_property}) '
f'RETURN i.{write_property}, p.uname AS assignee'
)
return GraphAlgos.database.execute(query, 'r')
@staticmethod
def write_word_embeddings_to_csv(write_property, filepath):
query = (
f'MATCH (w:Word) WHERE EXISTS(w.{write_property}) '
f'RETURN w.key, w.{write_property}'
)
with open(filepath, 'w', encoding = 'utf-8-sig', errors = 'ignore') as file:
file.write('idx,word,embedding\n')
for i, (word, embedding) in enumerate(GraphAlgos.database.execute(query, 'r')):
file.write(f'{i},{word},"{embedding}"\n')
@staticmethod
def train_classifier(embeddings):
# Unpack the embeddings and the assignees in X and Y separately.
X, y = map(list, zip(*embeddings))
# Transform y using the Label Encoder.
y = preprocessing.LabelEncoder().fit_transform(y)
# Split our dataset into train and test.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 42)
# Construct the classifier.
LR = LogisticRegression(random_state = 0, multi_class = 'multinomial')
# Train the classifier.
LR.fit(X_train, y_train)
# Predict the values.
y_pred = LR.predict(X_test)
# Print the classification report.
print(classification_report(y_test, y_pred, labels = np.unique(y_pred)))
# These methods enable the use of this class in a with statement.
def __enter__(self):
return self
# Automatic cleanup of the created graph of this class.
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None:
traceback.print_exception(exc_type, exc_value, tb)
| 41.913978
| 124
| 0.608261
| 7,504
| 0.962545
| 0
| 0
| 1,645
| 0.211006
| 0
| 0
| 3,533
| 0.453181
|
3f730b00ede0a815c4c62737f803ff84e093f24f
| 3,124
|
py
|
Python
|
Code/Components/Synthesis/testdata/current/simulation/synthregression/wtermtest.py
|
rtobar/askapsoft
|
6bae06071d7d24f41abe3f2b7f9ee06cb0a9445e
|
[
"BSL-1.0",
"Apache-2.0",
"OpenSSL"
] | 1
|
2020-06-18T08:37:43.000Z
|
2020-06-18T08:37:43.000Z
|
Code/Components/Synthesis/testdata/current/simulation/synthregression/wtermtest.py
|
ATNF/askapsoft
|
d839c052d5c62ad8a511e58cd4b6548491a6006f
|
[
"BSL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null |
Code/Components/Synthesis/testdata/current/simulation/synthregression/wtermtest.py
|
ATNF/askapsoft
|
d839c052d5c62ad8a511e58cd4b6548491a6006f
|
[
"BSL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null |
# regression tests with gridders taking w-term into account
# some fixed parameters are given in wtermtest_template.in
from synthprogrunner import *
def analyseResult(spr, checkWeights=True):
'''
spr - synthesis program runner (to run imageStats)
throws exceptions if something is wrong, otherwise just
returns
'''
src_offset = 0.006/math.pi*180.
psf_peak=[-172.5,-45]
true_peak=sinProjection(psf_peak,src_offset,src_offset)
stats = spr.imageStats('image.field1.restored')
print "Statistics for restored image: ",stats
disterr = getDistance(stats,true_peak[0],true_peak[1])*3600.
if disterr > 8:
raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
if abs(stats['peak']-1.)>0.1:
raise RuntimeError, "Peak flux in the image is notably different from 1 Jy, F=%f" % stats['peak']
stats = spr.imageStats('image.field1')
print "Statistics for modelimage: ",stats
disterr = getDistance(stats,true_peak[0],true_peak[1])*3600.
if disterr > 8:
raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
stats = spr.imageStats('psf.field1')
print "Statistics for psf image: ",stats
disterr = getDistance(stats,psf_peak[0],psf_peak[1])*3600.
if disterr > 8:
raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
stats = spr.imageStats('psf.image.field1')
print "Statistics for preconditioned psf image: ",stats
disterr = getDistance(stats,psf_peak[0],psf_peak[1])*3600.
if disterr > 8:
raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
if abs(stats['peak']-1.)>0.01:
raise RuntimeError, "Peak flux in the preconditioned psf image is notably different from 1.0, F=%f" % stats['peak']
if checkWeights:
stats = spr.imageStats('weights.field1')
print "Statistics for weight image: ",stats
if abs(stats['rms']-stats['peak'])>0.1 or abs(stats['rms']-stats['median'])>0.1 or abs(stats['peak']-stats['median'])>0.1:
raise RuntimeError, "Weight image is expected to be constant for WProject and WStack gridders"
stats = spr.imageStats('residual.field1')
print "Statistics for residual image: ",stats
if stats['rms']>0.01 or abs(stats['median'])>0.0001:
raise RuntimeError, "Residual image has too high rms or median. Please verify"
spr = SynthesisProgramRunner(template_parset = 'wtermtest_template.in')
spr.runSimulator()
spr.addToParset("Cimager.gridder = WProject")
spr.runImager()
analyseResult(spr)
spr.initParset()
spr.addToParset("Cimager.gridder = WStack")
spr.runImager()
analyseResult(spr)
spr.initParset()
spr.addToParset("Cimager.gridder = WProject")
spr.addToParset("Cimager.gridder.snapshotimaging = true")
spr.addToParset("Cimager.gridder.snapshotimaging.wtolerance = 500")
spr.runImager()
analyseResult(spr,False)
| 42.216216
| 142
| 0.717029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,476
| 0.472471
|
3f731bc8d56706afd6b8d8a2244161c707b604bd
| 6,047
|
py
|
Python
|
manage/fuzzytranslation.py
|
Acidburn0zzz/browser-update
|
fed7b4c52deccd582fcf8b8cca4809607bbb32cd
|
[
"MIT"
] | 2
|
2017-10-06T15:53:23.000Z
|
2017-10-06T15:53:38.000Z
|
manage/fuzzytranslation.py
|
Acidburn0zzz/browser-update
|
fed7b4c52deccd582fcf8b8cca4809607bbb32cd
|
[
"MIT"
] | null | null | null |
manage/fuzzytranslation.py
|
Acidburn0zzz/browser-update
|
fed7b4c52deccd582fcf8b8cca4809607bbb32cd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 12 14:21:31 2016
@author: TH
"""
#%%
import polib
#%%
#old (translated) string
#new renamed string
pairs="""
An initiative by web designers to inform users about browser-updates
An initiative by websites to inform users to update their web browser
If you are on a computer that is maintained by an admin and you cannot install a new browser, ask your admin about it.
Ask your admin to update your browser if you cannot install updates yourself.
blaasdasdfsdaf
faselsdfsadf""";
pairs=pairs.replace("\r","")[1:-1].split("\n\n")
mappings={s.split("\n")[0]:s.split("\n")[1] for s in pairs}
#%%
po = polib.pofile('lang/de_DE/LC_MESSAGES/update.po')
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
#print(entry.msgid)
if entry.msgid in mappings:
print("replacing", entry.msgid[:10], "with",mappings[entry.msgid][:10])
entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/de_DE/LC_MESSAGES/update.mo')
#%%
pairs="""aaa
bbb
Subtle
Unobtrusive
bla
fasel"""
pairs=pairs.replace("\r","")[1:-1].split("\n\n")
mappings={s.split("\n")[0]:s.split("\n")[1] for s in pairs}
#%%
po = polib.pofile('lang/de_DE/LC_MESSAGES/site.po')
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
#print(entry.msgid)
if entry.msgid in mappings:
print("replacing", entry.msgid[:10], "with",mappings[entry.msgid][:10])
entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/de_DE/LC_MESSAGES/site.mo')
#%%
pot = polib.pofile('lang/update.pot')
for entry in pot:
print (entry.msgid, entry.msgstr)
#%%
#%% display old translations
po = polib.pofile('lang/de_DE/LC_MESSAGES/update.po')
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
print(entry.msgid)
#%%
#%% getting files
from glob import glob
paths = glob('lang/*/LC_MESSAGES/')
paths=[p[5:10] for p in paths]
paths
#%% updating all site.po
for p in paths:
print("updating %s"%p)
try:
po = polib.pofile('lang/%s/LC_MESSAGES/site.po'%p)
except OSError:
print("no file found")
continue
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
#print(entry.msgid)
if entry.msgid in mappings:
print(" ", entry.msgid[:10], "-->",mappings[entry.msgid][:10])
entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/%s/LC_MESSAGES/site.mo'%p)
#%% updating all update.po
for p in paths:
print("updating %s"%p)
try:
po = polib.pofile('lang/%s/LC_MESSAGES/update.po'%p)
except OSError:
print("no file found")
continue
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
#print(entry.msgid)
if entry.msgid in mappings:
print(" ", entry.msgid[:10], "-->",mappings[entry.msgid][:10])
entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/%s/LC_MESSAGES/update.mo'%p)
#%%
pairs="""aaa
bbb
Optionally include up to two placeholders "%s" which will be replaced with the browser version and contents of the link tag. Example: "Your browser (%s) is old. Please <a%s>update</a>"
Optionally include up to two placeholders "%s" which will be replaced with the browser version and contents of the link tag. Example: "Your browser (%s) is old. Please <a%s>update</a>"
bla
fasel"""
pairs=pairs.replace("\r","")[1:-1].split("\n\n")
mappings={s.split("\n")[0]:s.split("\n")[1] for s in pairs}
#%%
from glob import glob
paths = glob('lang/*/LC_MESSAGES/')
paths=[p[5:10] for p in paths]
paths
#%% updating all site.po
for p in paths:
print("customize %s"%p)
try:
po = polib.pofile('lang/%s/LC_MESSAGES/customize.po'%p)
except OSError:
print("no file found")
continue
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
#print(entry.msgid)
if entry.msgid in mappings:
print(" ", entry.msgid[:10], "-->",mappings[entry.msgid][:10])
entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/%s/LC_MESSAGES/customize.mo'%p)
#%% extract strings
import subprocess
subprocess.call(['xgettext',
"header.php",
"footer.php",
"update-browser.php",
"--keyword=T_gettext",
"--keyword=T_",
"--keyword=T_ngettext:1,2",
"--from-code=utf-8",
"--package-name=browser-update-update",
"--language=PHP",
"--output=lang/update.pot"])
#%% extract site strings
import subprocess
subprocess.call(['xgettext',
"blog.php",
"stat.php",
"index.php",
"contact.php",
"update.testing.php",
"--keyword=T_gettext",
"--keyword=T_",
"--keyword=T_ngettext:1,2",
"--from-code=utf-8",
"--package-name=browser-update-site",
"--language=PHP",
"--output=lang/site.pot"])
#%% extract customize strings
import subprocess
subprocess.call(['xgettext',
"customize.php",
"--keyword=T_gettext",
"--keyword=T_",
"--keyword=T_ngettext:1,2",
"--from-code=utf-8",
"--package-name=browser-update-customize",
"--language=PHP",
"--output=lang/customize.pot"])
#%% upload new sources for translations
import subprocess
subprocess.call(['crowdin-cli-py', 'upload', 'sources'])
#subprocess.call(['java', '-jar', 'manage\crowdin-cli.jar', 'upload', 'sources','--config','manage\crowdin.yaml'])
#subprocess.call(['java', '-jar', 'manage\crowdin-cli.jar', 'upload', 'sources'])
| 29.21256
| 197
| 0.599471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,795
| 0.462213
|
58ac40be7eaf9e54cb6bdb3fcb14931b75949783
| 2,401
|
py
|
Python
|
scripts/test_template.py
|
1466899531/auto_api_test
|
cb0f474394ef776d4b7710821b74065307a551b2
|
[
"MIT"
] | 16
|
2021-01-21T05:07:15.000Z
|
2022-02-20T06:34:28.000Z
|
scripts/test_template.py
|
liucong233/auto_api_test
|
303dc2962ad9b9917cb6a31e311e4ca711f7c7bf
|
[
"MIT"
] | null | null | null |
scripts/test_template.py
|
liucong233/auto_api_test
|
303dc2962ad9b9917cb6a31e311e4ca711f7c7bf
|
[
"MIT"
] | 7
|
2021-01-21T05:07:42.000Z
|
2022-01-04T03:56:25.000Z
|
# -*- coding:utf-8 -*-
"""
@File : test_template
@Author : Chen
@Contact : nonevxx@gmail.com
@Date : 2021/1/20 20:09
@Desc :
"""
# Imports
import pytest
import requests
from time import sleep
from api.template_api import TemplateAPI
from tools.get_log import GetLog
from tools.read_file import read_json
import allure
# Get a logger instance
log = GetLog.get_log()
@allure.feature('Test class template')
@pytest.mark.skip(reason="Reference template, not executed")
class TestTemplate:
session = None
    # Class-level setup
@classmethod
def setup_class(cls):
        cls.session = requests.Session()  # initialize the session object
cls.template = TemplateAPI()
    # Class-level teardown
@classmethod
def teardown_class(cls):
cls.session.close()
@classmethod
def setup(cls):
sleep(1.5)
    # Test methods
    @allure.story("Test method template - add")
    @pytest.mark.parametrize(("attr1", "attr2", "success", "expect"), read_json("test_add"))
    def test_add(self, attr1, attr2, success, expect):
        # Call the add API
        response = self.template.api_add(self.session, attr1, attr2)
        # Log the result
        log.info("Add - status code: {}".format(response.status_code))
        # Assert on the status code
        assert response.status_code == expect, "Status code assertion failed"
    @allure.story("Test method template - upd")
    @pytest.mark.parametrize(("attr1", "attr2", "success", "expect"), read_json("test_upd"))
    def test_upd(self, attr1, attr2, success, expect):
        # Call the update API
        response = self.template.api_upd(self.session, attr1, attr2)
        # Log the result
        log.info("Update - status code: {}".format(response.status_code))
        # Assert on the status code
        assert response.status_code == expect, "Status code assertion failed"
    @allure.story("Test method template - get")
    @pytest.mark.parametrize(("attr1", "attr2", "success", "expect"), read_json("test_get"))
    def test_get(self, attr1, attr2, success, expect):
        # Call the query API
        response = self.template.api_get(self.session, attr1, attr2)
        # Log the result
        log.info("Query - status code: {}".format(response.status_code))
        # Assert on the status code
        assert response.status_code == expect, "Status code assertion failed"
    @allure.story("Test method template - del")
    @pytest.mark.parametrize(("uid", "success", "expect"), read_json("test_del"))
    def test_del(self, uid, success, expect):
        # Call the delete API
        response = self.template.api_del(self.session, uid)
        # Log the result
        log.info("Delete - status code: {}".format(response.status_code))
        # Assert on the status code
        assert response.status_code == expect, "Status code assertion failed"
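# Hedged usage notes (assumptions, not part of the project):
# - read_json("test_add") is expected to return an iterable of (attr1, attr2, success, expect)
#   rows matching the parametrize signatures above, e.g. [["tom", "123456", True, 200]].
# - a typical run collects allure results and then serves the report:
#       pytest scripts/test_template.py --alluredir=./report
#       allure serve ./report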
| 28.583333
| 92
| 0.634736
| 2,323
| 0.840753
| 0
| 0
| 2,398
| 0.867897
| 0
| 0
| 947
| 0.342743
|
58af6add6122e05924d2e3ff9cd5a71f9446a5c7
| 5,090
|
py
|
Python
|
dockerfilegenerator/generator.py
|
ccurcanu/aws-serverless-dockerfile-generator
|
cb1e272d21337074870c0b1f17c7535cd1e7c217
|
[
"Apache-2.0"
] | 2
|
2019-08-05T02:24:56.000Z
|
2020-04-30T22:16:00.000Z
|
dockerfilegenerator/generator.py
|
ccurcanu/aws-serverless-dockerfile-generator
|
cb1e272d21337074870c0b1f17c7535cd1e7c217
|
[
"Apache-2.0"
] | 1
|
2020-04-11T16:34:08.000Z
|
2020-04-11T19:29:26.000Z
|
dockerfilegenerator/generator.py
|
ccurcanu/aws-serverless-dockerfile-generator
|
cb1e272d21337074870c0b1f17c7535cd1e7c217
|
[
"Apache-2.0"
] | 1
|
2019-08-05T02:24:59.000Z
|
2019-08-05T02:24:59.000Z
|
# -*- coding: utf-8 -*-
import botocore.exceptions
import logging
import dockerfilegenerator.lib.constants as constants
import dockerfilegenerator.lib.exceptions as exceptions
import dockerfilegenerator.lib.versions as versions
import dockerfilegenerator.lib.jsonstore as jsonstore
import dockerfilegenerator.lib.s3store as s3store
import dockerfilegenerator.lib.github as github
logger = logging.getLogger()
TRACKED_TOOLS = {
"terraform": versions.get_latest_hashicorp_terraform_version,
"packer": versions.get_latest_hashicorp_packer_version,
"go": versions.get_latest_golango_go_version
}
class UtilsMixin:
@property
def tools_current_versions(self):
if not hasattr(self, "_tools_current_versions"):
self._tools_current_versions = None
if self._tools_current_versions is None:
self._tools_current_versions = dict(
(tool_name, self.dockerfile.version(tool_name))
for tool_name in self.dockerfile.json)
return self._tools_current_versions
@property
def tools_next_versions(self):
if not hasattr(self, "_tools_next_versions"):
self._tools_next_versions = None
if self._tools_next_versions is None:
self._tools_next_versions = dict(
(tool_name, TRACKED_TOOLS[tool_name]())
for tool_name in TRACKED_TOOLS)
return self._tools_next_versions
def update_dockerfile_versions(self):
dockerfile_changed = False
for tool in self.tools_current_versions:
# TODO: Refactor this method...
if self.dockerfile.force_version(tool):
logger.info("Update versions: %s has force_version" % tool)
continue
if tool == self.dockerfile.dockerfile_repo_name:
continue
current_version = self.tools_current_versions[tool]
next_version = self.tools_next_versions.get(tool, None)
if next_version is None:
logger.info("Update versions: %s has no next version" % tool)
continue
if current_version == next_version:
logger.info(
"Update versions: %s has no changed version" % tool)
continue
self.dockerfile.set_version(tool, next_version)
logger.info("Update versions: %s has next version %s" %
(tool, next_version))
dockerfile_changed = True
if dockerfile_changed:
self.dockerfile.set_next_version_dockerfile()
return dockerfile_changed
class DockerfileGeneratorLambda(UtilsMixin):
def __init__(self):
self.s3bucket = s3store.get_s3_bucket_manager()
self.dockerfile_repo = github.get_github_repository(
constants.DOCKERFILE_GITHUB_REPO)
self.dockerfile = jsonstore.get_dockerfile(self.dockerfile_repo)
self._internal_state = None
self.exit_code = 0
@property
def internal_state(self):
""" Get the state from AWS S3 json file, or use the one from Github,
if there is none."""
if self._internal_state is None:
internal_state = self.s3bucket.read_object(
constants.INTERNAL_STATE_FILE)
if internal_state is None:
logger.info("Internal state: No state from S3")
internal_state = self.dockerfile.dump
self.save_state_to_s3(internal_state)
self._internal_state = jsonstore.Store(internal_state)
return self._internal_state
def update_files_on_github(self):
template_dockerfile = self.dockerfile_repo.get_file_contents(
constants.TEMPLATE_GITHUB_DOCKERFILE_PATH)
template_readme = self.dockerfile_repo.get_file_contents(
constants.TEMPLATE_GITHUB_README_PATH)
commit_msg = self.dockerfile.update_summary(self.internal_state)
commit_files = [
(constants.INTERNAL_STATE_FILE, self.dockerfile.dump),
("Dockerfile", template_dockerfile.format(
**self.dockerfile.template_variables)),
("README.md", template_readme.format(
**self.dockerfile.template_variables))]
logger.info("Updating files on Github with message:\n\t%s" %
commit_msg)
self.dockerfile_repo.commit(commit_files, commit_msg)
def save_state_to_s3(self, content):
try:
logger.info("Saving state to S3")
self.s3bucket.write_object(constants.INTERNAL_STATE_FILE, content)
except (botocore.exceptions.ClientError, Exception) as e:
raise exceptions.LambdaException(
"Error: Uploading object to s3 bucket: %s" % (str(e)))
def main(self):
if self.update_dockerfile_versions():
self.update_files_on_github()
self.save_state_to_s3(self.dockerfile.dump)
return self.exit_code # Making Lambda Service happy
def lambda_handler():
return DockerfileGeneratorLambda().main()
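# Hedged deployment sketch (assumption, not part of this module): AWS Lambda invokes its entry
# point with (event, context), so a thin adapter like the one below would let lambda_handler()
# stay unchanged if this module is wired up directly as a Lambda function.
def _lambda_entrypoint(event, context):  # hypothetical handler name
    return lambda_handler()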
| 39.765625
| 78
| 0.662279
| 4,406
| 0.865619
| 0
| 0
| 1,396
| 0.274263
| 0
| 0
| 580
| 0.113949
|
58af85873ae33fb22513395345bd12fb92d5791b
| 1,179
|
py
|
Python
|
ajustes_UM/tesis/main/urls.py
|
abelgonzalez/ajustes
|
f6f99aea18cfb82750805321abfc822d8a6ec5ed
|
[
"MIT"
] | 1
|
2015-03-04T13:04:33.000Z
|
2015-03-04T13:04:33.000Z
|
ajustes_UM/tesis/main/urls.py
|
abelgonzalez/ajustes
|
f6f99aea18cfb82750805321abfc822d8a6ec5ed
|
[
"MIT"
] | null | null | null |
ajustes_UM/tesis/main/urls.py
|
abelgonzalez/ajustes
|
f6f99aea18cfb82750805321abfc822d8a6ec5ed
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, url
from main import views
urlpatterns = patterns('',
url(r'^$', views.inicio, name='inicio'),
url(r'^acerca/', views.acerca, name='acerca'),
url(r'^contacto/', views.contacto, name='contacto'),
url(r'^autenticar/', views.autenticar, name='autenticar'),
url(r'^cerrar_sesion/', views.cerrar_sesion, name='cerrar_sesion'),
url(r'^tiempo/', views.tiempo, name='tiempo'),
url(r'^perfil/(?P<usuario>\d+)/$', views.perfil, name='perfil'),
url(r'^imprimir_ajuste/', views.imprimir_ajuste, name='imprimir_ajuste'),
url(r'^imprimir_ajusteId/(?P<ajusteEstudianteId>\d+)/$', views.imprimir_ajusteId,
name='imprimir_ajusteId'),
url(r'^imprimir_expediente/', views.imprimir_expediente, name='imprimir_expediente'),
url(r'^imprimir_expedienteId/(?P<expedienteEstudianteId>\d+)/$', views.imprimir_expedienteId,
name='imprimir_expedienteId'),
)
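# Note: django.conf.urls.patterns() was removed in Django 1.10. A hedged sketch of the same
# routes for newer Django versions (same views module assumed) would use a plain list instead:
#     from django.conf.urls import url
#     urlpatterns = [
#         url(r'^$', views.inicio, name='inicio'),
#         url(r'^acerca/', views.acerca, name='acerca'),
#         ...
#     ]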
| 56.142857
| 116
| 0.546226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 407
| 0.345208
|
58afa3b02632f4e434958664a87781b4ca073a2a
| 394
|
py
|
Python
|
python/0011. maxArea.py
|
whtahy/leetcode
|
a2955123d203b155455ceefe38514fd0077d6db9
|
[
"CC0-1.0"
] | 1
|
2017-12-09T05:37:51.000Z
|
2017-12-09T05:37:51.000Z
|
python/0011. maxArea.py
|
whtahy/leetcode
|
a2955123d203b155455ceefe38514fd0077d6db9
|
[
"CC0-1.0"
] | null | null | null |
python/0011. maxArea.py
|
whtahy/leetcode
|
a2955123d203b155455ceefe38514fd0077d6db9
|
[
"CC0-1.0"
] | null | null | null |
class Solution:
def maxArea(self, ls):
n = len(ls) - 1
v, left, right = [], 0, n
while 0 <= left < right <= n:
h = min(ls[left], ls[right])
v += [h * (right - left)]
while ls[left] <= h and left < right:
left += 1
while ls[right] <= h and left < right:
right -= 1
return max(v)
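# Minimal usage sketch for the two-pointer solution above (container-with-most-water):
# with the classic input below, the best pair is heights 8 and 7, giving min(8, 7) * 7 = 49.
if __name__ == "__main__":
    print(Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]))  # expected: 49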
| 30.307692
| 50
| 0.411168
| 393
| 0.997462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
58b28e8645b762a35a626046be48d346a6bd215a
| 15,595
|
py
|
Python
|
test/test_views.py
|
Nemoden/Simblin
|
1f97a985125023e64dfc6f4db6292cf3a2b904c9
|
[
"BSD-3-Clause"
] | 53
|
2015-02-01T14:06:48.000Z
|
2022-01-02T15:46:00.000Z
|
test/test_views.py
|
Aaron1992/Simblin
|
1f97a985125023e64dfc6f4db6292cf3a2b904c9
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_views.py
|
Aaron1992/Simblin
|
1f97a985125023e64dfc6f4db6292cf3a2b904c9
|
[
"BSD-3-Clause"
] | 23
|
2015-01-04T08:11:27.000Z
|
2019-11-24T13:18:25.000Z
|
# -*- coding: utf-8 -*-
"""
Simblin Test Views
~~~~~~~~~~~~~~~~~~
Test the different views of the blogging application.
:copyright: (c) 2010 by Eugen Kiss.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import datetime
import flask
from simblin.extensions import db
from simblin.models import Post, Tag, Category, post_tags, post_categories, Admin
from nose.tools import assert_equal, assert_true, assert_false
from test import TestCase
class ViewTestCase(TestCase):
"""Base TestClass for views"""
def register(self, username, password, password2='', email=''):
"""Helper function to register a user"""
return self.client.post('/register', data=dict(
username=username,
password=password,
password2=password2,
email=email,
), follow_redirects=True)
def login(self, username, password):
"""Helper function to login"""
return self.client.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def register_and_login(self, username, password):
"""Register and login in one go"""
self.register(username, password, password)
self.login(username, password)
def logout(self):
"""Helper function to logout"""
return self.client.get('/logout', follow_redirects=True)
def add_post(self, title, markup='', comments_allowed=None, visible=None,
tags='', categories=[]):
"""Helper functions to create a blog post"""
data=dict(
title=title,
markup=markup,
tags=tags,
action='Publish',
)
if comments_allowed is not None:
data['comments_allowed'] = True
if visible is not None:
data['visible'] = True
# Mimic select form fields
for i, category_id in enumerate(categories):
data['category-%d' % i] = category_id
return self.client.post('/compose', data=data, follow_redirects=True)
def update_post(self, slug, title, markup='', comments_allowed=None,
visible=None, tags=None, categories=[]):
"""Helper functions to create a blog post"""
data=dict(
title=title,
markup=markup,
tags=tags,
action='Update',
)
if comments_allowed is not None:
data['comments_allowed'] = True
if visible is not None:
data['visible'] = True
# Mimic select form fields
for i, category_id in enumerate(categories):
data['category-%d' % i] = category_id
return self.client.post('/update/%s' % slug, data=data,
follow_redirects=True)
def delete_post(self, slug):
"""Helper function to delete a blog post"""
return self.client.post('/_delete/%s' % slug, data=dict(next=''),
follow_redirects=True)
def add_category(self, name):
"""Register category in the database and return its id"""
return flask.json.loads(
self.client.post('/_add_category', data=dict(name=name)).data)['id']
def delete_category(self, id):
return self.client.post('/_delete_category', data=dict(id=id))
class TestRegistration(ViewTestCase):
def test_validation(self):
"""Test form validation"""
self.clear_db()
rv = self.register('', 'password')
assert 'You have to enter a username' in rv.data
rv = self.register('britney spears', '')
assert 'You have to enter a password' in rv.data
rv = self.register('barney', 'abv', 'abc')
assert 'Passwords must match' in rv.data
def test_registration(self):
"""Test successful registration and automatic login"""
self.clear_db()
with self.client:
rv = self.register('barney', 'abc', 'abc')
assert 'You are the new master of this blog' in rv.data
assert flask.session['logged_in']
def test_reregistration(self):
"""Test that only one admin can exist at a time and reregistration
with new credentials only works when logged in"""
self.clear_db()
rv = self.register('barney', 'abc', 'abc')
self.logout()
rv = self.register('barney', 'abc', 'abc')
assert 'There can only be one admin' in rv.data
self.login('barney', 'abc')
rv = self.register('moe', 'ugly', 'ugly') # clears the admin
rv = self.register('moe', 'ugly', 'ugly')
assert 'You are the new master of this blog' in rv.data
assert_equal(Admin.query.count(), 1)
class TestLogin(ViewTestCase):
def test_validation(self):
"""Test form validation"""
self.clear_db()
self.register('barney', 'abc', 'abc')
rv = self.login('borney', 'abc')
assert 'Invalid username' in rv.data
rv = self.login('barney', 'abd')
assert 'Invalid password' in rv.data
def test_login_logout(self):
"""Test logging in and out"""
self.clear_db()
self.register('barney', 'abc', 'abc')
with self.client:
rv = self.login('barney', 'abc')
assert 'You have been successfully logged in' in rv.data
assert flask.session['logged_in']
rv = self.logout()
assert 'You have been successfully logged out' in rv.data
assert 'logged_in' not in flask.session
class TestPost(ViewTestCase):
"""Tags and categories are tested alongside"""
def test_validation(self):
"""Check if form validation and validation in general works"""
self.clear_db()
self.register_and_login('barney', 'abc')
rv = self.add_post(title='', markup='a', tags='b')
assert 'You must provide a title' in rv.data
rv = self.update_post(title='a', markup='', tags='', slug='999x00')
assert 'Invalid slug' in rv.data
rv = self.add_post(title='a', markup='', tags='')
assert 'New post was successfully posted' in rv.data
def test_creation(self):
"""Test the blog post's fields' correctness after adding an
post and test proper category association"""
self.clear_db()
self.register_and_login('barney', 'abc')
title = "My post"
markup = "# Title"
tags = "django, franz und bertha,vil/bil"
category1_id = self.add_category('cool')
category2_id = self.add_category('cooler')
self.add_post(title=title, markup=markup, tags=tags,
categories=[category1_id, category1_id, category2_id])
post = Post.query.get(1)
post_tagnames = [tag.name for tag in post.tags]
category_names = [x.name for x in post.categories]
assert_equal(post.id, 1)
assert_equal(post.title, title)
assert_equal(post.markup, markup)
assert_false(post.comments_allowed)
assert_false(post.visible)
assert_equal(post.slug, 'my-post')
assert '<h1>Title</h1>' in post.html
assert_equal(post.datetime.date(), datetime.date.today())
assert_equal(sorted(post_tagnames),
sorted(['django','franz-und-bertha','vil-bil']))
assert_equal(sorted(category_names), sorted(['cool', 'cooler']))
assert_equal(Tag.query.count(), 3)
assert_equal(Category.query.count(), 2)
assert_equal(db.session.query(post_tags).count(), 3)
# Expect only two mappings although the mapping to category1
# has been added twice
assert_equal(db.session.query(post_categories).count(), 2)
# Add another post
self.add_post(title=post.title, tags=['django'], comments_allowed=True,
visible=True)
post2 = Post.query.get(2)
assert_equal(post2.title, post.title)
assert_true(post2.comments_allowed)
assert_true(post2.visible)
assert_equal(post2.slug, post.slug + '-2')
assert_equal(post2.categories, [])
assert_equal(Tag.query.count(), 3)
return post
def test_updating(self):
"""Test the blog post's fields' correctness after updating a post and
test the proper creation and automatic tidying of tags and tag
mappings and category associations"""
post = self.test_creation()
datetime = post.datetime
self.update_post(title='cool', markup='## Title', slug=post.slug,
tags=['django'], comments_allowed=True, visible=True)
updated_post = Post.query.get(1)
assert_equal(updated_post.title, 'cool')
assert_equal(updated_post.markup, '## Title')
assert_true(updated_post.comments_allowed)
assert_true(updated_post.visible)
assert_equal(updated_post.slug, 'cool')
assert '<h2>Title</h2>' in updated_post.html
assert_equal(updated_post.datetime, datetime)
assert_equal([x.name for x in updated_post.tags], ['django'])
# Expect two rows in the posts table because two posts were
# created and one updated. Expect only one row in the tags table
# because only 'django' is used as a tag.
assert_equal(Post.query.count(), 2)
assert_equal(Tag.query.count(), 1)
# Because there are two post with a tag expect two rows
# in the post_tag association table
assert_equal(db.session.query(post_tags).count(), 2)
# Because there is no post in a category anymore expect not rows
# in the post_categories association table
assert_equal(db.session.query(post_categories).count(), 0)
def test_deletion(self):
"""Test the deletion of a blog post and the accompanying deletion of
tags"""
self.clear_db()
self.register_and_login('barney', 'abc')
self.add_post(title='Title', markup='', tags='cool')
posts = Post.query.all()
tags = Tag.query.all()
assert_equal(len(posts), 1)
assert_equal(len(tags), 1)
rv = self.delete_post(slug='idontexist')
assert 'No such post' in rv.data
rv = self.delete_post(slug='title')
assert 'Post deleted' in rv.data
posts = Post.query.all()
tags = Tag.query.all()
assert_equal(len(posts), 0)
assert_equal(len(tags), 0)
def test_singleview(self):
"""Test the displaying of one blog post"""
self.clear_db()
self.register_and_login('barney', 'abc')
self.add_post(title='Title', markup='', visible=True)
rv = self.client.get('/post/title')
self.assert_200(rv)
assert 'Title' in rv.data
self.add_post(title='Title2', visible=None)
rv = self.client.get('/post/title2')
self.assert_200(rv)
assert 'Title2' in rv.data
self.logout()
rv = self.client.get('/post/title')
self.assert_200(rv)
assert 'Title' in rv.data
rv = self.client.get('/post/title2')
self.assert_404(rv)
def test_multipleview(self):
"""Test the displaying of multiple blog posts on home page"""
self.clear_db()
self.register_and_login('barney', 'abc')
self.add_post(title='Title', markup='', visible=True)
self.add_post(title='Title2', visible=None)
self.logout()
rv = self.client.get('/')
self.assert_200(rv)
assert 'Title' in rv.data
assert 'Title2' not in rv.data
class TestArchives(ViewTestCase):
def test_archives_page(self):
"""Test the displaying of the archives page"""
self.clear_db()
rv = self.client.get('/archives/')
self.assert_200(rv)
def test_month_view(self):
"""Test the displaying of the month view"""
self.clear_db()
self.register_and_login('barney', 'abc')
post = Post('the chronic 2001', visible=False)
post.datetime = datetime.datetime(1999, 11, 16)
db.session.add(post)
db.session.commit()
rv = self.client.get('/1999/11/')
self.assert_200(rv)
assert 'the chronic 2001' in rv.data
rv = self.client.get('/7777/12/')
assert 'No entries here so far' in rv.data
rv = self.client.get('/1999/14/')
self.assert_404(rv)
self.logout()
rv = self.client.get('/1999/11/')
self.assert_200(rv)
assert 'No entries here so far' in rv.data
class TestTag(ViewTestCase):
def test_view(self):
"""Test the displaying of the tag view"""
self.clear_db()
self.register_and_login('barney', 'abc')
tag = Tag('drdre')
db.session.add(tag)
db.session.commit()
post = Post('the chronic 2001', visible=True)
post2 = Post('the chronic 2002', visible=False)
post._tags = [tag]
post2._tags = [tag]
db.session.add(post)
db.session.add(post2)
db.session.commit()
rv = self.client.get('/tag/drdre/')
self.assert_200(rv)
assert 'the chronic 2001' in rv.data
rv = self.client.get('/tag/bobbybrown/')
self.assert_404(rv)
self.logout()
rv = self.client.get('/tag/drdre/')
self.assert_200(rv)
assert 'the chronic 2001' in rv.data
assert 'the chronic 2002' not in rv.data
class TestCategory(ViewTestCase):
def test_view(self):
"""Test the displaying of the category view"""
self.clear_db()
self.register_and_login('barney', 'abc')
category = Category('drdre')
db.session.add(category)
db.session.commit()
post = Post('the chronic', visible=True)
post2 = Post('the chrinoc', visible=False)
post._categories = [category]
post2._categories = [category]
db.session.add(post)
db.session.add(post2)
db.session.commit()
rv = self.client.get('/category/drdre/')
self.assert_200(rv)
assert 'the chronic' in rv.data
rv = self.client.get('/category/sugeknight/')
self.assert_404(rv)
self.logout()
rv = self.client.get('/category/drdre/')
self.assert_200(rv)
assert 'the chronic' in rv.data
assert 'the chrinoc' not in rv.data
rv = self.client.get('/uncategorized/')
self.assert_200(rv)
assert 'Uncategorized posts' in rv.data
post2 = Post('dancing in the moonlight')
db.session.add(post2)
db.session.commit()
rv = self.client.get('/uncategorized/')
self.assert_200(rv)
assert 'dancing in the moonlight' in rv.data
def test_deletion_view(self):
"""Test if deletion works properly"""
self.clear_db()
self.register_and_login('barney', 'abc')
category = Category('drdre')
db.session.add(category)
db.session.commit()
assert_equal(Category.query.count(), 1)
rv = self.delete_category(1)
        print(rv)
assert_equal(Category.query.count(), 0)
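# Hedged usage sketch (assumption, not part of the suite): a new test case would lean on the
# ViewTestCase helpers defined above, e.g.
#     class TestExample(ViewTestCase):
#         def test_compose(self):
#             self.clear_db()
#             self.register_and_login('barney', 'abc')
#             rv = self.add_post(title='Hello', markup='# Hi', tags='demo')
#             assert 'New post was successfully posted' in rv.data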
| 35.93318
| 81
| 0.58833
| 15,005
| 0.962167
| 0
| 0
| 0
| 0
| 0
| 0
| 4,102
| 0.263033
|
58b31cded44ccfc6677efd1c2715c62d51feaad9
| 14,174
|
py
|
Python
|
freeclimb/models/message_result.py
|
FreeClimbAPI/python-sdk
|
1ec89eddc0069a39989579552b979a9d21418117
|
[
"MIT"
] | null | null | null |
freeclimb/models/message_result.py
|
FreeClimbAPI/python-sdk
|
1ec89eddc0069a39989579552b979a9d21418117
|
[
"MIT"
] | 6
|
2020-03-03T20:14:26.000Z
|
2021-12-06T22:11:15.000Z
|
freeclimb/models/message_result.py
|
FreeClimbAPI/python-sdk
|
1ec89eddc0069a39989579552b979a9d21418117
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
FreeClimb API
FreeClimb is a cloud-based application programming interface (API) that puts the power of the Vail platform in your hands. FreeClimb simplifies the process of creating applications that can use a full range of telephony features without requiring specialized or on-site telephony equipment. Using the FreeClimb REST API to write applications is easy! You have the option to use the language of your choice or hit the API directly. Your application can execute a command by issuing a RESTful request to the FreeClimb API. The base URL to send HTTP requests to the FreeClimb REST API is: /apiserver. FreeClimb authenticates and processes your request. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@freeclimb.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from freeclimb.configuration import Configuration
class MessageResult(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'uri': 'str',
'date_created': 'str',
'date_updated': 'str',
'revision': 'int',
'account_id': 'str',
'message_id': 'str',
'status': 'str',
'_from': 'str',
'to': 'str',
'text': 'str',
'direction': 'str',
'notification_url': 'str'
}
attribute_map = {
'uri': 'uri',
'date_created': 'dateCreated',
'date_updated': 'dateUpdated',
'revision': 'revision',
'account_id': 'accountId',
'message_id': 'messageId',
'status': 'status',
'_from': 'from',
'to': 'to',
'text': 'text',
'direction': 'direction',
'notification_url': 'notificationUrl'
}
def __init__(self, uri=None, date_created=None, date_updated=None, revision=None, account_id=None, message_id=None, status=None, _from=None, to=None, text=None, direction=None, notification_url=None, local_vars_configuration=None): # noqa: E501
"""MessageResult - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._uri = None
self._date_created = None
self._date_updated = None
self._revision = None
self._account_id = None
self._message_id = None
self._status = None
self.__from = None
self._to = None
self._text = None
self._direction = None
self._notification_url = None
self.discriminator = None
if uri is not None:
self.uri = uri
if date_created is not None:
self.date_created = date_created
if date_updated is not None:
self.date_updated = date_updated
if revision is not None:
self.revision = revision
if account_id is not None:
self.account_id = account_id
if message_id is not None:
self.message_id = message_id
if status is not None:
self.status = status
if _from is not None:
self._from = _from
if to is not None:
self.to = to
if text is not None:
self.text = text
if direction is not None:
self.direction = direction
if notification_url is not None:
self.notification_url = notification_url
@property
def uri(self):
"""Gets the uri of this MessageResult. # noqa: E501
The URI for this resource, relative to /apiserver. # noqa: E501
:return: The uri of this MessageResult. # noqa: E501
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""Sets the uri of this MessageResult.
The URI for this resource, relative to /apiserver. # noqa: E501
:param uri: The uri of this MessageResult. # noqa: E501
:type: str
"""
self._uri = uri
@property
def date_created(self):
"""Gets the date_created of this MessageResult. # noqa: E501
The date that this resource was created (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). # noqa: E501
:return: The date_created of this MessageResult. # noqa: E501
:rtype: str
"""
return self._date_created
@date_created.setter
def date_created(self, date_created):
"""Sets the date_created of this MessageResult.
The date that this resource was created (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). # noqa: E501
:param date_created: The date_created of this MessageResult. # noqa: E501
:type: str
"""
self._date_created = date_created
@property
def date_updated(self):
"""Gets the date_updated of this MessageResult. # noqa: E501
The date that this resource was last updated (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). # noqa: E501
:return: The date_updated of this MessageResult. # noqa: E501
:rtype: str
"""
return self._date_updated
@date_updated.setter
def date_updated(self, date_updated):
"""Sets the date_updated of this MessageResult.
The date that this resource was last updated (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). # noqa: E501
:param date_updated: The date_updated of this MessageResult. # noqa: E501
:type: str
"""
self._date_updated = date_updated
@property
def revision(self):
"""Gets the revision of this MessageResult. # noqa: E501
Revision count for the resource. This count is set to 1 on creation and is incremented every time it is updated. # noqa: E501
:return: The revision of this MessageResult. # noqa: E501
:rtype: int
"""
return self._revision
@revision.setter
def revision(self, revision):
"""Sets the revision of this MessageResult.
Revision count for the resource. This count is set to 1 on creation and is incremented every time it is updated. # noqa: E501
:param revision: The revision of this MessageResult. # noqa: E501
:type: int
"""
self._revision = revision
@property
def account_id(self):
"""Gets the account_id of this MessageResult. # noqa: E501
String that uniquely identifies this account resource. # noqa: E501
:return: The account_id of this MessageResult. # noqa: E501
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this MessageResult.
String that uniquely identifies this account resource. # noqa: E501
:param account_id: The account_id of this MessageResult. # noqa: E501
:type: str
"""
self._account_id = account_id
@property
def message_id(self):
"""Gets the message_id of this MessageResult. # noqa: E501
String that uniquely identifies this message resource # noqa: E501
:return: The message_id of this MessageResult. # noqa: E501
:rtype: str
"""
return self._message_id
@message_id.setter
def message_id(self, message_id):
"""Sets the message_id of this MessageResult.
String that uniquely identifies this message resource # noqa: E501
:param message_id: The message_id of this MessageResult. # noqa: E501
:type: str
"""
self._message_id = message_id
@property
def status(self):
"""Gets the status of this MessageResult. # noqa: E501
Indicates the state of the message through the message lifecycle including: new, queued, rejected, sending, sent, failed, received, undelivered, expired, deleted, and unknown # noqa: E501
:return: The status of this MessageResult. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this MessageResult.
Indicates the state of the message through the message lifecycle including: new, queued, rejected, sending, sent, failed, received, undelivered, expired, deleted, and unknown # noqa: E501
:param status: The status of this MessageResult. # noqa: E501
:type: str
"""
allowed_values = ["new", "queued", "rejected", "sending", "sent", "failed", "received", "undelivered", "expired", "deleted", "unknown"] # noqa: E501
if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def _from(self):
"""Gets the _from of this MessageResult. # noqa: E501
Phone number in E.164 format that sent the message. # noqa: E501
:return: The _from of this MessageResult. # noqa: E501
:rtype: str
"""
return self.__from
@_from.setter
def _from(self, _from):
"""Sets the _from of this MessageResult.
Phone number in E.164 format that sent the message. # noqa: E501
:param _from: The _from of this MessageResult. # noqa: E501
:type: str
"""
self.__from = _from
@property
def to(self):
"""Gets the to of this MessageResult. # noqa: E501
Phone number in E.164 format that received the message. # noqa: E501
:return: The to of this MessageResult. # noqa: E501
:rtype: str
"""
return self._to
@to.setter
def to(self, to):
"""Sets the to of this MessageResult.
Phone number in E.164 format that received the message. # noqa: E501
:param to: The to of this MessageResult. # noqa: E501
:type: str
"""
self._to = to
@property
def text(self):
"""Gets the text of this MessageResult. # noqa: E501
Message contents # noqa: E501
:return: The text of this MessageResult. # noqa: E501
:rtype: str
"""
return self._text
@text.setter
def text(self, text):
"""Sets the text of this MessageResult.
Message contents # noqa: E501
:param text: The text of this MessageResult. # noqa: E501
:type: str
"""
self._text = text
@property
def direction(self):
"""Gets the direction of this MessageResult. # noqa: E501
Noting whether the message was inbound or outbound # noqa: E501
:return: The direction of this MessageResult. # noqa: E501
:rtype: str
"""
return self._direction
@direction.setter
def direction(self, direction):
"""Sets the direction of this MessageResult.
Noting whether the message was inbound or outbound # noqa: E501
:param direction: The direction of this MessageResult. # noqa: E501
:type: str
"""
self._direction = direction
@property
def notification_url(self):
"""Gets the notification_url of this MessageResult. # noqa: E501
URL invoked when message sent # noqa: E501
:return: The notification_url of this MessageResult. # noqa: E501
:rtype: str
"""
return self._notification_url
@notification_url.setter
def notification_url(self, notification_url):
"""Sets the notification_url of this MessageResult.
URL invoked when message sent # noqa: E501
:param notification_url: The notification_url of this MessageResult. # noqa: E501
:type: str
"""
self._notification_url = notification_url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.to_camel_case(attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif value is None:
continue
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MessageResult):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MessageResult):
return True
return self.to_dict() != other.to_dict()
def to_camel_case(self, snake_str):
components = snake_str.split('_')
return components[0] + ''.join(x.title() for x in components[1:])
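# Minimal usage sketch (illustrative values): build a result and inspect its dict form;
# to_dict() camelCases the snake_case attribute names (e.g. notification_url -> notificationUrl)
# and skips attributes that are still None.
if __name__ == "__main__":
    msg = MessageResult(to="+15551230001", text="hello", direction="outbound", status="queued")
    print(msg.to_dict())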
| 31.851685
| 667
| 0.605686
| 13,224
| 0.932976
| 0
| 0
| 8,421
| 0.594116
| 0
| 0
| 8,162
| 0.575843
|
58b39d610eae8b36afa5ec0f450ede4efe4c78d4
| 342
|
py
|
Python
|
blog/views.py
|
artkapl/django-blog-project
|
16494465042dd6846f3a2cd560c0cfe7737cc8e0
|
[
"MIT"
] | null | null | null |
blog/views.py
|
artkapl/django-blog-project
|
16494465042dd6846f3a2cd560c0cfe7737cc8e0
|
[
"MIT"
] | null | null | null |
blog/views.py
|
artkapl/django-blog-project
|
16494465042dd6846f3a2cd560c0cfe7737cc8e0
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from .models import Post
def home(request):
context = {
'posts': Post.objects.all()
}
return render(request=request, template_name='blog/home.html', context=context)
def about(request):
return render(request=request, template_name='blog/about.html', context={'title': 'About'})
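# Hedged wiring sketch (assumption, not part of this file): the two views above are typically
# routed from the app's urls.py, e.g.
#     from django.urls import path
#     from . import views
#     urlpatterns = [
#         path('', views.home, name='blog-home'),
#         path('about/', views.about, name='blog-about'),
#     ]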
| 24.428571
| 95
| 0.701754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 54
| 0.157895
|
58b410177d19ca32957d1f156f14c3e7bd5b9089
| 718
|
bzl
|
Python
|
az/private/common/utils.bzl
|
jullianoacqio/rules_microsoft_azure
|
85f8d633db46c7c6aefd5c9c1856aa57473d10fb
|
[
"Apache-2.0"
] | 4
|
2020-08-05T18:21:43.000Z
|
2020-11-10T19:42:48.000Z
|
terraform/private/common/utils.bzl
|
acqio/rules_hashicorp_terraform
|
add89d62abf3739dbd8908a43df366511027e4fc
|
[
"Apache-2.0"
] | 3
|
2020-09-01T14:35:18.000Z
|
2021-03-29T17:27:12.000Z
|
az/private/common/utils.bzl
|
acqio/rules_microsoft_azure
|
85f8d633db46c7c6aefd5c9c1856aa57473d10fb
|
[
"Apache-2.0"
] | 7
|
2020-08-04T20:14:10.000Z
|
2021-02-18T17:10:55.000Z
|
def _check_stamping_format(f):
if f.startswith("{") and f.endswith("}"):
return True
return False
def _resolve_stamp(ctx, string, output):
stamps = [ctx.info_file, ctx.version_file]
args = ctx.actions.args()
args.add_all(stamps, format_each = "--stamp-info-file=%s")
args.add(string, format = "--format=%s")
args.add(output, format = "--output=%s")
ctx.actions.run(
executable = ctx.executable._stamper,
arguments = [args],
inputs = stamps,
tools = [ctx.executable._stamper],
outputs = [output],
mnemonic = "Stamp",
)
utils = struct(
resolve_stamp = _resolve_stamp,
check_stamping_format = _check_stamping_format,
)
| 28.72
| 62
| 0.628134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.084958
|
58b4a5438c1537dcf99f56657476da7aa2cae99e
| 4,166
|
py
|
Python
|
hue.py
|
desheffer/hue-adapter
|
724e296c8dd52302c0380a58f4390fc3059705dc
|
[
"MIT"
] | null | null | null |
hue.py
|
desheffer/hue-adapter
|
724e296c8dd52302c0380a58f4390fc3059705dc
|
[
"MIT"
] | null | null | null |
hue.py
|
desheffer/hue-adapter
|
724e296c8dd52302c0380a58f4390fc3059705dc
|
[
"MIT"
] | null | null | null |
from config import Config
import flask
import json
import os
from ssdp import SSDP
from threading import Thread
import urllib3
config = None
config_file_paths = [
os.path.dirname(os.path.realpath(__file__)) + "/config/default.cfg.local",
"/etc/hue-adapter/default.cfg.local",
]
for config_file_path in config_file_paths:
if os.path.isfile(config_file_path):
config = Config(file(config_file_path))
if not config:
print "Cannot find configuration file"
exit(1)
app = flask.Flask(__name__)
@app.route("/setup.xml")
def get_setup_file():
"""Serve the SSDP setup file."""
out = "<?xml version=\"1.0\"?>\n" + \
"<root xmlns=\"urn:schemas-upnp-org:device-1-0\">\n" + \
"<specVersion>\n" + \
"<major>1</major>\n" + \
"<minor>0</minor>\n" + \
"</specVersion>\n" + \
"<URLBase>http://%s:%d/</URLBase>\n" % (config.web.addr, config.web.port) + \
"<device>\n" + \
"<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>\n" + \
"<friendlyName>Philips Hue Emulator</friendlyName>\n" + \
"<manufacturer>Royal Philips Electronics</manufacturer>\n" + \
"<manufacturerURL></manufacturerURL>\n" + \
"<modelDescription>Philips Hue Emulator</modelDescription>\n" + \
"<modelName>Philips hue bridge 2012</modelName>\n" + \
"<modelNumber>929000226503</modelNumber>\n" + \
"<modelURL></modelURL>\n" + \
"<serialNumber>00000000000000000001</serialNumber>\n" + \
"<UDN>uuid:776c1cbc-790a-425f-a890-a761ec57513c</UDN>\n" + \
"</device>\n" + \
"</root>\n"
return flask.Response(out, mimetype="text/xml")
@app.route("/api/<username>/lights", methods=["GET"])
def get_all_lights(username):
"""Get all lights"""
out = {}
for id, light in config.lights.iteritems():
out[id] = {
"state": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"xy": [0, 0],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"type": "Extended color light",
"name": light["name"],
"modelid": "LCT001",
"swversion": "6609461",
"pointsymbol": {},
}
return flask.jsonify(out)
@app.route("/api/<username>/lights/<id>", methods=["GET"])
def get_light(username, id):
"""Get light attributes and state"""
if id in config.lights:
light = config.lights[id]
else:
return "", 3
out = {
"state": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"xy": [0, 0],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"type": "Extended color light",
"name": light["name"],
"modelid": "LCT001",
"swversion": "6609461",
"pointsymbol": {},
}
return flask.jsonify(out)
@app.route("/api/<username>/lights/<id>/state", methods=["PUT"])
def set_lights_state(username, id):
"""Set light state"""
if id in config.lights:
light = config.lights[id]
else:
return "", 3
data = flask.request.get_json(force=True)
if not data or "on" not in data:
return "", 6
if data["on"]:
url = light["on_url"]
else:
url = light["off_url"]
try:
http = urllib3.PoolManager()
r = http.request("GET", url)
except:
return "", 901
out = [
{
"success": {
"/lights/" + id + "/state/on": data["on"]
}
}
]
return flask.Response(json.dumps(out), mimetype="text/json")
if __name__ == "__main__":
ssdp = SSDP(config.web.addr, config.web.port)
ssdp_thread = Thread(target=ssdp.run)
ssdp_thread.setDaemon(True)
ssdp_thread.start()
app.run(host=config.web.addr, port=config.web.port)
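# Hedged client sketch (assumption): an Echo-style controller toggles a light by PUTting a JSON
# body with an "on" flag to the state endpoint handled above, e.g.
#     import urllib3
#     http = urllib3.PoolManager()
#     http.request("PUT", "http://<bridge-addr>:<port>/api/anyuser/lights/1/state",
#                  body='{"on": true}')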
| 26.877419
| 87
| 0.520163
| 0
| 0
| 0
| 0
| 3,404
| 0.817091
| 0
| 0
| 1,504
| 0.361018
|
58b676c8df734180c643826f2bc368889a0790b4
| 2,820
|
py
|
Python
|
safe/geokdbush/kdbushTest.py
|
s-a-f-e/backend
|
6018f51466df9abd58f25729d91856842eee9509
|
[
"MIT"
] | 1
|
2019-05-06T19:40:43.000Z
|
2019-05-06T19:40:43.000Z
|
safe/geokdbush/kdbushTest.py
|
s-a-f-e/backend
|
6018f51466df9abd58f25729d91856842eee9509
|
[
"MIT"
] | 9
|
2019-12-04T22:57:46.000Z
|
2022-02-10T07:15:11.000Z
|
safe/geokdbush/kdbushTest.py
|
s-a-f-e/backend
|
6018f51466df9abd58f25729d91856842eee9509
|
[
"MIT"
] | 3
|
2019-05-01T20:41:33.000Z
|
2019-10-03T20:57:00.000Z
|
from kdbush import KDBush
# test data
points = [
[54,1],[97,21],[65,35],[33,54],[95,39],[54,3],[53,54],[84,72],[33,34],[43,15],[52,83],[81,23],[1,61],[38,74],
[11,91],[24,56],[90,31],[25,57],[46,61],[29,69],[49,60],[4,98],[71,15],[60,25],[38,84],[52,38],[94,51],[13,25],
[77,73],[88,87],[6,27],[58,22],[53,28],[27,91],[96,98],[93,14],[22,93],[45,94],[18,28],[35,15],[19,81],[20,81],
[67,53],[43,3],[47,66],[48,34],[46,12],[32,38],[43,12],[39,94],[88,62],[66,14],[84,30],[72,81],[41,92],[26,4],
[6,76],[47,21],[57,70],[71,82],[50,68],[96,18],[40,31],[78,53],[71,90],[32,14],[55,6],[32,88],[62,32],[21,67],
[73,81],[44,64],[29,50],[70,5],[6,22],[68,3],[11,23],[20,42],[21,73],[63,86],[9,40],[99,2],[99,76],[56,77],
[83,6],[21,72],[78,30],[75,53],[41,11],[95,20],[30,38],[96,82],[65,48],[33,18],[87,28],[10,10],[40,34],
[10,20],[47,29],[46,78]]
ids = [
97, 74, 95, 30, 77, 38, 76, 27, 80, 55, 72, 90, 88, 48, 43, 46, 65, 39, 62, 93, 9, 96, 47, 8, 3, 12, 15, 14, 21, 41, 36, 40, 69, 56, 85, 78, 17, 71, 44,
19, 18, 13, 99, 24, 67, 33, 37, 49, 54, 57, 98, 45, 23, 31, 66, 68, 0, 32, 5, 51, 75, 73, 84, 35, 81, 22, 61, 89, 1, 11, 86, 52, 94, 16, 2, 6, 25, 92,
42, 20, 60, 58, 83, 79, 64, 10, 59, 53, 26, 87, 4, 63, 50, 7, 28, 82, 70, 29, 34, 91]
coords = [
10,20,6,22,10,10,6,27,20,42,18,28,11,23,13,25,9,40,26,4,29,50,30,38,41,11,43,12,43,3,46,12,32,14,35,15,40,31,33,18,
43,15,40,34,32,38,33,34,33,54,1,61,24,56,11,91,4,98,20,81,22,93,19,81,21,67,6,76,21,72,21,73,25,57,44,64,47,66,29,
69,46,61,38,74,46,78,38,84,32,88,27,91,45,94,39,94,41,92,47,21,47,29,48,34,60,25,58,22,55,6,62,32,54,1,53,28,54,3,
66,14,68,3,70,5,83,6,93,14,99,2,71,15,96,18,95,20,97,21,81,23,78,30,84,30,87,28,90,31,65,35,53,54,52,38,65,48,67,
53,49,60,50,68,57,70,56,77,63,86,71,90,52,83,71,82,72,81,94,51,75,53,95,39,78,53,88,62,84,72,77,73,99,76,73,81,88,
87,96,98,96,82]
index = KDBush(points)
result = index.range(20, 30, 50, 70)
print(result) # [60, 20, 45, 3, 17, 71, 44, 19, 18, 15, 69, 90, 62, 96, 47, 8, 77, 72]
for id in result:
p = points[id]
if p[0] < 20 or p[0] > 50 or p[1] < 30 or p[1] > 70:
print("FAIL")
# check the complement: every point inside the box must have been returned
for id in range(len(points)):
    p = points[id]
    if id not in result and p[0] >= 20 and p[0] <= 50 and p[1] >= 30 and p[1] <= 70:
        print("FAIL: in-range point missing from result")
def sqDist2(a, b):
dx = a[0] - b[0]
dy = a[1] - b[1]
    return dx * dx + dy * dy
index2 = KDBush(points)
qp = [50, 50]
r = 20
r2 = 20 * 20
result = index.within(qp[0], qp[1], r)
print(result) # [60, 6, 25, 92, 42, 20, 45, 3, 71, 44, 18, 96]
for id in result:
p = points[id]
    if (sqDist2(p, qp) > r2): print('FAIL: result point out of range')
# check the complement: every point within the radius must have been returned
for id in range(len(points)):
    p = points[id]
    if (id not in result and sqDist2(p, qp) <= r2):
        print('FAIL: point within radius missing from result')
| 46.229508
| 156
| 0.537234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 233
| 0.082624
|
58b7a94417cb2c171bbf2548469ad555f0dc6eca
| 6,662
|
py
|
Python
|
buckit/compiler.py
|
martarozek/buckit
|
343cc5a5964c1d43902b6a77868652adaefa0caa
|
[
"BSD-3-Clause"
] | null | null | null |
buckit/compiler.py
|
martarozek/buckit
|
343cc5a5964c1d43902b6a77868652adaefa0caa
|
[
"BSD-3-Clause"
] | null | null | null |
buckit/compiler.py
|
martarozek/buckit
|
343cc5a5964c1d43902b6a77868652adaefa0caa
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
import os
import subprocess
import platform
from constants import BUCKCONFIG_LOCAL
from configure_buck import update_config
def get_current_platform_flavor():
platforms = {
'Darwin': 'macos',
'Linux': 'linux',
'Windows': 'windows',
}
return platforms.get(platform.system(), 'default')
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program, get_canonical=False):
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return os.path.realpath(program) if get_canonical else program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return os.path.realpath(exe_file) if get_canonical else exe_file
return None
def detect_py2():
return which('python2')
def detect_py3():
return which('python3', get_canonical=True)
def detect_python_libs(python):
# We want to strip version and site-packages off from the path to get lib
# path
return subprocess.check_output([
python,
'-c',
(
'from __future__ import print_function; '
'from distutils import sysconfig; '
'import os; '
'print(os.sep.join(sysconfig.get_python_lib().split(os.sep)[:-2]))'
)]).decode('utf-8').split('\n')[0]
def detect_python_include(python):
return subprocess.check_output([
python,
'-c',
(
'from __future__ import print_function; '
'from distutils import sysconfig; '
'print(sysconfig.get_python_inc())'
)]).decode('utf-8').split('\n')[0]
def get_system_lib_paths():
libs = {
'linux': [
'/usr/local/lib64',
'/usr/local/lib',
'/usr/lib64',
'/usr/lib',
'/lib64',
'/lib',
],
'macos': [
'/usr/local/lib',
'/usr/local/opt/{name}/lib',
'/usr/lib',
],
}
return libs[get_current_platform_flavor()]
def detect_cc():
if 'CC' in os.environ:
return os.environ['CC']
clang = which('clang')
if clang:
return clang
gcc = which('gcc')
if gcc:
return gcc
def detect_cxx():
if 'CXX' in os.environ:
return os.environ['CXX']
clang_pp = which('clang++')
if clang_pp:
return clang_pp
g_pp = which('g++')
if g_pp:
return g_pp
return None
def detect_c_standard(compiler_cmd):
versions = [
'-std=gnu11',
'-std=c11',
'-std=gnu99',
'-std=c99',
]
for version in versions:
logging.debug("Checking %s support for -std=%s", compiler_cmd, version)
cmd = [compiler_cmd, version, '-x', 'c', '-']
proc = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
stdout, stderr = proc.communicate(
'int main() { return 0; }'.encode('utf-8')
)
if proc.returncode != 0:
logging.debug(
"Got return code %s, output: %s. trying next", proc.returncode,
stdout
)
else:
return version
return None
def detect_cxx_standard(compiler_cmd):
versions = [
# '-std=gnu++1z',
# '-std=c++1z',
'-std=gnu++14',
'-std=c++14',
'-std=gnu++1y',
'-std=c++1y',
'-std=gnu++11',
'-std=c++11',
]
for version in versions:
logging.debug("Checking %s support for -std=%s", compiler_cmd, version)
cmd = [compiler_cmd, version, '-x', 'c++', '-']
proc = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
stdout, stderr = proc.communicate(
'int main() { return 0; }'.encode('utf-8')
)
if proc.returncode != 0:
logging.debug(
"Got return code %s, output: %s. trying next", proc.returncode,
stdout
)
else:
return version
return None
def configure_compiler(project_root):
"""
Sets up .buckconfig.local in the root project with
basic c++/c compiler settings. More advanced probing
will probably be done in the future
"""
buckconfig_local = os.path.join(project_root, BUCKCONFIG_LOCAL)
logging.info("{bold}Detecting compiler{clear}")
current_platform = get_current_platform_flavor()
cc = detect_cc()
cxx = detect_cxx()
if not cc or not cxx:
        logging.warning("Could not find clang or g++ in PATH")
return 0
c_standard = detect_c_standard(cc)
if c_standard:
cflags = [c_standard]
else:
cflags = []
cxx_standard = detect_cxx_standard(cxx)
if cxx_standard:
cxxflags = [cxx_standard]
else:
cxxflags = []
py2 = detect_py2()
py3 = detect_py3()
py2_include = detect_python_include(py2)
py2_libs = detect_python_libs(py2)
py3_include = detect_python_include(py3)
py3_libs = detect_python_libs(py3)
to_set = {
'cxx': {
'cflags': cflags + ['-pthread', '-g'],
'cxxflags': cxxflags + ['-pthread', '-g'],
'ldflags': ['-pthread'],
'cxx': [cxx],
'cc': [cc],
},
}
to_set['cxx#' + current_platform] = to_set['cxx'].copy()
to_set['cxx']['default_platform'] = current_platform
py2_settings = {
'interpreter': py2,
'includes': py2_include,
'libs': py2_libs,
}
py3_settings = {
'interpreter': py3,
'includes': py3_include,
'libs': py3_libs,
}
if py2:
to_set['python#py2'] = py2_settings
to_set['python#py2-%s' % current_platform] = py2_settings
if py3:
to_set['python#py3'] = py3_settings
to_set['python#py3-%s' % current_platform] = py3_settings
to_set['buckit'] = {'system_lib_paths': ','.join(get_system_lib_paths())}
update_config(project_root, buckconfig_local, to_set)
return 0
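# Hedged usage sketch (assumption): the module is normally driven by a caller that supplies the
# project root; run standalone, it could be exercised roughly like this.
if __name__ == '__main__':
    import sys
    logging.basicConfig(level=logging.DEBUG)
    sys.exit(configure_compiler(os.getcwd()))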
| 25.427481
| 80
| 0.564545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,832
| 0.274992
|
58b7d71f96b456407bfe5eac83c75c43ba5fd90a
| 8,002
|
py
|
Python
|
test/python/.dbwebb/test/suite.d/kmom06/analyzer/test_analyzer.py
|
AndreasArne/python-examination
|
a24297f3f73b181e64e744d0b8b52d88d03e844b
|
[
"MIT"
] | null | null | null |
test/python/.dbwebb/test/suite.d/kmom06/analyzer/test_analyzer.py
|
AndreasArne/python-examination
|
a24297f3f73b181e64e744d0b8b52d88d03e844b
|
[
"MIT"
] | 14
|
2021-02-09T10:40:43.000Z
|
2022-02-18T12:24:39.000Z
|
test/python/.dbwebb/test/suite.d/kmom06/analyzer/test_analyzer.py
|
AndreasArne/python-examination
|
a24297f3f73b181e64e744d0b8b52d88d03e844b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Contains testcases for the individual examination.
"""
import unittest
from io import StringIO
import os
import sys
from unittest.mock import patch
from examiner import ExamTestCase, ExamTestResult, tags
from examiner import import_module, find_path_to_assignment
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_PATH = find_path_to_assignment(FILE_DIR)
if REPO_PATH not in sys.path:
sys.path.insert(0, REPO_PATH)
# Path to file and basename of the file to import
main = import_module(REPO_PATH, "main")
class Test1Files(ExamTestCase):
"""
Each assignment has 1 testcase with multiple asserts.
The different asserts https://docs.python.org/3.6/library/unittest.html#test-cases
"""
class Test2Counters(ExamTestCase):
"""
    Menu options for counting
"""
@classmethod
def setUpClass(cls):
# Otherwise the .txt files will not be found
os.chdir(REPO_PATH)
@tags("count", "lines")
def test_b_lines(self):
"""
        Tests calling the menu option 'lines' in main.py.
        Uses the following as input:
        {arguments}
        Expects the following to appear in the output:
        {correct}
        Got the following:
        {student}
"""
self.norepr = True
self._multi_arguments = ["lines", "", "q"]
with patch('builtins.input', side_effect=self._multi_arguments):
with patch('sys.stdout', new=StringIO()) as fake_out:
main.main()
str_data = fake_out.getvalue()
self.assertIn("17", str_data)
@tags("count", "words")
def test_c_words(self):
"""
        Tests calling the menu option 'words' in main.py.
        Uses the following as input:
        {arguments}
        Expects the following to appear in the output:
        {correct}
        Got the following:
        {student}
"""
self.norepr = True
self._multi_arguments = ["words", "", "q"]
with patch('builtins.input', side_effect=self._multi_arguments):
with patch('sys.stdout', new=StringIO()) as fake_out:
main.main()
str_data = fake_out.getvalue()
self.assertIn("199", str_data)
@tags("count", "letters")
def test_d_letters(self):
"""
        Tests calling the menu option 'letters' in main.py.
        Uses the following as input:
        {arguments}
        Expects the following to appear in the output:
        {correct}
        Got the following:
        {student}
"""
self.norepr = True
self._multi_arguments = ["letters", "", "q"]
self.norepr = True
with patch('builtins.input', side_effect=self._multi_arguments):
with patch('sys.stdout', new=StringIO()) as fake_out:
main.main()
str_data = fake_out.getvalue()
self.assertIn("907", str_data)
class Test3Frequencies(ExamTestCase):
"""
    Menu options for frequency
"""
def check_print_contain(self, inp, correct):
"""
        Helper that feeds input, captures stdout and asserts the expected strings appear in the output.
"""
with patch("builtins.input", side_effect=inp):
with patch("sys.stdout", new=StringIO()) as fake_out:
main.main()
for val in correct:
str_data = fake_out.getvalue()
self.assertIn(val, str_data)
@tags("freq", "word_frequency")
def test_a_word_frequency(self):
"""
        Tests calling the menu option 'word_frequency' in main.py.
        Uses the following as input:
        {arguments}
        Expects the following to appear in the output:
        {correct}
        Got the following:
        {student}
"""
self.norepr = True
self._multi_arguments = ["word_frequency", "", "q"]
self.check_print_contain(self._multi_arguments, [
"the: 12 | 6.0%",
"to: 8 | 4.0%",
"and: 7 | 3.5%",
"of: 6 | 3.0%",
"street: 5 | 2.5%",
"him: 5 | 2.5%",
"he: 5 | 2.5%",
])
@tags("freq", "letter_frequency")
def test_b_letter_frequency(self):
"""
        Tests calling the menu option 'letter_frequency' in main.py.
        Uses the following as input:
        {arguments}
        Expects the following to appear in the output:
        {correct}
        Got the following:
        {student}
"""
self.norepr = True
self._multi_arguments = ["letter_frequency", "", "q"]
self.check_print_contain(self._multi_arguments, [
"e: 108 | 11.9%",
"t: 91 | 10.0%",
"o: 77 | 8.5%",
"h: 67 | 7.4%",
"n: 66 | 7.3%",
"i: 64 | 7.1%",
"a: 64 | 7.1%",
])
class Test4All(ExamTestCase):
"""
    Menu option 'all'
"""
def check_print_contain(self, inp, correct):
"""
        Helper that feeds input, captures stdout and asserts the expected strings appear in the output.
"""
with patch("builtins.input", side_effect=inp):
with patch("sys.stdout", new=StringIO()) as fake_out:
main.main()
for val in correct:
str_data = fake_out.getvalue()
self.assertIn(val, str_data)
@tags("all")
def test_a_all(self):
"""
        Tests calling the menu option 'all' in main.py.
        Uses the following as input:
        {arguments}
        Expects the following to appear in the output:
        {correct}
        Got the following:
        {student}
"""
self.norepr = True
self._multi_arguments = ["all", "", "q"]
self.check_print_contain(self._multi_arguments, [
"17",
"199",
"907",
"the: 12 | 6.0%",
"to: 8 | 4.0%",
"and: 7 | 3.5%",
"of: 6 | 3.0%",
"street: 5 | 2.5%",
"him: 5 | 2.5%",
"he: 5 | 2.5%",
"e: 108 | 11.9%",
"t: 91 | 10.0%",
"o: 77 | 8.5%",
"h: 67 | 7.4%",
"n: 66 | 7.3%",
"i: 64 | 7.1%",
"a: 64 | 7.1%",
])
class Test4Change(ExamTestCase):
"""
    Menu option 'change'
"""
@tags("change")
def test_a_change(self):
"""
        Tests calling the menu options 'change' and 'all' in main.py.
        Uses the following as input:
        {arguments}
        Expects the following to appear in the output:
        {correct}
        Got the following:
        {student}
"""
self.norepr = True
self._multi_arguments = ["change", "lorum.txt", "", "all", "", "q"]
with patch('builtins.input', side_effect=self._multi_arguments):
with patch('sys.stdout', new=StringIO()) as fake_out:
main.main()
str_data = fake_out.getvalue()
self.assertIn("23", str_data)
self.assertIn("3", str_data)
self.assertIn("140", str_data)
self.assertIn("dolor: 2 | 8.0%", str_data)
self.assertIn("vivamus: 1 | 4.0%", str_data)
self.assertIn("vitae: 1 | 4.0%", str_data)
self.assertIn("varius: 1 | 4.0%", str_data)
self.assertIn("urna: 1 | 4.0%", str_data)
self.assertIn("sit: 1 | 4.0%", str_data)
self.assertIn("pellentesque: 1 | 4.0%", str_data)
self.assertIn("i: 18 | 12.9%", str_data)
self.assertIn("e: 16 | 11.4%", str_data)
self.assertIn("u: 12 | 8.6%", str_data)
self.assertIn("a: 12 | 8.6%", str_data)
self.assertIn("t: 10 | 7.1%", str_data)
self.assertIn("l: 10 | 7.1%", str_data)
self.assertIn("s: 9 | 6.4%", str_data)
if __name__ == '__main__':
runner = unittest.TextTestRunner(resultclass=ExamTestResult, verbosity=2)
unittest.main(testRunner=runner, exit=False)
| 28.784173
| 86
| 0.530242
| 7,315
| 0.909373
| 0
| 0
| 5,918
| 0.735704
| 0
| 0
| 3,446
| 0.428394
|
58b8667325936944d69237ad194f47d738bc7912
| 831
|
py
|
Python
|
olha_boca/infratores/admin.py
|
Perceu/olha-boca
|
022ff941d6bd20bb79bd1e66cd293dd2f59bf55b
|
[
"MIT"
] | null | null | null |
olha_boca/infratores/admin.py
|
Perceu/olha-boca
|
022ff941d6bd20bb79bd1e66cd293dd2f59bf55b
|
[
"MIT"
] | null | null | null |
olha_boca/infratores/admin.py
|
Perceu/olha-boca
|
022ff941d6bd20bb79bd1e66cd293dd2f59bf55b
|
[
"MIT"
] | 1
|
2022-02-20T18:43:45.000Z
|
2022-02-20T18:43:45.000Z
|
from django.contrib import admin
from olha_boca.infratores.models import Infratores
# Register your models here.
class InfratoresAdmin(admin.ModelAdmin):
list_display = ('nome', 'infracoes_a_pagar', 'total_infracoes', 'valor_a_pagar')
@admin.display(empty_value='???')
def total_infracoes(self, obj):
return obj.infracoes.count()
@admin.display(empty_value='???')
def infracoes_a_pagar(self, obj):
return obj.infracoes.filter(paga=False).count()
@admin.display(empty_value='???')
def valor_a_pagar(self, obj):
total = 0
infracoes_a_pagar = obj.infracoes.filter(paga=False).all()
for inf in infracoes_a_pagar:
total += (inf.tipo.vibs * inf.tipo.multiplicador_vibs)
return f'R$ {total:.2f}'
admin.site.register(Infratores, InfratoresAdmin)
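# Hedged sketch (assumption, not taken from the project) of the related models the admin above
# relies on: Infratores.nome, a reverse relation `infracoes`, and per-infraction `paga` plus a
# `tipo` carrying `vibs` and `multiplicador_vibs`, which valor_a_pagar multiplies and sums.
#     class TipoInfracao(models.Model):
#         vibs = models.DecimalField(max_digits=10, decimal_places=2)
#         multiplicador_vibs = models.DecimalField(max_digits=5, decimal_places=2)
#     class Infracao(models.Model):
#         infrator = models.ForeignKey('Infratores', related_name='infracoes', on_delete=models.CASCADE)
#         tipo = models.ForeignKey(TipoInfracao, on_delete=models.PROTECT)
#         paga = models.BooleanField(default=False)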
| 33.24
| 84
| 0.688327
| 666
| 0.801444
| 0
| 0
| 523
| 0.629362
| 0
| 0
| 117
| 0.140794
|
58b8a93616ab18e3bdc1bf278f2e0062041167f0
| 6,241
|
py
|
Python
|
model.py
|
ogugugugugua/Cycle-Gan-Pytorch-Implementation
|
119c7c8b3061a14f0ca988672458351d00f144aa
|
[
"MIT"
] | null | null | null |
model.py
|
ogugugugugua/Cycle-Gan-Pytorch-Implementation
|
119c7c8b3061a14f0ca988672458351d00f144aa
|
[
"MIT"
] | null | null | null |
model.py
|
ogugugugugua/Cycle-Gan-Pytorch-Implementation
|
119c7c8b3061a14f0ca988672458351d00f144aa
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import functools
import torch.nn as nn
from torch.nn import init
import torch.functional as F
from torch.autograd import Variable
print('ok')
def weights_init_normal(m):
classname = m.__class__.__name__
print(classname)
if classname.find('Conv') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def init_weight(net,init_type='normal'):
print('initialization method [%s]' % init_type)
if init_type == 'normal':
net.apply(weights_init_normal)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
class ResnetBlock(nn.Module):
def __init__(self, dim, use_dropout, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, use_dropout, use_bias)
def build_conv_block(self,dim,use_dropout,use_bias):
conv_block = []
conv_block += [nn.ReflectionPad2d(1)]
conv_block += [nn.Conv2d(dim,dim,kernel_size=3,padding=0,bias=use_bias),
nn.InstanceNorm2d(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
conv_block += [nn.ReflectionPad2d(1)]
conv_block += [nn.Conv2d(dim,dim,kernel_size=3,padding=0,bias=use_bias),
nn.InstanceNorm2d(dim)]
return nn.Sequential(*conv_block)
def forward(self,x):
out = x + self.conv_block(x)
return out
class G(nn.Module):
def __init__(self,dim=64,device_ids=[]):
super(G,self).__init__()
self.device_ids = device_ids
model = [nn.ReflectionPad2d(3),
nn.Conv2d(3, dim, kernel_size=7, padding=0,bias=False),
nn.InstanceNorm2d(dim),
nn.ReLU(True)]
for i in range(2):
mult = 2 ** i
model += [nn.Conv2d(dim * mult, dim * mult * 2, kernel_size=3,
stride=2, padding=1, bias=False),
nn.InstanceNorm2d(dim * mult * 2),
nn.ReLU(True)]
for i in range(9):
model += [ResnetBlock(dim*4,use_dropout=False,use_bias=False)]
for i in range(2):
mult = 2**(2 - i)
model += [nn.ConvTranspose2d(dim * mult, int(dim * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=False),
nn.InstanceNorm2d(int(dim * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(dim,3,kernel_size=7,padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
        use_gpu = len(self.device_ids) > 0
        if use_gpu:
            assert torch.cuda.is_available()
        if use_gpu and isinstance(input.data, torch.cuda.FloatTensor):
print('Train on GPU...')
return nn.parallel.data_parallel(self.model, input, self.device_ids)
else:
print('Train on CPU...')
return self.model(input)
class D(nn.Module):
def __init__(self,dim=64,device_ids=[]):
super(D,self).__init__()
self.device_ids = device_ids
model = [nn.Conv2d(3,dim,kernel_size=4,stride=2,padding=1),
nn.LeakyReLU(0.2,True)]
model += [nn.Conv2d(dim,dim*2,kernel_size=4,stride=2,padding=1,bias=False),
nn.InstanceNorm2d(dim*2),
nn.LeakyReLU(0.2,True)]
model += [nn.Conv2d(dim*2, dim*4, kernel_size=4, stride=2, padding=1, bias=False),
nn.InstanceNorm2d(dim*4),
nn.LeakyReLU(0.2,True)]
model += [nn.Conv2d(dim*4, dim*8, kernel_size=4, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim*8),
nn.LeakyReLU(0.2,True)]
model += [nn.Conv2d(dim*8,1,kernel_size=4,stride=1,padding=1)]
self.model = nn.Sequential(*model)
def forward(self, input):
        use_gpu = len(self.device_ids) > 0
        if use_gpu:
            assert torch.cuda.is_available()
        if use_gpu and isinstance(input.data, torch.cuda.FloatTensor):
print('Train on GPU...')
return nn.parallel.data_parallel(self.model, input, self.device_ids)
else:
print('Train on CPU...')
return self.model(input)
print('kkk')
# class te(nn.Module):
# def __init__(self):
# super(te,self).__init__()
# norm_layer=nn.InstanceNorm2d
# kw = 4
# padw = 1
# input_nc=3
# n_layers=3
# ndf=64
# use_bias = False
# sequence = [
# nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
# nn.LeakyReLU(0.2, True)
# ]
#
# nf_mult = 1
# nf_mult_prev = 1
# for n in range(1, n_layers):
# nf_mult_prev = nf_mult
# nf_mult = min(2**n, 8)
# sequence += [
# nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
# kernel_size=kw, stride=2, padding=padw, bias=use_bias),
# norm_layer(ndf * nf_mult),
# nn.LeakyReLU(0.2, True)
# ]
#
# nf_mult_prev = nf_mult
# nf_mult = min(2**n_layers, 8)
# sequence += [
# nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
# kernel_size=kw, stride=1, padding=padw, bias=use_bias),
# norm_layer(ndf * nf_mult),
# nn.LeakyReLU(0.2, True)
# ]
#
# sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
#
# self.model1 = nn.Sequential(*sequence)
# def forward(self,x):
# return self.model1(x)
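# A minimal CPU smoke test for the generator G and discriminator D defined
# above; the batch size and the 256x256 input resolution are arbitrary example
# values (any spatial size divisible by 4 works for G), and device_ids is left
# empty so the forward passes take the CPU branch.
if __name__ == '__main__':
    netG = G(dim=64, device_ids=[])
    netD = D(dim=64, device_ids=[])
    init_weight(netG, init_type='normal')
    init_weight(netD, init_type='normal')
    x = torch.randn(1, 3, 256, 256)   # dummy RGB image batch
    fake = netG(x)                    # translated image, same shape as x
    score = netD(fake)                # PatchGAN-style patch score map
    print(fake.shape, score.shape)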
| 36.497076
| 94
| 0.552155
| 3,917
| 0.627624
| 0
| 0
| 0
| 0
| 0
| 0
| 1,528
| 0.244833
|
58b91dc41ea5155b61915a8cc460140b8ef148b0
| 15,872
|
py
|
Python
|
hansberger/analysis/migrations/0001_initial.py
|
097475/hansberger
|
bb4ba1cbc410e7242a12f841e447b4d68f4298f6
|
[
"MIT"
] | 1
|
2019-04-03T13:44:38.000Z
|
2019-04-03T13:44:38.000Z
|
hansberger/analysis/migrations/0001_initial.py
|
sebastianoverdolini/hansberger
|
bb4ba1cbc410e7242a12f841e447b4d68f4298f6
|
[
"MIT"
] | 4
|
2019-05-22T09:43:09.000Z
|
2019-05-29T12:22:00.000Z
|
hansberger/analysis/migrations/0001_initial.py
|
097475/hansberger
|
bb4ba1cbc410e7242a12f841e447b4d68f4298f6
|
[
"MIT"
] | 2
|
2019-04-17T09:23:32.000Z
|
2019-05-03T10:38:16.000Z
|
# Generated by Django 2.0.13 on 2019-06-27 17:04
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('research', '0001_initial'),
('datasets', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Bottleneck',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('homology', models.PositiveIntegerField()),
('kind', models.CharField(choices=[('consecutive', 'consecutive'), ('one_to_all', 'one_to_all'), ('all_to_all', 'all_to_all')], max_length=20)),
],
),
migrations.CreateModel(
name='Diagram',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.TextField()),
('bottleneck_distance', models.FloatField()),
('bottleneck', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='analysis.Bottleneck')),
],
),
migrations.CreateModel(
name='FiltrationAnalysis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name this analysis', max_length=100)),
('slug', models.SlugField(max_length=110)),
('description', models.TextField(blank=True, help_text='Write a brief description of the analysis', max_length=500)),
('creation_date', models.DateTimeField(auto_now_add=True)),
('precomputed_distance_matrix_json', django.contrib.postgres.fields.jsonb.JSONField(default='"[]"')),
('window_size', models.PositiveIntegerField(blank=True, default=None, help_text="Leave window size blank to not use windows. Window parameter\n is ignored when dealing with precomputed distance matrix. Always check\n the dimensions of the dataset your are operating on and plan your windows\n accordingly; eventual data that won't fit into the final window will be\n discarded.", null=True)),
('window_overlap', models.PositiveIntegerField(default=0, help_text='How many columns of overlap to have in\n consequent windows, if windows are being used. It must be at most 1\n less than window size.')),
('filtration_type', models.CharField(choices=[('VRF', 'Vietoris Rips Filtration'), ('CWRF', 'Clique Weighted Rank Filtration')], help_text='Choose the type of analysis.', max_length=50)),
('distance_matrix_metric', models.CharField(blank=True, choices=[('braycurtis', 'Braycurtis'), ('canberra', 'Canberra'), ('chebyshev', 'Chebyshev'), ('cityblock', 'City block'), ('correlation', 'Correlation'), ('cosine', 'Cosine'), ('dice', 'Dice'), ('euclidean', 'Euclidean'), ('hamming', 'Hamming'), ('jaccard', 'Jaccard'), ('jensenshannon', 'Jensen Shannon'), ('kulsinski', 'Kulsinski'), ('mahalanobis', 'Mahalonobis'), ('matching', 'Matching'), ('minkowski', 'Minkowski'), ('rogerstanimoto', 'Rogers-Tanimoto'), ('russellrao', 'Russel Rao'), ('seuclidean', 'Seuclidean'), ('sokalmichener', 'Sojal-Michener'), ('sokalsneath', 'Sokal-Sneath'), ('sqeuclidean', 'Sqeuclidean'), ('yule', 'Yule')], help_text='If Vietoris-Rips filtration is selected and not using a precomputed distance matrix, choose the\n distance metric to use on the selected dataset. This parameter is ignored in all other cases.', max_length=20)),
('max_homology_dimension', models.PositiveIntegerField(default=1, help_text='Maximum homology dimension computed. Will compute all dimensions lower than and equal to this value.\n For 1, H_0 and H_1 will be computed.')),
('max_distances_considered', models.FloatField(blank=True, default=None, help_text='Maximum distances considered when constructing filtration.\n If blank, compute the entire filtration.', null=True)),
('coeff', models.PositiveIntegerField(default=2, help_text='Compute homology with coefficients in the prime field Z/pZ for\n p=coeff.')),
('do_cocycles', models.BooleanField(default=False, help_text='Indicator of whether to compute cocycles.')),
('n_perm', models.IntegerField(blank=True, default=None, help_text='The number of points to subsample in\n a “greedy permutation,” or a furthest point sampling of the points. These points will\n be used in lieu of the full point cloud for a faster computation, at the expense of\n some accuracy, which can be bounded as a maximum bottleneck distance to all diagrams\n on the original point set', null=True)),
('entropy_normalized_graph', models.TextField(blank=True, null=True)),
('entropy_unnormalized_graph', models.TextField(blank=True, null=True)),
('dataset', models.ForeignKey(blank=True, default=None, help_text='Select the source dataset from the loaded datasets', null=True, on_delete=django.db.models.deletion.CASCADE, to='datasets.Dataset')),
('research', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='research.Research')),
],
options={
'verbose_name': 'filtration analysis',
'verbose_name_plural': 'filtration analyses',
'abstract': False,
},
),
migrations.CreateModel(
name='FiltrationWindow',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.PositiveIntegerField()),
('slug', models.SlugField(max_length=150)),
('creation_date', models.DateTimeField(auto_now_add=True)),
('start', models.PositiveIntegerField(blank=True, null=True)),
('end', models.PositiveIntegerField(blank=True, null=True)),
('result_matrix', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('diagrams', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('result_entropy_normalized', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('result_entropy_unnormalized', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('analysis', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='analysis.FiltrationAnalysis')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MapperAnalysis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name this analysis', max_length=100)),
('slug', models.SlugField(max_length=110)),
('description', models.TextField(blank=True, help_text='Write a brief description of the analysis', max_length=500)),
('creation_date', models.DateTimeField(auto_now_add=True)),
('precomputed_distance_matrix_json', django.contrib.postgres.fields.jsonb.JSONField(default='"[]"')),
('window_size', models.PositiveIntegerField(blank=True, default=None, help_text="Leave window size blank to not use windows. Window parameter\n is ignored when dealing with precomputed distance matrix. Always check\n the dimensions of the dataset your are operating on and plan your windows\n accordingly; eventual data that won't fit into the final window will be\n discarded.", null=True)),
('window_overlap', models.PositiveIntegerField(default=0, help_text='How many columns of overlap to have in\n consequent windows, if windows are being used. It must be at most 1\n less than window size.')),
('distance_matrix_metric', models.CharField(blank=True, choices=[('braycurtis', 'Braycurtis'), ('canberra', 'Canberra'), ('chebyshev', 'Chebyshev'), ('cityblock', 'City block'), ('correlation', 'Correlation'), ('cosine', 'Cosine'), ('dice', 'Dice'), ('euclidean', 'Euclidean'), ('hamming', 'Hamming'), ('jaccard', 'Jaccard'), ('jensenshannon', 'Jensen Shannon'), ('kulsinski', 'Kulsinski'), ('mahalanobis', 'Mahalonobis'), ('matching', 'Matching'), ('minkowski', 'Minkowski'), ('rogerstanimoto', 'Rogers-Tanimoto'), ('russellrao', 'Russel Rao'), ('seuclidean', 'Seuclidean'), ('sokalmichener', 'Sojal-Michener'), ('sokalsneath', 'Sokal-Sneath'), ('sqeuclidean', 'Sqeuclidean'), ('yule', 'Yule')], help_text='If not using a precomputed matrix, choose the distance metric to use on the dataset.', max_length=20)),
('projection', models.CharField(choices=[('sum', 'Sum'), ('mean', 'Mean'), ('median', 'Median'), ('max', 'Max'), ('min', 'Min'), ('std', 'Std'), ('dist_mean', 'Dist_mean'), ('l2norm', 'L2norm'), ('knn_distance_n', 'knn_distance_n')], default='sum', help_text='Specify a projection/lens type.', max_length=50)),
('knn_n_value', models.PositiveIntegerField(blank=True, help_text='Specify the value of n in knn_distance_n', null=True)),
('scaler', models.CharField(choices=[('None', 'None'), ('MinMaxScaler', 'MinMaxScaler'), ('MaxAbsScaler', 'MaxAbsScaler'), ('RobustScaler', 'RobustScaler'), ('StandardScaler', 'StandardScaler')], default='MinMaxScaler', help_text='Scaler of the data applied after mapping. Use None for no scaling.', max_length=50)),
('use_original_data', models.BooleanField(default=False, help_text='If ticked, clustering is run on the original data,\n else it will be run on the lower dimensional projection.')),
('clusterer', models.CharField(choices=[('k-means', 'K-Means'), ('affinity_propagation', 'Affinity propagation'), ('mean-shift', 'Mean-shift'), ('spectral_clustering', 'Spectral clustering'), ('agglomerative_clustering', 'StandardScaler'), ('DBSCAN(min_samples=1)', 'DBSCAN(min_samples=1)'), ('DBSCAN', 'DBSCAN'), ('gaussian_mixtures', 'Gaussian mixtures'), ('birch', 'Birch')], default='DBSCAN', help_text='Select the clustering algorithm.', max_length=50)),
('cover_n_cubes', models.PositiveIntegerField(default=10, help_text='Number of hypercubes along each dimension.\n Sometimes referred to as resolution.')),
('cover_perc_overlap', models.FloatField(default=0.5, help_text='Amount of overlap between adjacent cubes calculated\n only along 1 dimension.')),
('graph_nerve_min_intersection', models.IntegerField(default=1, help_text='Minimum intersection considered when\n computing the nerve. An edge will be created only when the\n intersection between two nodes is greater than or equal to\n min_intersection')),
('precomputed', models.BooleanField(default=False, help_text='Tell Mapper whether the data that you are clustering on\n is a precomputed distance matrix. If set to True, the assumption is that you are\n also telling your clusterer that metric=’precomputed’ (which is an argument for\n DBSCAN among others), which will then cause the clusterer to expect a square\n distance matrix for each hypercube. precomputed=True will give a square matrix\n to the clusterer to fit on for each hypercube.')),
('remove_duplicate_nodes', models.BooleanField(default=False, help_text='Removes duplicate nodes before edges are\n determined. A node is considered to be duplicate if it has exactly\n the same set of points as another node.')),
('dataset', models.ForeignKey(blank=True, default=None, help_text='Select the source dataset from the loaded datasets', null=True, on_delete=django.db.models.deletion.CASCADE, to='datasets.Dataset')),
('research', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='research.Research')),
],
options={
'verbose_name': 'mapper algorithm analysis',
                'verbose_name_plural': 'mapper algorithm analyses',
'abstract': False,
},
),
migrations.CreateModel(
name='MapperWindow',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.PositiveIntegerField()),
('slug', models.SlugField(max_length=150)),
('creation_date', models.DateTimeField(auto_now_add=True)),
('start', models.PositiveIntegerField(blank=True, null=True)),
('end', models.PositiveIntegerField(blank=True, null=True)),
('graph', models.TextField(blank=True, null=True)),
('analysis', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='windows', related_query_name='window', to='analysis.MapperAnalysis')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='diagram',
name='window1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='window1', to='analysis.FiltrationWindow'),
),
migrations.AddField(
model_name='diagram',
name='window2',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='window2', to='analysis.FiltrationWindow'),
),
migrations.AddField(
model_name='bottleneck',
name='analysis',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='analysis.FiltrationAnalysis'),
),
migrations.AddField(
model_name='bottleneck',
name='window',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='analysis.FiltrationWindow'),
),
migrations.AlterUniqueTogether(
name='mapperanalysis',
unique_together={('slug', 'research')},
),
migrations.AlterUniqueTogether(
name='filtrationanalysis',
unique_together={('slug', 'research')},
),
]
| 99.823899
| 951
| 0.599609
| 15,709
| 0.989232
| 0
| 0
| 0
| 0
| 0
| 0
| 8,163
| 0.514043
|
58b94a18dc5fb864b68af5b8440bfcf6bdf51d21
| 1,589
|
py
|
Python
|
src/DOMObjects/schema.py
|
villagertech/DOMObjects
|
6a86caca5160d2488cc19823e71e9e7ba99a4a0c
|
[
"MIT"
] | null | null | null |
src/DOMObjects/schema.py
|
villagertech/DOMObjects
|
6a86caca5160d2488cc19823e71e9e7ba99a4a0c
|
[
"MIT"
] | null | null | null |
src/DOMObjects/schema.py
|
villagertech/DOMObjects
|
6a86caca5160d2488cc19823e71e9e7ba99a4a0c
|
[
"MIT"
] | null | null | null |
__author__ = "Rob MacKinnon <rome@villagertech.com>"
__package__ = "DOMObjects"
__name__ = "DOMObjects.schema"
__license__ = "MIT"
class DOMSchema(object):
""" @abstract Structure object for creating more advanced DOM trees
@params children [dict] Default structure of children
@params dictgroups [dict] Default structure of dictgroups
@params props [dict] Default structure of properties
        @example Sample object
            _schema = DOMSchema()
            _schema.children = {
                "sip": {},
                "schedules": {},
                "favorites": {
                    "dictgroups": ["sip", "http"]
                }
            }
"""
    def __init__(self,
                 children: dict = None,
                 dictgroups: dict = None,
                 props: dict = None):
        """ @abstract Object initializer and bootstraps first object.
            @params children [dict] Default structure of children
            @params dictgroups [dict] Default structure of dictgroups
            @params props [dict] Default structure of properties
            @returns [DOMSchema] object
        """
        # Fall back to fresh dicts when no argument is given, avoiding the
        # shared-mutable-default pitfall.
        self.dictgroups = dictgroups if dictgroups is not None else {}
        self.children = children if children is not None else {}
        self.props = props if props is not None else {}
@property
def keys(self) -> list:
""" @abstract Returns all top-level keys in schema
@returns [list] of keys
"""
_keys = list()
_keys.extend(self.children.keys())
_keys.extend(self.dictgroups.keys())
_keys.extend(self.props.keys())
return _keys
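# A small usage sketch mirroring the docstring example above; the key names
# ("sip", "schedules", "favorites", "enabled") are illustrative only.
def _example_schema() -> DOMSchema:
    _settings_schema = DOMSchema(
        children={
            "sip": {},
            "schedules": {},
            "favorites": {"dictgroups": ["sip", "http"]}
        },
        props={"enabled": True}
    )
    # keys gathers the top-level names from children, dictgroups and props:
    # ['sip', 'schedules', 'favorites', 'enabled']
    return _settings_schema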
| 33.104167
| 71
| 0.574575
| 1,455
| 0.91567
| 0
| 0
| 316
| 0.198867
| 0
| 0
| 1,034
| 0.650724
|
58ba74567e6fec0a65ad5136fbd9ca609c0ebda8
| 416
|
py
|
Python
|
Python/6 - kyu/6 kyu - Detect Pangram.py
|
danielbom/codewars
|
d45b5a813c6f1d952a50d22f0b2fcea4ef3d0e27
|
[
"MIT"
] | null | null | null |
Python/6 - kyu/6 kyu - Detect Pangram.py
|
danielbom/codewars
|
d45b5a813c6f1d952a50d22f0b2fcea4ef3d0e27
|
[
"MIT"
] | null | null | null |
Python/6 - kyu/6 kyu - Detect Pangram.py
|
danielbom/codewars
|
d45b5a813c6f1d952a50d22f0b2fcea4ef3d0e27
|
[
"MIT"
] | null | null | null |
# https://www.codewars.com/kata/detect-pangram/train/python
# My solution
import string
def is_pangram(text):
return len( {letter.lower() for letter in text if letter.isalpha()} ) == 26
# ...
import string
def is_pangram(s):
    # string.ascii_lowercase works on both Python 2 and 3
    # (string.lowercase existed only in Python 2)
    return set(string.ascii_lowercase) <= set(s.lower())
# ...
import string
def is_pangram(s):
    s = s.lower()
    return all(letter in s for letter in string.ascii_lowercase)
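# Quick sanity checks that apply to any of the solutions above.
if __name__ == "__main__":
    assert is_pangram("The quick brown fox jumps over the lazy dog")
    assert not is_pangram("This sentence is not quite a pangram")
    print("pangram checks passed")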
| 23.111111
| 80
| 0.658654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 86
| 0.206731
|
58bb3b75ffbc07bac010c5a20ea7da7ddc296fd6
| 3,709
|
py
|
Python
|
L1Trigger/L1TCalorimeter/python/customiseReEmulateCaloLayer2.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
L1Trigger/L1TCalorimeter/python/customiseReEmulateCaloLayer2.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
L1Trigger/L1TCalorimeter/python/customiseReEmulateCaloLayer2.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
def reEmulateLayer2(process):
process.load('L1Trigger/L1TCalorimeter/simCaloStage2Digis_cfi')
process.load('L1Trigger.L1TCalorimeter.caloStage2Params_2017_v1_7_excl30_cfi')
process.simCaloStage2Digis.towerToken = cms.InputTag("caloStage2Digis", "CaloTower")
process.caloLayer2 = cms.Path(process.simCaloStage2Digis)
process.schedule.append(process.caloLayer2)
return process
def hwEmulCompHistos(process):
process.TFileService = cms.Service("TFileService",
fileName = cms.string("l1tCalo_2016_simHistos.root"),
closeFileFast = cms.untracked.bool(True)
)
# histograms
process.load('L1Trigger.L1TCalorimeter.l1tStage2CaloAnalyzer_cfi')
process.l1tStage2CaloAnalyzer.doEvtDisp = False
process.l1tStage2CaloAnalyzer.mpBx = 0
process.l1tStage2CaloAnalyzer.dmxBx = 0
process.l1tStage2CaloAnalyzer.allBx = False
process.l1tStage2CaloAnalyzer.towerToken = cms.InputTag("simCaloStage2Digis", "MP")
process.l1tStage2CaloAnalyzer.clusterToken = cms.InputTag("None")
process.l1tStage2CaloAnalyzer.mpEGToken = cms.InputTag("simCaloStage2Digis", "MP")
process.l1tStage2CaloAnalyzer.mpTauToken = cms.InputTag("simCaloStage2Digis", "MP")
process.l1tStage2CaloAnalyzer.mpJetToken = cms.InputTag("simCaloStage2Digis", "MP")
process.l1tStage2CaloAnalyzer.mpEtSumToken = cms.InputTag("simCaloStage2Digis", "MP")
process.l1tStage2CaloAnalyzer.egToken = cms.InputTag("simCaloStage2Digis")
process.l1tStage2CaloAnalyzer.tauToken = cms.InputTag("simCaloStage2Digis")
process.l1tStage2CaloAnalyzer.jetToken = cms.InputTag("simCaloStage2Digis")
process.l1tStage2CaloAnalyzer.etSumToken = cms.InputTag("simCaloStage2Digis")
import L1Trigger.L1TCalorimeter.l1tStage2CaloAnalyzer_cfi
process.l1tCaloStage2HwHistos = L1Trigger.L1TCalorimeter.l1tStage2CaloAnalyzer_cfi.l1tStage2CaloAnalyzer.clone()
process.l1tCaloStage2HwHistos.doEvtDisp = False
process.l1tCaloStage2HwHistos.mpBx = 0
process.l1tCaloStage2HwHistos.dmxBx = 0
process.l1tCaloStage2HwHistos.allBx = False
process.l1tCaloStage2HwHistos.towerToken = cms.InputTag("caloStage2Digis", "CaloTower")
process.l1tCaloStage2HwHistos.clusterToken = cms.InputTag("None")
process.l1tCaloStage2HwHistos.mpEGToken = cms.InputTag("caloStage2Digis", "MP")
process.l1tCaloStage2HwHistos.mpTauToken = cms.InputTag("caloStage2Digis","MP")
process.l1tCaloStage2HwHistos.mpJetToken = cms.InputTag("caloStage2Digis", "MP")
process.l1tCaloStage2HwHistos.mpEtSumToken = cms.InputTag("caloStage2Digis", "MP")
process.l1tCaloStage2HwHistos.egToken = cms.InputTag("caloStage2Digis", "EGamma")
process.l1tCaloStage2HwHistos.tauToken = cms.InputTag("caloStage2Digis", "Tau")
process.l1tCaloStage2HwHistos.jetToken = cms.InputTag("caloStage2Digis", "Jet")
process.l1tCaloStage2HwHistos.etSumToken = cms.InputTag("caloStage2Digis", "EtSum")
process.hwEmulHistos = cms.Path(
process.l1tStage2CaloAnalyzer
+process.l1tCaloStage2HwHistos
)
process.schedule.append(process.hwEmulHistos)
return process
def reEmulateLayer2ValHistos(process):
process.load('EventFilter.L1TRawToDigi.caloTowersFilter_cfi')
reEmulateLayer2(process)
hwEmulCompHistos(process)
#process.l1ntupleraw.insert(0,process.caloTowersFilter)
#process.l1ntuplesim.insert(0,process.caloTowersFilter)
process.caloLayer2.insert(0,process.caloTowersFilter)
process.hwEmulHistos.insert(0,process.caloTowersFilter)
return process
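# These helpers are intended to be applied as cmsRun customisation functions.
# In a configuration that already defines `process` (with a schedule), the
# re-emulation plus validation histograms would typically be wired in roughly
# like this; the surrounding configuration is assumed and not shown here:
#
#   from L1Trigger.L1TCalorimeter.customiseReEmulateCaloLayer2 import reEmulateLayer2ValHistos
#   process = reEmulateLayer2ValHistos(process)
#
# or requested from cmsDriver.py via
#   --customise L1Trigger/L1TCalorimeter/customiseReEmulateCaloLayer2.reEmulateLayer2ValHistos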
| 44.686747
| 117
| 0.758425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 822
| 0.221623
|